diff --git a/.gitattributes b/.gitattributes
index e3cc8f0af6f02b931c752da90d50843719bef342..fda4c4269f52adb75cf57a2b0aabd12399f90753 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -3919,3 +3919,65 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
569.jsonl filter=lfs diff=lfs merge=lfs -text
5763.jsonl filter=lfs diff=lfs merge=lfs -text
577.jsonl filter=lfs diff=lfs merge=lfs -text
+5769.jsonl filter=lfs diff=lfs merge=lfs -text
+5742.jsonl filter=lfs diff=lfs merge=lfs -text
+6017.jsonl filter=lfs diff=lfs merge=lfs -text
+6053.jsonl filter=lfs diff=lfs merge=lfs -text
+6042.jsonl filter=lfs diff=lfs merge=lfs -text
+6041.jsonl filter=lfs diff=lfs merge=lfs -text
+606.jsonl filter=lfs diff=lfs merge=lfs -text
+6051.jsonl filter=lfs diff=lfs merge=lfs -text
+6050.jsonl filter=lfs diff=lfs merge=lfs -text
+6063.jsonl filter=lfs diff=lfs merge=lfs -text
+6058.jsonl filter=lfs diff=lfs merge=lfs -text
+6054.jsonl filter=lfs diff=lfs merge=lfs -text
+6060.jsonl filter=lfs diff=lfs merge=lfs -text
+5983.jsonl filter=lfs diff=lfs merge=lfs -text
+5991.jsonl filter=lfs diff=lfs merge=lfs -text
+6067.jsonl filter=lfs diff=lfs merge=lfs -text
+6068.jsonl filter=lfs diff=lfs merge=lfs -text
+6070.jsonl filter=lfs diff=lfs merge=lfs -text
+6035.jsonl filter=lfs diff=lfs merge=lfs -text
+607.jsonl filter=lfs diff=lfs merge=lfs -text
+6074.jsonl filter=lfs diff=lfs merge=lfs -text
+6073.jsonl filter=lfs diff=lfs merge=lfs -text
+6075.jsonl filter=lfs diff=lfs merge=lfs -text
+6080.jsonl filter=lfs diff=lfs merge=lfs -text
+6011.jsonl filter=lfs diff=lfs merge=lfs -text
+6078.jsonl filter=lfs diff=lfs merge=lfs -text
+608.jsonl filter=lfs diff=lfs merge=lfs -text
+6082.jsonl filter=lfs diff=lfs merge=lfs -text
+6064.jsonl filter=lfs diff=lfs merge=lfs -text
+6083.jsonl filter=lfs diff=lfs merge=lfs -text
+6065.jsonl filter=lfs diff=lfs merge=lfs -text
+6057.jsonl filter=lfs diff=lfs merge=lfs -text
+6066.jsonl filter=lfs diff=lfs merge=lfs -text
+6084.jsonl filter=lfs diff=lfs merge=lfs -text
+6094.jsonl filter=lfs diff=lfs merge=lfs -text
+6090.jsonl filter=lfs diff=lfs merge=lfs -text
+6092.jsonl filter=lfs diff=lfs merge=lfs -text
+6096.jsonl filter=lfs diff=lfs merge=lfs -text
+6071.jsonl filter=lfs diff=lfs merge=lfs -text
+6097.jsonl filter=lfs diff=lfs merge=lfs -text
+6059.jsonl filter=lfs diff=lfs merge=lfs -text
+6072.jsonl filter=lfs diff=lfs merge=lfs -text
+6062.jsonl filter=lfs diff=lfs merge=lfs -text
+6081.jsonl filter=lfs diff=lfs merge=lfs -text
+6087.jsonl filter=lfs diff=lfs merge=lfs -text
+6095.jsonl filter=lfs diff=lfs merge=lfs -text
+6056.jsonl filter=lfs diff=lfs merge=lfs -text
+6101.jsonl filter=lfs diff=lfs merge=lfs -text
+6105.jsonl filter=lfs diff=lfs merge=lfs -text
+4313.jsonl filter=lfs diff=lfs merge=lfs -text
+4268.jsonl filter=lfs diff=lfs merge=lfs -text
+4361.jsonl filter=lfs diff=lfs merge=lfs -text
+4360.jsonl filter=lfs diff=lfs merge=lfs -text
+4362.jsonl filter=lfs diff=lfs merge=lfs -text
+4363.jsonl filter=lfs diff=lfs merge=lfs -text
+4365.jsonl filter=lfs diff=lfs merge=lfs -text
+1316.jsonl filter=lfs diff=lfs merge=lfs -text
+1319.jsonl filter=lfs diff=lfs merge=lfs -text
+1326.jsonl filter=lfs diff=lfs merge=lfs -text
+1248.jsonl filter=lfs diff=lfs merge=lfs -text
+1257.jsonl filter=lfs diff=lfs merge=lfs -text
+1262.jsonl filter=lfs diff=lfs merge=lfs -text
diff --git a/1248.jsonl b/1248.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..eaebb4c3594eaf87e058341492824e8ba5737dda
--- /dev/null
+++ b/1248.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3332ed90351f3a29bed401de3b7bcdc7793c27412ec6568559eee3344aafee49
+size 54858320
diff --git a/1257.jsonl b/1257.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..06b1bbbc4dd746f7067dc1d39fb2ce90e2cd55f1
--- /dev/null
+++ b/1257.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ceacfb1fbfd5efba16374d122cdf47bd955498596c039cb36d45bb8fd81619e
+size 56093398
diff --git a/1262.jsonl b/1262.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5006f1df6d677180b5c193b976062218cc514f67
--- /dev/null
+++ b/1262.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe2f99ad90d7d268f29b5887fef0e99dcd0b34094f492217df79bff1b690ad83
+size 63293544
diff --git a/1316.jsonl b/1316.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..aff0081f95365f99e2ec695bb2bccb8d81d6235d
--- /dev/null
+++ b/1316.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc9501067ccad9c6550d5b93b5f8db7e3c398766df797cb6f5f83a212a787e01
+size 58240153
diff --git a/1319.jsonl b/1319.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..3e81745a2efbad81cb218214ad5640a9d26f0a22
--- /dev/null
+++ b/1319.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5218b695d84478f384362b9c6ba5e73cecb0824d00e4e29cffce24ccdfd66794
+size 55895987
diff --git a/1326.jsonl b/1326.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..1f8049dddc5a9bd597fb72e37d38e6f84a465a66
--- /dev/null
+++ b/1326.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:024fb9f62f69303c3ea894ac1b17756438d649a91c857226be84732f04a15ea5
+size 53172467
diff --git a/3462.jsonl b/3462.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/3463.jsonl b/3463.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/3465.jsonl b/3465.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..999e55601ddf578ea61e95c25211dca35f82cd07
--- /dev/null
+++ b/3465.jsonl
@@ -0,0 +1,995 @@
+{"seq_id":"36170038088","text":"import math\nimport os\nimport os.path as osp\nimport time\nfrom collections import deque\n\nimport joblib\nimport numpy as np\n# noinspection PyPackageRequirements\nimport tensorflow as tf\n\nfrom agents.common import get_throttle\nfrom sim.action import Action\nfrom vendor.openai.baselines import logger\n\nfrom vendor.openai.baselines.common.math_util import explained_variance\n\nimport config as c\n\nTF_VAR_SCOPE = 'ppo2model'\n\n\nclass Model(object):\n def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,\n nsteps, ent_coef, vf_coef, max_grad_norm, **kwargs):\n sess = tf.get_default_session()\n\n act_model = policy(sess, ob_space, ac_space, nbatch_act, 1, reuse=False, **kwargs)\n train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps, reuse=True, **kwargs)\n\n A = train_model.pdtype.sample_placeholder([None])\n ADV = tf.placeholder(tf.float32, [None])\n R = tf.placeholder(tf.float32, [None])\n OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])\n OLDVPRED = tf.placeholder(tf.float32, [None])\n LR = tf.placeholder(tf.float32, [])\n CLIPRANGE = tf.placeholder(tf.float32, [])\n\n neglogpac = train_model.pd.neglogp(A)\n entropy = tf.reduce_mean(train_model.pd.entropy())\n\n vpred = train_model.vf\n vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)\n vf_losses1 = tf.square(vpred - R)\n vf_losses2 = tf.square(vpredclipped - R)\n vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))\n ratio = tf.exp(OLDNEGLOGPAC - neglogpac)\n pg_losses = -ADV * ratio\n pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)\n pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))\n approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))\n clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))\n loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef\n with tf.variable_scope(TF_VAR_SCOPE):\n params = tf.trainable_variables()\n grads = tf.gradients(loss, params)\n if max_grad_norm is not None:\n grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)\n grads = list(zip(grads, params))\n trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)\n _train = trainer.apply_gradients(grads)\n\n def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):\n advs = returns - values\n if len(advs) > 1:\n advs = (advs - advs.mean()) / (advs.std() + 1e-8)\n for _adv in advs:\n if math.isnan(_adv):\n print('huh oh nan time')\n td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr,\n CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}\n if states is not None:\n td_map[train_model.S] = states\n td_map[train_model.M] = masks\n # print('running backprop')\n ret = sess.run(\n [pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],\n td_map\n )[:-1]\n return ret\n self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']\n\n def save(save_path):\n print('saving model to %s' % save_path)\n ps = sess.run(params)\n joblib.dump(ps, save_path)\n\n def load(load_path):\n print('loading weights from %s' % load_path)\n loaded_params = joblib.load(load_path)\n restores = []\n for p, loaded_p in zip(params, loaded_params):\n restores.append(p.assign(loaded_p))\n sess.run(restores)\n # If you want to load weights, also save/load observation scaling inside VecNormalize\n\n self.train = train\n self.train_model = train_model\n self.act_model = act_model\n 
self.step = act_model.step\n self.value = act_model.value\n self.initial_state = act_model.initial_state\n self.save = save\n self.load = load\n tf.global_variables_initializer().run(session=sess) #pylint: disable=E1101\n\n\ndef mis(action_probs, rewards):\n \"\"\" Mistake importance scaling\n It seems that taking the log probability in Policy Gradient reverses the amount of learning you would want for\n negative rewards. i.e. We learn much more from unlikely bad actions, than we do likely ones. Whereas this is what\n we want for positive rewards - to learn more from unlikely good actions, we would want the opposite for negative\n rewards - learn more from likely bad actions because our goal is for bad actions and states to be unlikely.\n I've tested these ideas a bit in baselines and the results seem to be good.\n Although I'm sort of duct-taping on the idea by scaling negative rewards inversely to their odds to reverse\n the effect of taking the log. I also notice that DQN, which does not scale the gradient by log likelihood,\n does better than PG methods on Atari games with mostly negative rewards, i.e. DoubleDunk, ice hockey, and surround,\n with skiing being an exception to this rule - but the score for skiing is weird.\"\"\"\n mis_rewards = []\n for i, reward in enumerate(rewards):\n if 'SCALE_ALL_REWARDS' in os.environ:\n mis_rewards.append(reward * 1.8) # Works (in pong), but not as well as scaling by odds\n else:\n if reward < 0:\n scale = 1 + action_probs[i] / (1 - action_probs[i])\n scale = min(scale, 3)\n mis_rewards.append(reward * scale)\n else:\n mis_rewards.append(reward)\n return mis_rewards\n\n\nclass Runner(object):\n\n def __init__(self, *, env, model, nsteps, gamma, lam):\n self.env = env\n self.model = model\n nenv = env.num_envs\n self.obs = np.zeros((nenv,) + env.observation_space.shape, dtype=model.train_model.X.dtype.name)\n self.obs[:] = env.reset()\n self.gamma = gamma\n self.lam = lam\n self.nsteps = nsteps\n self.states = model.initial_state\n self.dones = [False for _ in range(nenv)]\n\n def run(self):\n mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]\n mb_states = self.states\n epinfos = []\n for _ in range(self.nsteps):\n actions, values, self.states, neglogpacs, action_probs = self.model.step(self.obs, self.states, self.dones)\n\n mb_obs.append(self.obs.copy())\n mb_actions.append(actions)\n mb_values.append(values)\n mb_neglogpacs.append(neglogpacs)\n mb_dones.append(self.dones)\n\n self.obs[:], rewards, self.dones, infos = self.env.step(actions)\n\n rewards = mis(action_probs, rewards)\n\n for info in infos:\n maybe_episode_info = info.get('episode') if info else None\n if maybe_episode_info: epinfos.append(maybe_episode_info)\n\n mb_rewards.append(rewards)\n #batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32)\n mb_actions = np.asarray(mb_actions)\n mb_values = np.asarray(mb_values, dtype=np.float32)\n mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)\n mb_dones = np.asarray(mb_dones, dtype=np.bool)\n last_values = self.model.value(self.obs, self.states, self.dones)\n #discount/bootstrap off value fn\n mb_returns = np.zeros_like(mb_rewards)\n mb_advs = np.zeros_like(mb_rewards)\n lastgaelam = 0\n for t in reversed(range(self.nsteps)):\n if t == self.nsteps - 1:\n nextnonterminal = 1.0 - self.dones\n nextvalues = last_values\n else:\n nextnonterminal = 1.0 - mb_dones[t+1]\n nextvalues = mb_values[t+1]\n delta = 
mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]\n mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam\n mb_returns = mb_advs + mb_values\n\n # TODO(py27): Python versions < 3.5 do not support starred expressions in tuples, lists, and sets\n return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),\n mb_states, epinfos)\n\n def process_actions(self, actions):\n action = Action.from_gym(actions)\n action.throttle = get_throttle(actual_speed=self.obs['speed'], target_speed=(8 * 100))\n actions = action.as_gym()\n return actions\n\n\n# obs, returns, masks, actions, values, neglogpacs, states = runner.run()\n\n\ndef sf01(arr):\n \"\"\"\n swap and then flatten axes 0 and 1\n \"\"\"\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])\n\n\ndef constfn(val):\n def f(_):\n return val\n return f\n\n\ndef learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr,\n vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,\n log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,\n save_interval=0, eval_only=False, **kwargs):\n\n if isinstance(lr, float): lr = constfn(lr)\n else: assert callable(lr)\n if isinstance(cliprange, float): cliprange = constfn(cliprange)\n else: assert callable(cliprange)\n total_timesteps = int(total_timesteps)\n\n nenvs = env.num_envs\n ob_space = env.observation_space\n\n ac_space = env.action_space\n nbatch = nenvs * nsteps\n\n if nenvs < nminibatches and 'lstm' in policy.__name__.lower():\n # We aren't running enough environments to split our observations across\n nbatch_train = nbatch\n else:\n nbatch_train = nbatch // nminibatches\n\n make_model = lambda : Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,\n nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,\n max_grad_norm=max_grad_norm, **kwargs)\n if save_interval and logger.get_dir():\n import cloudpickle\n with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:\n fh.write(cloudpickle.dumps(make_model))\n model = make_model()\n if c.PPO_RESUME_PATH is not None:\n model.load(c.PPO_RESUME_PATH)\n\n runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)\n\n epinfobuf = deque(maxlen=100)\n tfirststart = time.time()\n\n nupdates = total_timesteps//nbatch\n for update in range(1, nupdates + 1):\n assert nbatch % nminibatches == 0\n nbatch_train = nbatch // nminibatches\n tstart = time.time()\n frac = 1.0 - (update - 1.0) / nupdates\n lrnow = lr(frac)\n cliprangenow = cliprange(frac)\n\n obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632\n\n if eval_only:\n continue\n\n epinfobuf.extend(epinfos)\n mblossvals = []\n if states is None: # nonrecurrent version\n inds = np.arange(nbatch)\n for _ in range(noptepochs):\n np.random.shuffle(inds)\n for start in range(0, nbatch, nbatch_train):\n end = start + nbatch_train\n minibatch_indxs = inds[start:end]\n slices = (arr[minibatch_indxs] for arr in (obs, returns, masks, actions, values, neglogpacs))\n mblossvals.append(model.train(lrnow, cliprangenow, *slices))\n else: # recurrent version\n # assert nenvs % nminibatches == 0\n # envsperbatch = nenvs // nminibatches\n envinds = np.arange(nenvs)\n flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)\n envsperbatch = nbatch_train // nsteps # ((nevns * nsteps) // nminibatches) // nsteps\n envsperbatch = max(envsperbatch, 1)\n for _ in range(noptepochs):\n 
np.random.shuffle(envinds)\n for start in range(0, nenvs, envsperbatch):\n end = start + envsperbatch\n mbenvinds = envinds[start:end]\n mbflatinds = flatinds[mbenvinds].ravel()\n slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))\n mbstates = states[mbenvinds]\n\n # TODO(py27): Python versions < 3.5 do not allow positional arguments after *expression\n mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))\n\n lossvals = np.mean(mblossvals, axis=0)\n tnow = time.time()\n fps = int(nbatch / (tnow - tstart))\n if update % log_interval == 0 or update == 1:\n ev = explained_variance(values, returns)\n logger.logkv(\"serial_timesteps\", update * nsteps)\n logger.logkv(\"nupdates\", update)\n logger.logkv(\"total_timesteps\", update * nbatch)\n logger.logkv(\"fps\", fps)\n logger.logkv(\"explained_variance\", float(ev))\n logger.logkv('eprewmean', safemean([epinfo['reward'] for epinfo in epinfobuf]))\n logger.logkv('eplenmean', safemean([epinfo['length'] for epinfo in epinfobuf]))\n logger.logkv('time_elapsed', tnow - tfirststart)\n for (lossval, lossname) in zip(lossvals, model.loss_names):\n logger.logkv(lossname, lossval)\n logger.dumpkvs()\n # input('continue?')\n if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():\n checkdir = osp.join(logger.get_dir(), 'checkpoints')\n os.makedirs(checkdir, exist_ok=True)\n savepath = osp.join(checkdir, '%.5i'%update)\n print('Saving to', savepath)\n model.save(savepath)\n env.close()\n\n\ndef safemean(xs):\n return np.nan if len(xs) == 0 else np.mean(xs)\n","repo_name":"deepdrive/deepdrive","sub_path":"vendor/openai/baselines/ppo2/ppo2.py","file_name":"ppo2.py","file_ext":"py","file_size_in_byte":14112,"program_lang":"python","lang":"en","doc_type":"code","stars":862,"dataset":"github-code","pt":"82"}
+{"seq_id":"38186946374","text":"import numpy as np\n\n\nclass Tuple:\n id = 0\n\n def __init__(self, pid=0, offset=0, qid_atts=None):\n self.pid = Tuple.id\n Tuple.id += 1\n if qid_atts is None:\n qid_atts = {}\n self.qidAtts = qid_atts\n self.pid = pid\n self.offset = offset\n\n def __str__(self):\n return str(self.pid)\n\n\nclass Cluster:\n\n \"\"\"\n tuples: list\n list of tuples in the cluster: [t1, t2, ...]\n genAtts: dict\n dictionary with minimum and maximum of each pid in Cluster that will be used in calculation of Tau.\n The key is the attribute's name, and the value is a a tuple (min, max) containing the minimum and maximum values\n in the cluster -> {att_name: (min, max)}.\n \"\"\"\n\n id = 0\n\n def __init__(self, tuple_):\n self.id = Cluster.id\n Cluster.id += 1\n self.tuples = [tuple_]\n self.genAtts = {}\n # Transforming {key, value} in {key, (value, value)}\n for key, value in tuple_.qidAtts.items():\n self.genAtts[key] = (value, value)\n\n # add tuple to [tuples] and update min max in each attribute in genAtts\n def add_tuple(self, tuple_):\n \"\"\"\n Adds a new tuple in the cluster and then calls put_values(tuple_.qidAtts).\n\n :param tuple_: new tuple\n \"\"\"\n self.tuples.append(tuple_)\n self.put_values(tuple_.qidAtts)\n\n def put_values(self, qids):\n \"\"\"\n Updates the genAtts list, i.e. the (min, max) of each attribute.\n :param qids: Attributes of a new tuple used to update genAtts.\n \"\"\"\n for key in qids.keys():\n # if key in qidAtts, check if value < minimum or value > maximum.\n if key in self.genAtts:\n minimum, maximum = self.genAtts[key]\n\n if qids[key] < minimum:\n minimum = qids[key]\n elif qids[key] > maximum:\n maximum = qids[key]\n self.genAtts[key] = (minimum, maximum)\n else:\n self.genAtts[key] = (qids[key], qids[key])\n\n def centroid(self):\n \"\"\"\n Calculates the cluster's centroid as the average of each attribute.\n\n :return: average of attributes from tuples in cluster.\n \"\"\"\n sum_att = np.zeros(len(self.genAtts))\n for tuple_ in self.tuples:\n\n for i, att in enumerate(tuple_.qidAtts.values()):\n sum_att[i] += att\n\n mean_atts = sum_att/len(self.tuples)\n return mean_atts\n\n def __len__(self):\n \"\"\"\n :return: number of tuples in the cluster.\n \"\"\"\n return len(self.tuples)\n\n\n# Setting min and max from all pids in all stream\nclass QidAttsDomain:\n \"\"\"\n qidAtts: dict\n dictionary with minimum and maximum of each element in the stream so far that will be used in calculation of Tau\n The key is the attribute's name, and the value is a tuple (min, max) containing the minimum and maximum values\n -> {att_name: (min, max)}.\n \"\"\"\n def __init__(self, qid_atts={}):\n self.qidAtts = {}\n # Transforming {key, value} in {key, (value, value)}\n for key, value in qid_atts.items():\n self.qidAtts[key] = (value, value)\n\n # consider qid = {qid,value}\n def put_values(self, qids):\n \"\"\"\n Updates the genAtts list, i.e. 
the (min, max) of each attribute.\n :param qids: Attributes of a new tuple used to update genAtts.\n \"\"\"\n for key in qids.keys():\n # if key in qidAtts, check if value < minimum or value > maximum.\n\n if key in self.qidAtts:\n minimum, maximum = self.qidAtts[key]\n if qids[key] < minimum:\n minimum = qids[key]\n elif qids[key] > maximum:\n maximum = qids[key]\n\n self.qidAtts[key] = (minimum, maximum)\n else:\n self.qidAtts[key] = (qids[key], qids[key])\n\n\nclass Tau:\n \"\"\"\n Keeps track of the last mi cluster published, and calculates the average of their info_loss.\n \"\"\"\n def __init__(self, mi=0, value=0):\n \"\"\"\n :param mi: number of published clusters to be used to calculate Tau\n :param value: the info_loss average of the last mi published clusters\n \"\"\"\n self.value = value\n self.last_clusters_info_loss = []\n self.mi = mi\n\n def update(self, cluster_info_loss):\n \"\"\"\n Updates tau value.\n\n :param cluster_info_loss: info loss from last published cluster\n \"\"\"\n\n if len(self.last_clusters_info_loss) < self.mi:\n self.last_clusters_info_loss.append(cluster_info_loss)\n # if anonymizedClusters size is >= mi, should pop the oldest one before adding\n else:\n self.last_clusters_info_loss.pop(0)\n self.last_clusters_info_loss.append(cluster_info_loss)\n\n self.value = sum(self.last_clusters_info_loss) / len(self.last_clusters_info_loss)\n\n\n\n","repo_name":"israelcvidal/doca","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"18799979014","text":"import asyncio\nimport logging\n\nimport websockets\n\n\nasync def server_main(websocket, path):\n logging.info(f'server_main, path:{path}')\n while True:\n rx_msg = await websocket.recv()\n logging.info(f'< {rx_msg}')\n\n if rx_msg.startswith('start_bm'):\n tokens = rx_msg.split()\n assert len(tokens) == 3\n\n size = int(tokens[1])\n cnt = int(tokens[2])\n logging.info(f'size:{size}, cnt:{cnt}')\n tx_msg = 'a' * size\n for i in range(cnt):\n await websocket.send(tx_msg)\n await websocket.send(f'end_bm')\n else:\n tx_msg = f'echo {rx_msg!r}'\n await websocket.send(tx_msg)\n logging.info(f'> {tx_msg}')\n\nif __name__ == '__main__':\n # debug\n LOG_FORMAT = '%(pathname)s:%(lineno)03d | %(asctime)s | %(levelname)s | %(message)s'\n # LOG_LEVEL = logging.DEBUG # DEBUG(10), INFO(20), (0~50)\n LOG_LEVEL = logging.INFO # DEBUG(10), INFO(20), (0~50)\n\n logging.basicConfig(format=LOG_FORMAT, level=LOG_LEVEL)\n\n start_server = websockets.serve(server_main, \"localhost\", 8080)\n asyncio.get_event_loop().run_until_complete(start_server)\n asyncio.get_event_loop().run_forever()\n\n","repo_name":"nhlsm/websocket_benchmark","sub_path":"w41_1_python_websocket_server.py","file_name":"w41_1_python_websocket_server.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"24664343104","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 22 18:13:24 2017\n\n@author: andraa\n\"\"\"\nimport datetime\nimport pandas as pd\nfrom os.path import isfile, join\nfrom os import listdir\n \n\nfolder = '/media/andraa/10160545101605452/kaggle/WSDM-kaggle/data/intermediate_data/user_log_merge'\n\nmerge_file = '/media/andraa/10160545101605452/kaggle/WSDM-kaggle/data/intermediate_data/user_log_members.csv'\n\n\nfiles = [f for f in listdir(folder) if isfile(join(folder, f))]\nf_out = open(merge_file, 'w')\nf_init = open(join(folder, files[0]))\n\nprint(files[0])\nfor l in f_init.readlines():\n f_out.write(l)\n\nfor f_in in files[1:]:\n print(f_in)\n for l in open(join(folder, f_in)).readlines()[1:]:\n f_out.write(l)\n\n\nf_out.close()","repo_name":"AndraAnoaica/kaggle_music","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"39538017537","text":"from base64 import encode\nimport socket\nimport threading\nimport tkinter\nimport tkinter.scrolledtext\nfrom tkinter import simpledialog\n\n# It needs to match the server port\nHOST = '127.0.0.1' \nPORT = 9090\n\n''' You can also use your public IP address to host on the web instead locally\n The user will have to specify the public IP address in order to connect\n Need to open ports on the server side as well\n'''\n\n# Creating a client that has a socket that connects with host and port\nclass Client:\n\n def __init__(self, host, port):\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((host, port))\n\n msg = tkinter.TK()\n msg.withdraw()\n\n # Getting nickname from the user\n self.nickname = simpledialog.askstring(\"Nickname\", \"Please choose a nickname\", parent=msg)\n\n self.gui_done = False\n self.running = True\n\n # Threads to build the GUI and maintains the GUI and connects to the server\n gui_thread = threading.Thread(target=self.gui_loop)\n receive_thread = threading.Thread(target=self.receive)\n\n gui_thread.start()\n receive_thread.start()\n\n # Build the GUI\n def gui_loop(self):\n self.win = tkinter.Tk()\n self.win.configure(bg=\"lightgray\")\n\n self.chat_label = tkinter.Label(self.win, text=\"Chat:\", bg=\"lightgray\")\n self.chat_label.config(font=(\"Arial\", 12))\n self.chat_label.pack(padx=20, pady=5)\n\n self.text_area = tkinter.scrolledtext.ScrolledText(self.win)\n self.text_area.pack(padx=20, pady=5)\n\n # Disabled means the content cannot be changed. To change it, revert it to enabled make \n # the changes and revert back to disabled.\n self.text_area.config(state='disabled') \n\n self.msg_label = tkinter.Label(self.win, text=\"Message:\", bg=\"lightgray\")\n self.msg_label.config(font=(\"Arial\", 12))\n self.msg_label.pack(padx=20, pady=5)\n\n self.input_area = tkinter.Text(self.win, height=3)\n self.input_area.pack(padx=20, pady=5)\n\n self.send_button = tkinter.Button(self.win, text=\"Send, command=self.write\")\n self.send_button.config(font=(\"Arial\", 12))\n self.send_button.pack(padx=20, pady=5)\n\n self.gui_done = True\n\n self.win.protocol(\"WM_DELETE_WINDOW\", self.stop)\n\n self.win.mainloop()\n\n\n def write(self):\n message = f\"{self.nickname}: {self.input_area.get('1.0', 'end')}\"\n self.sock.send(message.encode('utf-8'))\n self.input_area.delete('1.0', 'end')\n\n\n def stop(self):\n self.running = False\n self.win.destroy()\n self.sock.close()\n exit(0)\n\n\n def receive(self):\n while self.running:\n try:\n message = self.sock.recv(1024).decode('utf-8')\n if message == 'NICK':\n self.sock.send(self.nickname.encode('utf-8'))\n\n else:\n if self.gui_done:\n self.text_area.config(state='normal')\n self.text_area.config('end', message)\n self.text_area.yview('end')\n self.text_area.config(state='disabled')\n except ConnectionAbortedError:\n break\n except:\n print('Error')\n self.sock.close()\n break\n\nclient = Client(HOST, PORT)\n","repo_name":"nicolasnkGH/Simple-GUI-Chat","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"34939346944","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncollection_list_requestor.py\n\nA time queue action to periodically request a list of collections\n\"\"\"\nimport logging\nimport os\nimport time\n\n_collection_polling_interval = float(os.environ.get(\n \"NIMBUSIO_ANTI_ENTROPY_COLLECTION_POLLING_INTERVAL\", \"86400.0\")\n)\n\nclass CollectionListRequestor(object):\n \"\"\"\n A time queue action to periodically request a list of collections\n \"\"\"\n def __init__(self, state):\n self._log = logging.getLogger(\"CollectionListRequestor\")\n self._state = state\n\n @classmethod\n def next_run(cls):\n return time.time() + _collection_polling_interval\n\n def run(self, halt_event):\n \"\"\"\n request a list of collection ids from the local database\n \"\"\"\n if halt_event.is_set():\n self._log.info(\"halt-event is set, exiting\")\n return\n\n collection_id_generator = \\\n self._state[\"central-database-connection\"].generate_all_rows(\n \"\"\"\n select id \n from nimbusio_central.collection\n where deletion_time is null\n \"\"\"\n )\n for (collection_id, ) in collection_id_generator:\n self._state[\"collection-ids\"].add(collection_id)\n\n self._log.info(\"%s known collection ids\" % (\n len(self._state[\"collection-ids\"]), \n ))\n \n return [(self.run, self.next_run(), )]\n\n","repo_name":"jocelyn-monitor/nimbus.io","sub_path":"anti_entropy_server/collection_list_requestor.py","file_name":"collection_list_requestor.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"35268882624","text":"from flask import Flask, request, jsonify, render_template\nimport os\nfrom flask_cors import CORS, cross_origin\nfrom datetime import datetime\n\nfrom services.card_detector.application.ai.inference.prediction import CardsDetector\nfrom services.card_detector.application.ai.utils.utils import decodeImage\n\nos.putenv('LANG', 'en_US.UTF-8')\nos.putenv('LC_ALL', 'en_US.UTF-8')\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/predict\", methods=['POST'])\n@cross_origin()\ndef predictRoute():\n try:\n image = request.json['image']\n image_name = \"input_image_\" + str(datetime.now()).split(':')[-1] + \".jpg\"\n cards_detector.settings.logger.info(\"Received Post Request for inference--!!\")\n decodeImage(image, image_name, cards_detector.settings.INPUT_IMAGE_PATH)\n cards_detector.settings.logger.info(\n \"Image stored in directory -- \" + cards_detector.settings.INPUT_IMAGE_PATH + \"--with image name--\" + str(\n image_name))\n result = cards_detector.predict(cards_detector.settings.INPUT_IMAGE_PATH + image_name)\n return jsonify(result)\n except BaseException as ex:\n cards_detector.settings.logger.error(\"Following Error occurred while inference---!!\", str(ex))\n return jsonify(str(ex))\n\n\nif __name__ == \"__main__\":\n cards_detector = CardsDetector()\n port = 9000\n app.run(host='127.0.0.1', port=port)\n","repo_name":"R-aryan/Cards_Detection_Using_FASTER-RCNN","sub_path":"services/card_detector/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"38690213007","text":"import importlib\nimport time\nfrom datetime import datetime\nimport asyncio\n#from asyncio import get_event_loop_policy\nfrom pyrogram import idle\nfrom uvloop import install\nfrom Ubotlibs import *\nfrom Ubot import BOTLOG_CHATID, aiosession, bots, app, ids, LOOP\nfrom platform import python_version as py\nfrom Ubot.logging import LOGGER\nfrom pyrogram import __version__ as pyro\nfrom Ubot.modules import ALL_MODULES\nfrom Ubotlibs import *\nfrom Ubot.core.db.activedb import *\nfrom Ubot.core.db.usersdb import *\nfrom config import SUPPORT, CHANNEL, CMD_HNDLR, ADMIN1_ID\nimport os\nfrom dotenv import load_dotenv\n\n\nMSG_BOT = \"\"\"\n╼┅━━━━━━━━━━╍━━━━━━━━━━┅╾\n• **Alive\n• **Phython**: `{}`\n• **Pyrogram**: `{}`\n• **Users**: `{}`\n╼┅━━━━━━━━━━╍━━━━━━━━━━┅╾\n\"\"\"\n\nMSG_ON = \"\"\"\n**pyRainger Actived ✅**\n╼┅━━━━━━━━━━╍━━━━━━━━━━┅╾\n• **Versi** : `{}`\n• **Phython** : `{}`\n• **Pyrogram** : `{}`\n• **Masa Aktif** : `{}`\n• **Akan Berakhir**: `{}`\n**Ketik** `{}alive` **untuk Mengecheck Bot**\n╼┅━━━━━━━━━━╍━━━━━━━━━━┅╾\n\"\"\"\n\nMSG = \"\"\"\n**Users**: `{}`\n**ID**: `{}`\n\"\"\"\n\n\nasync def main():\n await app.start()\n LOGGER(\"Ubot\").info(\"Memulai Ubot Pyro..\")\n for all_module in ALL_MODULES:\n importlib.import_module(\"Ubot.modules\" + all_module)\n for bot in bots:\n try:\n await bot.start()\n ex = await bot.get_me()\n user_id = ex.id\n await buat_log(bot)\n botlog_chat_id = await get_botlog(user_id)\n LOGGER(\"Info\").info(\"Startup Completed\")\n LOGGER(\"√\").info(f\"Started as {ex.first_name} | {ex.id} \")\n await join(bot)\n await bot.send_message(botlog_chat_id, MSG_ON.format(BOT_VER, py(), pyro))\n ids.append(ex.id)\n except Exception as e:\n LOGGER(\"X\").info(f\"{e}\")\n await idle()\n await aiosession.close()\n await app.stop()\n \n\nif __name__ == \"__main__\":\n LOGGER(\"Ubot\").info(\"Starting Ubot\")\n install()\n LOOP.run_until_complete(main())\n","repo_name":"RaingerXD/pyRaingerV1_heroku","sub_path":"Ubot/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"73832773708","text":"def _eval(newSentences, label_map, args, eval_dataset):\n tag2Idx = {v: k for k, v in label_map.items()}\n trueEntityID, predEntityID = entityIDGeneration(newSentences)\n\n f1_record = []\n if args.determine_entity:\n labels = []\n preds = []\n for sent in newSentences:\n for token_info in sent:\n labels.append(token_info[1])\n preds.append(token_info[2])\n assert len(labels) == len(preds)\n p, r, f1 = compute_token_f1(labels, preds)\n f1_record.append(f1)\n print(\"Entity: Precision: {}, Recall: {}, F1: {}\".format(p, r, f1))\n else:\n if args.flag == 'ALL' or args.inference:\n flags = [f for f in tag2Idx.keys()][1:-2]\n for flag in flags:\n precision, recall, f1 = compute_precision_recall_f1(trueEntityID, predEntityID, flag, tag2Idx[flag])\n print(flag + \": Precision: {}, Recall: {}, F1: {}\".format(precision, recall, f1))\n overall_precision, overall_recall, overall_f1 = compute_overall_precision_recall_f1(trueEntityID, predEntityID, tag2Idx)\n f1_record.append(overall_f1)\n print(\"OVERALL: Precision: {}, Recall: {}, F1: {}\".format(overall_precision, overall_recall, overall_f1))\n else:\n p, r, f1 = compute_precision_recall_f1(trueEntityID, predEntityID, args.flag, 1)\n f1_record.append(f1)\n print(args.flag + \": Precision: {}, Recall: {}, F1: {} on {}\".format(p, r, f1, eval_dataset))\n\n return sum(f1_record)\n\n\ndef entityIDGeneration(sentences):\n sent_id = 0\n type_ = \"#\"\n flag = -1\n\n label_start_id = 0\n pred_start_id = 0\n\n true_entities = []\n pred_entities = []\n for sentence in sentences:\n # print(\"sentence\")\n # print(sentence)\n pre_label = \"O\"\n sent_true_entities = []\n sent_pred_entities = []\n for i, (word, label, pred) in enumerate(sentence):\n if label == \"O\":\n if not pre_label == \"O\":\n label_end_id = i - 1\n # print(\"entity label: \", sent_id, label_start_id, label_end_id, type)\n sent_true_entities.append(\"_\".join([str(i) for i in [sent_id, label_start_id, label_end_id]] + [type_]))\n else:\n if \"B-\" in label:\n label = label.split(\"-\")[-1]\n if not pre_label == \"O\":\n label_end_id = i - 1\n sent_true_entities.append(\"_\".join([str(i) for i in [sent_id, label_start_id, label_end_id]] + [type_]))\n label_start_id = i\n type_ = label\n else:\n continue\n pre_label = label\n if not pre_label == \"O\":\n label_end_id = len(sentence) - 1\n # print(\"entity label: \", sent_id, label_start_id, label_end_id, type)\n sent_true_entities.append(\"_\".join([str(i) for i in [sent_id, label_start_id, label_end_id]] + [type_]))\n\n pre_pred = 1\n for i, (word, label, pred) in enumerate(sentence):\n if pred == 1:\n if not pre_pred == 1:\n pred_end_id = i - 1\n # print(\"entity pred: \", sent_id, pred_start_id, pred_end_id, flag)\n sent_pred_entities.append(\"_\".join([str(i) for i in [sent_id, pred_start_id, pred_end_id, flag]]))\n else:\n if not pre_pred == pred:\n if not pre_pred == 1:\n pred_end_id = i - 1\n sent_pred_entities.append(\"_\".join([str(i) for i in [sent_id, pred_start_id, pred_end_id, flag]]))\n pred_start_id = i\n flag = pred\n else:\n continue\n pre_pred = pred\n\n if not pre_pred == 1:\n pred_end_id = len(sentence) - 1\n # print(\"entity pred: \", sent_id, pred_start_id, pred_end_id, flag)\n sent_pred_entities.append(\"_\".join([str(i) for i in [sent_id, pred_start_id, pred_end_id, flag]]))\n\n sent_id += 1\n true_entities.append(sent_true_entities)\n pred_entities.append(sent_pred_entities)\n return true_entities, pred_entities\n\n\ndef compute_token_f1(labels, preds):\n # recall = tp/(tp + fn)\n # 
precision = tp/(tp + fp)\n tp = 0\n tn = 0\n fp = 0\n fn = 0\n\n assert len(labels) == len(preds)\n for i in range(len(labels)):\n if (labels[i].startswith(\"B\") or labels[i].startswith(\"I\")) and preds[i] == 1:\n tp += 1\n elif (labels[i].startswith(\"B\") or labels[i].startswith(\"I\")) and preds[i] == 0:\n fn += 1\n elif labels[i].startswith(\"O\") and preds[i] == 0:\n tn += 1\n elif labels[i].startswith(\"O\") and preds[i] == 1:\n fp += 1\n if tp == 0:\n recall = 0\n precision = 0\n else:\n recall = float(tp) / (float(tp) + float(fn))\n precision = float(tp) / (float(tp) + float(fp))\n if recall == 0 or precision == 0:\n f1 = 0\n else:\n f1 = (2 * precision * recall) / (precision + recall)\n return precision, recall, f1\n\n\ndef compute_precision_recall_f1(true_entities, pred_entities, flag, pflag):\n tp = 0\n np_ = 0\n pp = 0\n for i in range(len(true_entities)):\n sent_true = true_entities[i]\n sent_pred = pred_entities[i]\n for e in sent_true:\n if flag in e:\n np_ += 1\n temp = e.replace(flag, str(pflag))\n if temp in sent_pred:\n tp += 1\n for e in sent_pred:\n if int(e.split(\"_\")[-1]) == pflag:\n pp += 1\n if pp == 0:\n p = 0\n else:\n p = float(tp) / float(pp)\n if np_ == 0:\n r = 0\n else:\n r = float(tp) / float(np_)\n if p == 0 or r == 0:\n f1 = 0\n else:\n f1 = float(2 * p * r) / float((p + r))\n return p, r, f1\n\n\ndef compute_overall_precision_recall_f1(true_entities, pred_entities, tag2Idx):\n tp = 0\n np_ = len(sum(true_entities, []))\n pp = len(sum(pred_entities, []))\n temp = ' '\n\n assert len(true_entities) == len(pred_entities)\n for i in range(len(true_entities)):\n sent_true = true_entities[i]\n sent_pred = pred_entities[i]\n for e in sent_true:\n for flag in tag2Idx:\n if flag in e:\n temp = e.replace(flag, str(tag2Idx[flag]))\n if temp in sent_pred:\n tp += 1\n if pp == 0:\n p = 0\n else:\n p = float(tp) / float(pp)\n if np_ == 0:\n r = 0\n else:\n r = float(tp) / float(np_)\n if p == 0 or r == 0:\n f1 = 0\n else:\n f1 = float(2 * p * r) / float((p + r))\n return p, r, f1\n","repo_name":"kangISU/Conf-MPU-BERT-DS-NER","sub_path":"metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"30418344733","text":"from enum import Enum\nimport time\nimport weakref\n\nfrom .backend.QtCore import pyqtSignal, QLineF, QRect, Qt\nfrom .backend.QtWidgets import QGraphicsScene, QGraphicsSceneMouseEvent\n\n\nclass MouseEventState(Enum):\n OFF = 0\n ON = 1\n ENTER = 2\n EXIT = 3\n\n\nclass MouseDragEvent:\n \"\"\"Mouse event delivered by :class:`GraphicsScene` when a item is dragged.\n \"\"\"\n def __init__(self, move_ev, press_ev, last_ev,\n state: MouseEventState = MouseEventState.ON):\n self._state = state\n self.accepted = False\n self.current_item = None\n self._button_down_scene_pos = {}\n self._button_down_screen_pos = {}\n for btn in [Qt.MouseButton.LeftButton,\n Qt.MouseButton.MiddleButton,\n Qt.MouseButton.RightButton]:\n self._button_down_scene_pos[btn] = move_ev.buttonDownScenePos(btn)\n self._button_down_screen_pos[btn] = move_ev.buttonDownScreenPos(btn)\n self._scene_pos = move_ev.scenePos()\n self._screen_pos = move_ev.screenPos()\n if last_ev is None:\n self._last_scene_pos = press_ev.scenePos()\n self._last_screen_pos = press_ev.screenPos()\n else:\n self._last_scene_pos = last_ev.scenePos()\n self._last_screen_pos = last_ev.screenPos()\n self._buttons = move_ev.buttons()\n self._button = press_ev.button()\n self._modifiers = move_ev.modifiers()\n self.accepted_item = None\n\n def accept(self):\n \"\"\"An item should call this method if it can handle the event.\n\n This will prevent the event being delivered to any other items.\"\"\"\n self.accepted = True\n self.accepted_item = self.current_item\n\n def ignore(self):\n \"\"\"An item should call this method if it cannot handle the event.\n\n This will allow the event to be delivered to other items.\"\"\"\n self.accepted = False\n\n def isAccepted(self):\n return self.accepted\n\n def scenePos(self):\n \"\"\"Return the current scene position of the mouse.\"\"\"\n return self._scene_pos\n\n def screenPos(self):\n \"\"\"Return the current screen position (pixels relative to widget) of the mouse.\"\"\"\n return self._screen_pos\n\n def buttonDownScenePos(self, btn=None):\n \"\"\"\n Return the scene position of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n \"\"\"\n if btn is None:\n btn = self.button()\n return self._button_down_scene_pos[btn]\n\n def buttonDownScreenPos(self, btn=None):\n \"\"\"\n Return the screen position (pixels relative to widget) of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n \"\"\"\n if btn is None:\n btn = self.button()\n return self._button_down_screen_pos[btn]\n\n def lastScenePos(self):\n \"\"\"\n Return the scene position of the mouse immediately prior to this event.\n \"\"\"\n return self._last_scene_pos\n\n def lastScreenPos(self):\n \"\"\"\n Return the screen position of the mouse immediately prior to this event.\n \"\"\"\n return self._last_screen_pos\n\n def buttons(self):\n \"\"\"\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n \"\"\"\n return self._buttons\n\n def button(self):\n \"\"\"Return the button that initiated the drag (may be different from the buttons currently pressed)\n (see QGraphicsSceneMouseEvent::button in the Qt documentation)\n\n \"\"\"\n return self._button\n\n def pos(self):\n \"\"\"\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n \"\"\"\n return 
self.current_item.mapFromScene(self._scene_pos)\n\n def lastPos(self):\n \"\"\"\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n \"\"\"\n return self.current_item.mapFromScene(self._last_scene_pos)\n\n def buttonDownPos(self, btn=None):\n \"\"\"\n Return the position of the mouse at the time the drag was initiated\n in the coordinate system of the item that the event was delivered to.\n \"\"\"\n if btn is None:\n btn = self.button()\n return self.current_item.mapFromScene(self._button_down_scene_pos[btn])\n\n def entering(self):\n \"\"\"Whether this event is the first one since a drag was initiated.\"\"\"\n return self._state == MouseEventState.ENTER\n\n def exiting(self):\n \"\"\"Whether this event is the last one since a drag was initiated.\"\"\"\n return self._state == MouseEventState.EXIT\n\n def __repr__(self):\n if self.current_item is None:\n lp = self._last_scene_pos\n p = self._scene_pos\n else:\n lp = self.lastPos()\n p = self.pos()\n return \"
\\r\\n')\n\n for i in range(len(song_lyrics)):\n for _ in song_lyrics[i].split():\n if link not in not_english:\n temp.append(_)\n print(songs - 15, 'songs with lyrics found')\n\n for word in range(len(temp)):\n for z in range(temp[word].count('<')):\n to_remove.clear()\n tag = 0\n for _ in range(len(temp[word])):\n if tag == 1:\n if temp[word][_] == '>':\n to_remove.append(_)\n tag = 0\n break\n\n if temp[word][_] == '<':\n to_remove.append(_)\n tag = 1\n\n if len(to_remove) == 1:\n temp[word] = temp[word][to_remove[0] + 1:]\n\n if len(to_remove) == 2:\n temp[word] = temp[word][:to_remove[0]] + temp[word][to_remove[1] + 1:]\n\n to_remove.clear()\n\n for not_a_word in range(len(temp)):\n if temp[not_a_word] == 'p':\n to_remove.append(not_a_word)\n\n elif temp[not_a_word] == 'span':\n to_remove.append(not_a_word)\n\n elif temp[not_a_word][:4] == 'id=\"':\n to_remove.append(not_a_word)\n\n elif temp[not_a_word][:8] == 'iComment':\n to_remove.append(not_a_word)\n\n elif temp[not_a_word][:10] == 'data-chunk':\n to_remove.append(not_a_word)\n\n elif temp[not_a_word][:6] == 'href=\"':\n to_remove.append(not_a_word)\n\n elif temp[not_a_word][:7] == 'class=\"':\n to_remove.append(not_a_word)\n\n elif temp[not_a_word] == '':\n to_remove.append(not_a_word)\n\n for count, remove in enumerate(to_remove):\n temp.remove(temp[remove - count])\n\n print('Number of data points (in words): ', len(temp))\n for i in temp:\n final += i\n final += ' '\n\n return final\n","repo_name":"Tennis-Ball/AI-Singer","sub_path":"lyrics_modules/get_lyric_data.py","file_name":"get_lyric_data.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"34901953347","text":"# -*- coding: utf-8 -*-\nfrom os.path import join,isfile,isdir,dirname,basename\nfrom shutil import copyfile\nimport getopt,os,sys,re\nimport io,json\nimport global_m as gb\nfrom datetime import date\n\n\n'''\nOrganization: MDIBL\nAuthor: Lucie N. Hutchins\nContact: lucie.hutchins@mdibl.org\nDate: June 2019\n\n'''\ndef get_header():\n header='''\n****************** json_generator ********************************************************\n\nThe tool generates sample-specific json files for a given experiment\n\n***************************************************************************************\n '''\n return header\n\ndef prog_usage():\n usage=get_header()\n usage+='''\n\n Usage: PROG [-h] -c path2project_runID_main_config/cfgs/pipeline.cfg [-j path2project_runID_json_template/cfgs/template.json] [-s fastq]\n Where:\n -h To show the usage\n -c path2runID/cfgs/pipeline.cfg or --cfg=path2runID/cfgs/pipeline.cfg ... required, \n -j path2runID/cfgs/template.json or --jtemp=path2runID/cfgs/template.json ... optional\n (default - gets template path from pipeline.cfg), \n -s fatsq.gz or --suffix=fastq.gz ... optional(default fastq), reads files suffix \n \n What It Does: Uses the json template to generate sample-specific json files under \n the location specified in the pipeline.cfg for json files. \n\n Example: \n python PROG -c path2results/teamName/projectName/runID/cfgs/pipeline.cfg -s fastq\n OR \n python PROG -c path2results/teamName/projectName/runID/cfgs/pipeline.cfg \n -j path2results/teamName/projectName/runID/cfgs/template.json\n OR\n python PROG --cfg=path2results/teamName/projectName/runID/cfgs/pipeline.cfg \n \n ASSUMPTIONS: \n 1) User has full permission to create sample-specific json files\n 2) The json template has been generated in the same directory as the pipeline.cfg file\n '''\n print(\"%s\"%(usage))\n##\n# A data model to store sample info\n#\nclass SampleDOM:\n def __init__(self,sample_id,reads_list,reads_suffix):\n self.id=sample_id\n self.reads=[]\n self.set_sample(reads_list,reads_suffix)\n\n def set_sample(self,reads_list,reads_suffix):\n if reads_list:\n for read_file in reads_list:\n read_file=read_file.strip()\n if read_file.startswith(self.id) and read_file.endswith(reads_suffix):\n self.reads.append(read_file)\n \n def get_read_file(self,sampleID,read_number):\n # Logic:\n # if the len of sample_reads array is one, return the first element\n # else:\n # use the map-reduced algorithm to get the right file name\n #\n if len(self.reads)<=0: return None\n elif len(self.reads)<2: \n try: \n return self.reads[0].replace(\".gz\",\"\")\n except:pass\n else:\n # Map step\n # Create a list of string tokens using one string(read_file)\n ## we want our regular expression to capture both \"_\" and non-alphanumeric characters\n try:\n token_file=self.reads[0].replace(sampleID,\"sample\")\n tokens=re.split(r'[\\W+|_]',token_file)\n ##Based on our standars readID is field#2 in the name\n read_id=tokens[1]\n if read_id.startswith(\"R\"):read_number=\"R\"+read_number\n # Create a dictionary with read_file:read_file.tokens key:value pair\n reads={}\n for read_file in self.reads:\n token_file=read_file.replace(sampleID,\"sample\")\n reads[read_file]=re.split(r'[\\W+|_]',token_file)\n # Reduction step - reduce each dict>value using string tokens\n for token in tokens:\n if token in read_number: continue\n for read_file in reads:\n if token in reads[read_file]:reads[read_file].remove(token)\n # Assembly and quantification step\n except:pass\n 
read_file=None\n for read in reads:\n if read_number in reads[read]:read_file=read\n return read_file.replace(\".gz\",\"\")\n\nif __name__== \"__main__\":\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hc:j:s:\", \n [\"help\", \"cfg=\",\"jtemp=\",\"suffix\"])\n except getopt.GetoptError as err:\n # print help information and exit:\n print(\"ERROR:%s\" % (str(err) )) # will print something like \"option -a not recognized\"\n prog_usage()\n sys.exit(1)\n #set program arguments\n json_template=None\n pipeline_config=None\n log_file=None\n json_base_dir=None\n design_file=None\n reads_suffix=\"fastq\"\n for o, a in opts:\n if o in (\"-c\", \"--cfg\"):pipeline_config = a\n elif o in (\"-j\",\"--jtemp\"):json_template = a\n elif o in (\"-s\",\"--suffix\"):reads_suffix = a\n elif o in (\"-h\", \"--help\"):\n prog_usage()\n sys.exit()\n else:\n assert False, \"unhandled option\"\n if pipeline_config is None or not isfile(pipeline_config):\n msg=\"ERROR: pipeline.cfg missing\"\n print(\"%s - Check %s\"%(msg,pipeline_config))\n prog_usage()\n sys.exit()\n #get project global environment variables \n # variables of interest for this step:\n # 1)LOG_BASE\n # 2)JSON_TEMPLATE\n # 3)PATH2_JSON_FILES\n # 4)DESIGN_FILE \n # 5)READS_BASE\n # 6)RUN_ID\n \n project_env=gb.loadEnv(pipeline_config) \n if not project_env[\"LOG_BASE\"]:\n print(\"ERROR: Log directory missing - see:%s\"%(project_env[\"LOG_BASE\"]))\n print(\"create the above directory and try again.\")\n sys.exit()\n if not project_env[\"PATH2_JSON_FILES\"]:\n print(\"ERROR: Json files base directory missing - see:%s\"%(project_env[\"PATH2_JSON_FILES\"]))\n print(\"create the above directory and try again.\")\n sys.exit()\n if not project_env[\"ORIGINAL_READS_BASE\"]:\n print(\"ERROR: Path to Reads files is incorrect - see:%s\"%(project_env[\"ORIGINAL_READS_BASE\"]))\n sys.exit()\n\n if not isdir(project_env[\"LOG_BASE\"]):\n gb.mkdir_p(project_env[\"LOG_BASE\"])\n log_file=join(project_env[\"LOG_BASE\"],basename(__file__)+\".log\")\n if not isdir(project_env[\"PATH2_JSON_FILES\"]):\n gb.mkdir_p(project_env[\"PATH2_JSON_FILES\"])\n json_base_dir=project_env[\"PATH2_JSON_FILES\"]\n if json_template is None: \n json_template=project_env[\"JSON_TEMPLATE\"]\n design_file=project_env[\"DESIGN_FILE\"]\n project_run_id=\"\"\n if \"RUN_ID\" in project_env:\n project_run_id=project_env[\"RUN_ID\"]\n\n if not isdir(json_base_dir):\n print(\"ERROR: Json files base directory does not exist - see:%s\"%(json_base_dir))\n print(\"create the above directory and try again.\")\n sys.exit()\n if not isfile(design_file): \n print(\"ERROR: The design file is missing - see:%s\"%(design_file))\n sys.exit()\n if not isfile(json_template):\n print(\"ERROR: Json template file is missing - see:%s\"%(json_template))\n sys.exit()\n ## Load json template into an object\n json_obj=None\n with open(json_template) as f:\n json_obj=json.load(f)\n if json_obj is None:\n print(\"ERROR: Failed to open Json template - see:%s\"%(json_template))\n sys.exit()\n ##Enforce our standards by making a copy of json template under this runID cfgs directory if needed\n cfgs_base=dirname(pipeline_config).strip()\n json_template_base=dirname(json_template).strip()\n json_template_file=basename(json_template).strip()\n if cfgs_base not in json_template_base: \n copyfile(json_template,join(cfgs_base,json_template_file))\n \n log=open(log_file,'w') \n log.write(\"**********************************\\n\")\n log.write(\"**********************************\\n\")\n log.write(\"Date:%s\\n\"%( 
date.today()))\n log.write(\"\\n\")\n log.write(\"Log file:%s\\n\"%(log_file))\n log.write(\"Json template:%s\\n\"%(json_template)) \n log.write(\"Json files base directory:%s\\n\"%(json_base_dir)) \n log.write(\"Experiment Design File:%s\\n\"%(design_file))\n log.write(\"Experiment Reads base:%s\\n\"%(project_env[\"ORIGINAL_READS_BASE\"]))\n log.write(\"Experiment Run config File:%s\\n\"%(pipeline_config))\n bad_format=False\n json_obj[\"project_run_id\"]=project_run_id\n ## get list of reads file names\n reads_base=project_env[\"ORIGINAL_READS_BASE\"]\n reads=[f for f in os.listdir(reads_base) if isfile(join(reads_base,f))]\n with open(design_file,'r') as f:\n try:\n for line in f.readlines():\n if \"Sample\" in line:continue\n if \"sample_id\" in line:continue\n #Remove leading and trailing whitespace from line\n line=line.strip()\n fields=line.split('\\t')\n sample=SampleDOM(fields[0].strip(),reads,reads_suffix)\n read_file_format=sample.id+\"[delimiter]readID[delimiter][...]\"+reads_suffix\n log.write(\"----------------------------\\n\")\n log.write(\"SampleID:%s\\n\"%(sample.id))\n log.write(\"Read files suffix:%s\\n\"%(reads_suffix))\n log.write(\"Number of Reads:%d\\n\"%(len(sample.reads)))\n \n if len(sample.reads)<=0:\n try:\n log.write(\"ERROR: Bad read files name - expected format - %s\\n\"%(read_file_format))\n log.write(\"Original reads files are expected under - %s\\n\"%(project_env[\"ORIGINAL_READS_BASE\"]))\n except:pass\n bad_format=True\n continue\n read1=join(project_env[\"READS_BASE\"],sample.get_read_file(sample.id,\"1\"))\n read2=None\n sample_json_obj=json_obj\n sample_json_file=join(json_base_dir,sample.id+\".\"+project_env[\"ORGANISM\"]+\".json\")\n sample_json_obj[\"input_fastq_read1_files\"][0][\"path\"]=read1\n if len(sample.reads)>1:read2=join(project_env[\"READS_BASE\"],sample.get_read_file(sample.id,\"2\"))\n log.write(\" READ1:%s\\n\"%(read1))\n if read2 is not None:\n log.write(\" READ2:%s\\n\"%(read2))\n sample_json_obj[\"input_fastq_read2_files\"][0][\"path\"]=read2\n log.write(\"Json file:%s\\n\"%(sample_json_file))\n try:\n to_unicode = unicode\n except NameError:\n to_unicode = str\n with io.open(sample_json_file, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(sample_json_obj,indent=2, sort_keys=True,separators=(',', ': '), ensure_ascii=False)\n outfile.write(to_unicode(str_))\n print(\"Sample:%s\\nJson file:%s\\n\"%(sample.id,sample_json_file))\n except:pass\n if bad_format:\n log.write(\"Failed\\n\")\n print(\"Program failed - check the log file:%s\\n\"%(log_file))\n sys.exit(1)\n log.write(\"Program complete\\n\")\n print(\"Program complete\\n\")\n sys.exit()\n","repo_name":"mdibl/biocore_utils","sub_path":"src/python/json_generator.py","file_name":"json_generator.py","file_ext":"py","file_size_in_byte":11157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"3630512358","text":"import turtle as t\nfrom turtle import Screen\n\ntim = t.Turtle()\nscreen = Screen()\n\n########### Challenge 2 - Draw a Dashed Line ########\nwin_width = screen.window_width()\ntim.shape(\"turtle\")\ntim.pencolor('red')\ntim.penup()\ntim.setx(-1 * (win_width / 2))\ntim.width(3)\ntim.pendown()\n\nfor _ in range(int(win_width / 20)):\n if abs(tim.xcor()) > win_width / 2:\n tim.right(90)\n\n tim.forward(10)\n tim.penup()\n tim.forward(10)\n tim.pendown()\n\n\nscreen.exitonclick()\n","repo_name":"cuauhtlahuac/100DaysOfPythonCode","sub_path":"day18/draw_dashed_line.py","file_name":"draw_dashed_line.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"27046373153","text":"import random\nimport sys\nn = 100\nif len(sys.argv) == 3:\n\tif int(sys.argv[2]) < 1:\n\t\tprint(\"TOO LOW OF VALUE FOR N\")\n\telse:\n\t\tn = int(sys.argv[2])\n\t\n\nf = open(sys.argv[1], encoding='utf-8')\nlyrics = f.readlines()\n\nstrings = \" \".join(lyrics)\nstrings = strings.lower()\nstrLyrics = \" \".join(lyrics)\nstrLyrics = strLyrics.replace(\"?\",\" \")\nstrLyrics = strLyrics.replace(\"\\n\",\" \")\nstrLyrics = strLyrics.replace('\"',\" \")\nstrLyrics = strLyrics.replace(\",\",\" \")\nstrLyrics = strLyrics.lower()\nstrLyrics = strLyrics.replace(\"-\",\" \")\nstrLyrics = strLyrics.replace(\".\",\" \")\nstrLyrics = strLyrics.replace(\":\",\" \")\nstrLyrics = strLyrics.replace(\";\",\" \")\nstrLyrics = strLyrics.replace(\"(\",\" \")\nstrLyrics = strLyrics.replace(\")\",\" \")\nstrLyrics = strLyrics.replace(\"!\",\" \")\nblack_sabbath = strLyrics.split(' ') \n\n\n#START OF UNIGRAMS\nunigrams = {} #empty Dictionary\n#This for loop creates a unigram dictionary and counts each word.\nfor ozzy_token in black_sabbath:\n\tif ozzy_token != \"\":\n\n\t\tif unigrams.get(ozzy_token) == None:\n\t\t\tunigrams[ozzy_token] = 1\n\n\t\telse:\n\t\t\tnum = unigrams[ozzy_token]\n\t\t\tunigrams[ozzy_token] = num + 1\n\n#Testing the Unigram Model\nkey = list(unigrams.keys())\nval = list(unigrams.values())\nsentence = random.choices(key, weights = val, k = n)\nstr2 = \" \".join(sentence)\n\n\n#UNCOMMENT\nprint(\"UNIGRAMS MODEL\")\nprint(str2)\n#UNCOMMENT\nprint(\" \")\n\n#UNIGRAMS END\n######################################################\n#BIGRAMS START\nprint(\"BIGRAMS MODEL\")\nbigrams = {}\nfor i in range(len(black_sabbath)):\n\tif black_sabbath[i] != \"\" and black_sabbath[i+1] != \"\":\n\t\t#if this bigram doesn't exist\n\n\t\tif bigrams.get(black_sabbath[i]) == None:\n\t\t\tsecond = {black_sabbath[i+1] :1}\n\t\t\tbigrams[black_sabbath[i]] = second\n\t\n\t\t#If this bigram exists with this word\n\t\telif bigrams.get(black_sabbath[i]) != None:\n\t\t\texisting = bigrams[black_sabbath[i]]\n\t\t\t#if existing first word doesn't have this second word\n\t\t\tif(existing.get(black_sabbath[i+1]) == None ):\n\t\t\t\texisting[black_sabbath[i + 1]] = 1\n\t\t\t\tbigrams[black_sabbath[i]].update(existing)\n\t\t\telse:\n\t\t\t#if this bigram occured already add one to its count\n\t\t\t\tnum = existing[black_sabbath[i+1]]\n\t\t\t\texisting[black_sabbath[i+1]] = num + 1\n\t\t\t\tbigrams[black_sabbath[i]] = existing\n\n\ni = 0\nuni = random.choices(key,weights = val, k = 1)\nnextWord = \" \".join(uni)\nsong = []\nwhile i < n:\n\tif bigrams.get(nextWord,None) == None:\n\t\tuni = random.choices(key,weights = val, k = 1)\n\t\tnextWord = \" \".join(uni)\n\telse:\n\t\ttemp = bigrams.get(nextWord) \t\n\t\tkey2 = (list(temp.keys())) \n\t\tvals2 = (list(temp.values()))\n\t\tphrase = random.choices(key2, weights = vals2, k = 1)\n\t\tsong.append(\" \" + \" \".join(phrase))\n\t\tnextWord = \" \".join(phrase)\n\t\ti+= 1\n\nprint(\"\".join(song).strip())\n\n\n\n#END OF BIGRAMS\n######################################################\n#START OF TRIGRAMS\ntrigrams = {} \nfor i in range(len(black_sabbath)):\n\tif black_sabbath[i] != \"\" and black_sabbath[i+1] != \"\" and black_sabbath[i+2] != \"\": \n\t\t#Case 1: First word in trigram doesn't exist\n\t\tif trigrams.get(black_sabbath[i]) == None:\n\t\t\tnext_two = {black_sabbath[i+1] :{black_sabbath[i+2]:1}} \n\t\t\ttrigrams[black_sabbath[i]] = next_two\t\n\n\t\t#Case 2: First word exists, second word doesn't exist.\n\t\telif 
trigrams.get(black_sabbath[i]) != None:\n\t\t\tfirst_word = trigrams[black_sabbath[i]]\n\t\t\t\n\t\n\t\t\t#if second word doesn't exist add it. \n\t\t\tif first_word.get(black_sabbath[i+1]) == None:\n\t\t\t\tnext_two = {black_sabbath[i+1] :{black_sabbath[i+2]:1}}\n\t\t\t\ttrigrams[black_sabbath[i]].update(next_two)\n\t\t\t#second Word is there third isn't\n\t\t\telif first_word.get(black_sabbath[i+1]) != None: \n\t\t\t\tsecond_word = trigrams[black_sabbath[i]][black_sabbath[i+1]]\t\t\t\t\n\t\t\t\t#HAS THIRD WORD\n\t\t\t\tif second_word.get(black_sabbath[i+2]) != None:\n\t\t\t\t\tnum = second_word[black_sabbath[i+2]]\n\t\t\t\t\ttrigrams[black_sabbath[i]][black_sabbath[i+1]][black_sabbath[i+2]] = num + 1\n\t\t\t\t#NO THIRD WORD\t \n\t\t\t\telif second_word.get(black_sabbath[i+2]) == None:\n\t\t\t\t\tlast = {black_sabbath[i+2]: 1}\n\t\t\t\t\ttrigrams[black_sabbath[i]][black_sabbath[i+1]].update(last)\n\n\n\n\t\nprint(\" \")\nprint(\"TRIGRAMS\")\ni = 0\nuni = random.choices(key,weights = val, k = 1)\nnextWord = \" \".join(uni)\nsong = []\nwhile i < n:\n\t##If no trigram starts with this.\n\tif trigrams.get(nextWord) == None:\t\t\n\t\tuni = random.choices(key,weights = val, k = 1)\n\t\tnextWord = \" \".join(uni)\t\n\telse: #If there is a trigram\n\t\tfirst = trigrams.get(nextWord) #FIRST WORD\n\t\ttemp = list(first.keys())\t\n\t\tsecond = random.choices(temp) #SECOND WORD\t\n\t\tthird = first.get(second[0]) #RN ITS A DICT\n\t\tthird_keys = list(third.keys())\n\t\tthird_vals = list(third.values())\n\t\tthird = random.choices(third_keys,weights=third_vals, k = 1)\n\t\ttri_list = [nextWord,second[0]]\n\t\tphrase = \" \".join(tri_list)\t\n\t\tsong.append(phrase)\n\t\tnextWord = \"\".join(third[0])\n\t\ti = i+1\n\n\n\t\n\n\t\n\n\nprint(\" \".join(song).strip())\n#END OF TRIGRAMS\n\n\n\n\n\n\n\n\n","repo_name":"MicahHarlan/BlackSabbathSongGenerator","sub_path":"mharlan_ngram.py","file_name":"mharlan_ngram.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"9120140441","text":"# import threading\nimport concurrent.futures\nimport time\n\nstart = time.perf_counter()\n\n\ndef do_smth(sec):\n print(f\"Sleep {sec} second(s)!\")\n time.sleep(sec)\n return f\"Done sleeping {sec} second(s)\"\n\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n secs = [5, 4, 3, 2, 1]\n results = executor.map(do_smth, secs)\n for result in results:\n print(result)\n\n\n\n # results = [executor.submit(do_smth, sec) for sec in secs]\n # for f in concurrent.futures.as_completed(results):\n # print(f.result())\n\n\n# threads = []\n# for _ in range(10):\n# t = threading.Thread(target=do_smth, args = [1.5])\n# t.start()\n# threads.append(t)\n\n# for thread in threads:\n# thread.join()\n\nprint(f\"Finished in {time.perf_counter() - start} seconds!\")","repo_name":"dkleptsov/small_projects","sub_path":"multithreading/multithread.py","file_name":"multithread.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"16650273581","text":"\"\"\"Set of numbers displayed after selecting blue or purple after task.\"\"\"\n\nimport ipywidgets as widgets\nfrom ipywidgets import VBox, HBox\nfrom IPython.display import display\nimport time\n\n# Set of buttons shown after player selects blue or purple\nthree_button = widgets.Button(description='3')\nfour_button = widgets.Button(description='4')\nseven_button = widgets.Button(description='7')\neight_button = widgets.Button(description='8')\nout = widgets.Output()\n\n# Arranges the buttons into a 2x2 table\nleft_box = VBox([three_button, seven_button])\nright_box = VBox([four_button, eight_button])\nout_greater = HBox([left_box, right_box])\n \ndisplay(out_greater, out)\n\ndef three_chosen(clicked):\n \"\"\"After the three button is chosen, a statement corresponding \n to the three button will be shown along with a thank you statement. \n \n Parameters\n ----------\n clicked : Button (ipywidgets.widgets.widget_button.Button)\n Allows function to run if button is clicked.\n \n Returns\n -------\n print('Thanks for playing!') : string\n A statement thanking the player.\n \"\"\" \n with out: \n print('\\nHere is your task:\\n' \n '\\nEat an apple (or any fruit of your choice)\\n')\n # Delays return statement to give player time to read task\n time.sleep(1.5)\n \n return print('Thanks for playing!')\n\n# Allows this function to occur only when the three button is clicked on \nthree_button.on_click(three_chosen)\n\ndef four_chosen(clicked):\n \"\"\"After the four button is chosen, a statement corresponding \n to the four button will be shown along with a thank you statement. \n \n Parameters\n ----------\n clicked : Button (ipywidgets.widgets.widget_button.Button)\n Allows function to run if button is clicked.\n \n Returns\n -------\n print('Thanks for playing!') : string\n A statement thanking the player.\n \"\"\" \n with out:\n print('\\nHere is your task:\\n' \n '\\nTreat yourself with your favorite dessert!\\n')\n # Delays return statement to give player time to read task\n time.sleep(1.5)\n\n return print('Thanks for playing!')\n\n# Allows this function to occur only when the four button is clicked on \nfour_button.on_click(four_chosen)\n\ndef seven_chosen(clicked):\n \"\"\"After the seven button is chosen, a statement corresponding \n to the seven button will be shown along with a thank you statement. \n \n Parameters\n ----------\n clicked : Button (ipywidgets.widgets.widget_button.Button)\n Allows function to run if button is clicked.\n \n Returns\n -------\n print('Thanks for playing!') : string\n A statement thanking the player.\n \"\"\" \n with out:\n print('\\nHere is your task:\\n' \n '\\nTake a 5 minute break\\n')\n # Delays return statement to give player time to read task\n time.sleep(1.5)\n \n return print('Thanks for playing!')\n\n# Allows this function to occur only when the seven button is clicked on\nseven_button.on_click(seven_chosen) \n\ndef eight_chosen(clicked):\n \"\"\"After the eight button is chosen, a statement corresponding \n to the eight button will be shown along with a thank you statement. 
\n \n Parameters\n ----------\n clicked : Button (ipywidgets.widgets.widget_button.Button)\n Allows function to run if button is clicked.\n \n Returns\n -------\n print('Thanks for playing!') : string\n A statement thanking the player.\n \"\"\" \n with out:\n print('\\nHere is your task:\\n' \n '\\nClean your desk\\n')\n # Delays return statement to give player time to read task\n time.sleep(1.5)\n \n return print('Thanks for playing!')\n\n# Allows this function to occur only when the eight button is clicked on\neight_button.on_click(eight_chosen) ","repo_name":"ckwon822/COGS18Final","sub_path":"task_greater_numbers.py","file_name":"task_greater_numbers.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"10649739004","text":"#\n#\n\nimport isce\nimport isceobj\nimport stdproc\nfrom isceobj.Util.Poly2D import Poly2D\nimport logging\nfrom isceobj.Util.decorators import use_api\n\nimport os\nimport numpy as np\nimport shelve\n\nlogger = logging.getLogger('isce.insar.runResampleSubbandSlc')\n\n# Modified by V. Brancato 10.14.2019 added \"self\" as input parameter of resampleSLC\ndef resampleSlc(self,referenceFrame, secondaryFrame, imageSlc2, radarWavelength, coregDir,\n azoffname, rgoffname, azpoly = None, rgpoly = None, misreg=False):\n logger.info(\"Resampling secondary SLC\")\n\n imageSlc1 = referenceFrame.getImage().filename\n\n inimg = isceobj.createSlcImage()\n inimg.load(imageSlc2 + '.xml')\n inimg.setAccessMode('READ')\n\n prf = secondaryFrame.PRF\n\n doppler = secondaryFrame._dopplerVsPixel\n factor = 1.0 # this should be zero for zero Doppler SLC.\n coeffs = [factor * 2*np.pi*val/prf/prf for val in doppler]\n\n dpoly = Poly2D()\n dpoly.initPoly(rangeOrder=len(coeffs)-1, azimuthOrder=0, coeffs=[coeffs])\n\n rObj = stdproc.createResamp_slc()\n rObj.slantRangePixelSpacing = secondaryFrame.getInstrument().getRangePixelSize()\n #rObj.radarWavelength = secondaryFrame.getInstrument().getRadarWavelength()\n rObj.radarWavelength = radarWavelength\n rObj.dopplerPoly = dpoly \n\n # for now let's start with None polynomial. Later this should change to\n # the misregistration polynomial\n rObj.azimuthOffsetsPoly = azpoly\n rObj.rangeOffsetsPoly = rgpoly\n rObj.imageIn = inimg\n\n rngImg = isceobj.createImage()\n rngImg.load(rgoffname + '.xml')\n rngImg.setAccessMode('READ')\n\n aziImg = isceobj.createImage()\n aziImg.load(azoffname + '.xml')\n aziImg.setAccessMode('READ')\n\n width = rngImg.getWidth()\n length = rngImg.getLength()\n\n# Modified by V. Brancato on 10.14.2019 (if Rubbersheeting in range is turned on, flatten the interferogram during cross-correlation)\n if not self.doRubbersheetingRange:\n print('Rubber sheeting in range is turned off, flattening the interferogram during resampling')\n flatten = True\n print(flatten)\n else:\n print('Rubber sheeting in range is turned on, flattening the interferogram during interferogram formation')\n flatten=False\n print(flatten)\n# end of Modification\n \n rObj.flatten = flatten\n rObj.outputWidth = width\n rObj.outputLines = length\n rObj.residualRangeImage = rngImg\n rObj.residualAzimuthImage = aziImg\n\n if referenceFrame is not None:\n rObj.startingRange = secondaryFrame.startingRange\n rObj.referenceStartingRange = referenceFrame.startingRange\n rObj.referenceSlantRangePixelSpacing = referenceFrame.getInstrument().getRangePixelSize()\n rObj.referenceWavelength = radarWavelength\n \n # preparing the output directory for coregistered secondary slc\n #coregDir = self.insar.coregDirname\n\n os.makedirs(coregDir, exist_ok=True)\n\n # output file name of the coregistered secondary slc\n img = secondaryFrame.getImage() \n coregFilename = os.path.join(coregDir , os.path.basename(img.filename))\n\n imgOut = isceobj.createSlcImage()\n imgOut.setWidth(width)\n imgOut.filename = coregFilename\n imgOut.setAccessMode('write')\n\n rObj.resamp_slc(imageOut=imgOut)\n\n imgOut.renderHdr()\n\n return coregFilename\n\n\ndef runResampleSubbandSlc(self, misreg=False):\n '''Run method for split spectrum.\n '''\n\n if not self.doSplitSpectrum:\n print('Split spectrum not requested. 
Skipping...')\n return\n \n referenceFrame = self._insar.loadProduct( self._insar.referenceSlcCropProduct)\n secondaryFrame = self._insar.loadProduct( self._insar.secondarySlcCropProduct)\n\n# Modified by V. Brancato 10.14.2019\n\n if self.doRubbersheetingAzimuth:\n print('Using rubber in azimuth sheeted offsets for resampling sub-bands')\n azoffname = os.path.join( self.insar.offsetsDirname, self.insar.azimuthRubbersheetFilename)\n\n else:\n print('Using refined offsets for resampling sub-bands')\n azoffname = os.path.join( self.insar.offsetsDirname, self.insar.azimuthOffsetFilename)\n \n if self.doRubbersheetingRange:\n print('Using rubber in range sheeted offsets for resampling sub-bands')\n rgoffname = os.path.join( self.insar.offsetsDirname, self.insar.rangeRubbersheetFilename)\n else:\n print('Using refined offsets for resampling sub-bands')\n rgoffname = os.path.join( self.insar.offsetsDirname, self.insar.rangeOffsetFilename)\n# ****************** End of Modification\n \n # rgoffname = os.path.join( self.insar.offsetsDirname, self.insar.rangeOffsetFilename)\n azpoly = self.insar.loadProduct( os.path.join(self.insar.misregDirname, self.insar.misregFilename) + '_az.xml')\n rgpoly = self.insar.loadProduct( os.path.join(self.insar.misregDirname, self.insar.misregFilename) + '_rg.xml')\n\n\n imageSlc2 = os.path.join(self.insar.splitSpectrumDirname, self.insar.lowBandSlcDirname, \n os.path.basename(secondaryFrame.getImage().filename))\n\n wvlL = self.insar.lowBandRadarWavelength\n coregDir = os.path.join(self.insar.coregDirname, self.insar.lowBandSlcDirname)\n \n lowbandCoregFilename = resampleSlc(self,referenceFrame, secondaryFrame, imageSlc2, wvlL, coregDir,\n azoffname, rgoffname, azpoly=azpoly, rgpoly=rgpoly,misreg=False)\n\n imageSlc2 = os.path.join(self.insar.splitSpectrumDirname, self.insar.highBandSlcDirname,\n os.path.basename(secondaryFrame.getImage().filename))\n wvlH = self.insar.highBandRadarWavelength\n coregDir = os.path.join(self.insar.coregDirname, self.insar.highBandSlcDirname)\n\n highbandCoregFilename = resampleSlc(self,referenceFrame, secondaryFrame, imageSlc2, wvlH, coregDir, \n azoffname, rgoffname, azpoly=azpoly, rgpoly=rgpoly, misreg=False)\n\n self.insar.lowBandSlc2 = lowbandCoregFilename\n self.insar.highBandSlc2 = highbandCoregFilename\n \n","repo_name":"isce-framework/isce2","sub_path":"components/isceobj/StripmapProc/runResampleSubbandSlc.py","file_name":"runResampleSubbandSlc.py","file_ext":"py","file_size_in_byte":5961,"program_lang":"python","lang":"en","doc_type":"code","stars":431,"dataset":"github-code","pt":"82"}
+{"seq_id":"1950601808","text":"# coding: utf-8\n\nfrom News.database import Monitor\nfrom News.database import session\n\n\ndef _record_error_log(item, error):\n m = Monitor(\n crawl_url=item[\"crawl_url\"],\n original_url=item[\"original_url\"],\n crawl_source=item[\"crawl_source\"],\n original_source=item[\"original_source\"],\n channel=item[\"channel\"],\n error=error,\n )\n try:\n session.add(m)\n session.commit()\n except Exception as e:\n session.rollback()\n","repo_name":"xiaol/NewsCrawlerPG","sub_path":"News/test/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"40138059690","text":"'''Cree una función que retorne el número de palabras\npresentes en un String que le llega cómo parámetro.\n\n(obs: considere que toda palabra válida está separada\npor un espacio de la anterior)'''\n\ndef NumeroDePalabras():\n Oracion=input('Digite una oracion: ')\n\n if Oracion.isdigit():\n while True:\n print('No se admiten numeros...')\n Oracion=input('Digite una oracion: ')\n if not Oracion.isdigit():\n break\n\n espacios=Oracion.split(' ')\n print(len(espacios))\n\n\nNumeroDePalabras()","repo_name":"JPerez1005/Python","sub_path":"C/C1/Practicas Homework/5Funciones/NumeroDePalabras.py","file_name":"NumeroDePalabras.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"34223387221","text":"FONT = (\"--X--XXX-XXX-X-X-XXX--XX-XXX-XXX--XX-XX--\"\n \"-XX----X---X-X-X-X---X-----X-X-X-X-X-X-X-\"\n \"--X---XX--X--XXX-XX--XXX--X--XXX-XXX-X-X-\"\n \"--X--X-----X---X---X-X-X-X---X-X---X-X-X-\"\n \"--X--XXX-XXX---X-XX---XX-X---XXX-XX---XX-\")\n\nfrom pprint import pprint \n\ndef divide_font(FONT):\n numbers = [FONT[num:num+40] for num in range(0,len(FONT), 41)]\n #pprint (numbers)\n num_arr = []\n for i in range(0,len(numbers[0]),4):\n a = [j[i:i+4] for j in numbers]\n a = [j for i in a for j in i]\n a = map(lambda x : 0 if x == '-' else 1, a)\n num_arr.append(a)\n #pprint(a)\n #print '\\n'\n #pprint(num_arr)\n #print len(num_arr)\n last = num_arr.pop()\n num_arr.insert(0, last)\n #num_arr[-1] + num_arr[:-1] \n pprint (num_arr)\n return num_arr\n\ndef checkio(image):\n num_img = ''\n numbers = divide_font(FONT)\n for i in range(0,len(image[0])-1,4):\n num = [j[i:i+4] for j in image]\n num = [j for i in num for j in i]\n #print num \n #print '\\n'\n for j,i in enumerate(numbers):\n #sum(k[0]!=k[1] for k in zip(num,i))\n if sum(k[0]!=k[1] for k in zip(num,i)) < 2 :\n #print sum(k[0]!=k[1] for k in zip(num,i))\n #print zip(num,i) \n #print 'bingo'\n #print j+1\n num_img += str(j)\n #print num_img\n return int(num_img)\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio([[0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0],\n [0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0]]) == 394, \"394 clear\"\n assert checkio([[0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0],\n [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],\n [0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0]]) == 394, \"again 394 but with noise\"\n","repo_name":"a1ip/checkio-17","sub_path":"mono-captcha.py","file_name":"mono-captcha.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"41519780965","text":"#!/usr/bin/python\nprint('Content-type: text/html\\n')\n\n'''\n\nSynergetic Lite\n\nremovePeriodReservation.py\n\nAllows the user to \"un-reserve\" a period for all students and teachers in a class.\n\nBy Nick Patrikeos on 22DEC17\n\n'''\n\nimport cgi\nimport cgitb; cgitb.enable()\nimport sqlite3\nfrom dbFunctions import *\nimport random\n\nform = cgi.FieldStorage()\nperiodNum = form.getvalue('periodNum')\nclassID = form.getvalue('classID')\nvalues = {'classID':classID, 'periodNum':periodNum}\n\ndb = sqlite3.connect('synergetic.db')\ncursor = db.cursor()\ncursor.execute('PRAGMA foreign_keys = ON')\n\ncursor.execute('SELECT Teacher FROM Classes WHERE Class_ID = :classID', values)\nteacherID = cursor.fetchall()[0][0]\n\nprint('')\n\ncursor.execute('DELETE FROM TeacherPeriods WHERE Class = :classID AND Period_Num = :periodNum', values)\n\ncursor.execute('SELECT Student FROM Enrolments WHERE Class = :classID', values)\nstudents = [i[0] for i in cursor.fetchall()]\n\nfor student in students:\n values['studentID'] = student\n cursor.execute('DELETE FROM StudentPeriods WHERE Class = :classID AND Student = :studentID AND Period_Num = :periodNum', values)\n\n\nprint('')\ndb.commit()\ndb.close()\n","repo_name":"NicktheGreek1985/PythonCGIProjects","sub_path":"Synergetic Lite/removePeriodReservation.py","file_name":"removePeriodReservation.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"30672563012","text":"\r\nimport random\r\nimport pygame\r\nimport math\r\nfrom pygame import mixer\r\n\r\npygame.init()\r\nmixer.init()\r\n\r\n# create the screen\r\nscreen=pygame.display.set_mode((800,600))\r\n#icon and title\r\npygame.display.set_caption(\"pokecapture\")\r\nicon=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\pokeball.png\")\r\npygame.display.set_icon(icon)\r\n\r\n#background\r\nbg=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\bg1.png\")\r\n\r\n# player\r\npokeball=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\pokeball.png\")\r\nplayerImg=pokeball\r\nplayerX=374\r\nplayerY=536\r\nplayerX_change=0\r\ndef player(x,y):\r\n screen.blit(playerImg,(x,y))\r\n#sound\r\nmixer.music.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\bgsound.wav\")\r\nmixer.music.play(-1)\r\n #score\r\nscore=0\r\nfont=pygame.font.SysFont('inkfree.ttf',40)\r\ntextX=10\r\ntextY=10\r\n\r\ndef show_score():\r\n score1=font.render('SCORE:'+str(score),True, (0,0,0))\r\n screen.blit(score1,(30,30))\r\n#pokemons\r\nbullbasaur=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\bullbasaur.png\")\r\ncharmander=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\charmander.png\")\r\ndratini=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\dratini.png\")\r\neevee=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\eevee.png\")\r\njigglypuff=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\jigglypuff.png\")\r\nmeowth=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\meowth (2).png\")\r\npikachu=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\pikachu.png\")\r\npsyduck=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\psyduck.png\")\r\nsnorlax=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\snorlax.png\")\r\nsquirtle=pygame.image.load(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\squirtle.png\")\r\npoke=[bullbasaur,charmander,dratini,eevee,jigglypuff,meowth,pikachu,psyduck,snorlax,squirtle]\r\n\r\npokeImg=[meowth,pikachu]\r\npokeX=[]\r\npokeY=[]\r\npokeY_change=[1,1]\r\nfor i in range(8):\r\n n=random.randint(0,9)\r\n poke1=poke[n]\r\n pokeImg.append(poke1)\r\n pokeX.append(random.randint(0,768))\r\n pokeY.append(random.randint(-80,400))\r\n \r\n pokeY_change.append(1)\r\nfor i in range (2):\r\n pokeX.append(random.randint(0,768))\r\n pokeY.append(random.randint(-80,400))\r\ndef pokemon(x,y,i,l):\r\n screen.blit(l[i],(x,y))\r\n#collision\r\ndef collision(x,y,playerX,playerY):\r\n dist=math.sqrt((math.pow(x-playerX,2))+(math.pow(y-playerY,2)))\r\n if dist<=27:\r\n return True\r\n\r\n#game over\r\nover_font=pygame.font.SysFont('inkfree.ttf',60)\r\ndef gameover():\r\n overtext=over_font.render(\"GAME OVER\",True,(0,0,0))\r\n screen.blit(overtext,(190,300))\r\n screen.blit(pokeball,(130,400))\r\n#game loop\r\n\r\nrunning=True\r\nwhile running:\r\n \r\n screen.fill((0,0,0))\r\n screen.blit(bg,(0,0))\r\n for event in pygame.event.get(): \r\n if event.type==pygame.QUIT:\r\n running=False\r\n if event.type==pygame.KEYDOWN:\r\n if event.key==pygame.K_LEFT:\r\n playerX_change=-3\r\n if event.key==pygame.K_RIGHT:\r\n playerX_change=3\r\n if event.type==pygame.KEYUP:\r\n playerX_change=0\r\n \r\n playerX+=playerX_change\r\n if playerX<=0:\r\n playerX=0\r\n elif playerX>=736:\r\n playerX=736\r\n 
player(playerX,playerY)\r\n#show score\r\n    \r\n    for i in range(10):\r\n        pokeY[i]+=pokeY_change[i]\r\n       \r\n        pokemon(pokeX[i],pokeY[i],i,pokeImg)\r\n        col=collision(pokeX[i],pokeY[i],playerX,playerY)\r\n\r\n        if pokeY[i]>=600:\r\n            pokeY[i]=random.randint(-20,40)\r\n            pokeX[i]=random.randint(0,768)\r\n        if col:\r\n            char=pokeImg[i]\r\n            if char==pikachu:\r\n                np=random.randint(0,9)\r\n                pokeX[i]=random.randint(0,736)\r\n                pokeY[i]=random.randint(0,40)\r\n                pokemon(pokeX[i],pokeY[i],np,poke)\r\n                cap=mixer.Sound(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\capture.wav\")\r\n                cap.play()\r\n                score+=5\r\n            elif char==meowth:\r\n                for i in range(10):\r\n                    screen.fill((34,34,34))\r\n                    gameover()\r\n                \r\n                running=False\r\n            else:\r\n                np=random.randint(0,9)\r\n                pokeX[i]=random.randint(0,736)\r\n                pokeY[i]=random.randint(0,40)\r\n                pokemon(pokeX[i],pokeY[i],np,poke)\r\n                cap=mixer.Sound(\"C:\\\\users\\\\khuhan rawat\\\\Desktop\\\\pokemon\\\\capture.wav\")\r\n                cap.play()\r\n                \r\n                score+=5\r\n    \r\n    show_score()\r\n\r\n\r\n#Run=True\r\n\r\n    \r\n\r\n    pygame.display.update()\r\n","repo_name":"Mystic-miracle/Pokecapture","sub_path":"G!.py","file_name":"G!.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"2999263502","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport re\nimport urllib\nimport json\nimport base64\nimport random\nimport datetime\nimport tornado\nfrom db import mongo\nfrom db import live_mongo\nfrom db import theme_mongo\nfrom pymongo import DESCENDING, ASCENDING\nfrom bson import objectid\nfrom libs import BaseHandler\nfrom conf import config\n\nnewlist = ['4e4d610cdf714d2966000002','4fb479f75ba1c65561000027','4e4d610cdf714d2966000003','4fb47a305ba1c60ca5000223','4ef0a35c0569795756000000','4e4d610cdf714d2966000001'] #风景、视觉、动漫、城市、情感、动物\nhotlist = ['4e4d610cdf714d2966000000','4e4d610cdf714d2966000002','4fb479f75ba1c65561000027','4fb47a465ba1c65561000028','4e4d610cdf714d2966000006','4e58c2570569791a19000000'] #美女、风景、视觉、物语、男人、影视\n\n\nclass CommendHandler(BaseHandler):\n def get(self):\n weekpaper = None #recommend paper at week\n weeklive = None # recommend live at week\n adimage = None\n\n #newest paper\n nlist = []\n for i in newlist:\n cid = objectid.ObjectId(i)\n img = mongo.image.find({'cid':cid},limit=1).sort('atime', DESCENDING)\n if img.count()>0:\n nlist.append(img[0])\n\n #hot paper\n try:\n _imglist, length = self.hot_image_cache.find_list(config.Cache.hot_image_cache, 0, 5)\n if _imglist:\n imglist = [json.loads(i) for i in _imglist]\n else:\n imglist = mongo.image.find(limit=6,skip=0).sort('rank',DESCENDING)\n except:\n imglist = mongo.image.find(limit=6,skip=0).sort('rank',DESCENDING)\n\n self.render(\"commend.html\",\n context=self.context,\n news=nlist,\n hots=imglist,\n )\n\n\nclass NewCommendHandler(BaseHandler):\n def get(self):\n self.render(\"newcommend.html\",\n context=self.context,\n )\n\nclass MoreNewPaperHandler(BaseHandler):\n def get(self):\n limit = 18\n skip = self.get_argument(\"skip\",default=0)\n try:\n skip = int(skip)\n except:\n skip = 0\n\n imglist = mongo.image.find(limit=limit,skip=skip).sort('atime',DESCENDING)\n images = []\n index = skip\n for i in imglist:\n if not self.session.hd:\n netimg = str(i['thumb_fobj'])\n elif self.session.net=='pc':\n netimg = str(i['fobjs']['640x480'])\n else: # self.session.net=='wifi':\n netimg = str(i['fobjs']['160x120'])\n\n images.append({\n 'id':str(i['_id']),\n 'image':netimg,\n 'skip':index\n })\n index += 1\n\n self._buffer = json.dumps({'code':0,'resp':images})\n callback = self.get_argument('jsoncallback', default=None)\n if callback:\n self._buffer = \"%s(%s)\" % (callback,self._buffer)\n self.write(self._buffer)\n\nclass MoreNewLiveHandler(BaseHandler):\n def get(self):\n limit = 9 \n skip = self.get_argument(\"skip\",default=0)\n try:\n skip = int(skip)\n except:\n skip = 0\n\n apklist = live_mongo.apk.find(skip=skip,limit=limit).sort('atime',DESCENDING)\n lives = []\n index = skip\n for i in apklist:\n lives.append({\n 'id':str(i['_id']),\n 'thumbid':str(i['thumbid'][0]),\n 'skip':index\n })\n index += 1\n\n self._buffer = json.dumps({'code':0,'resp':lives})\n callback = self.get_argument('jsoncallback', default=None)\n if callback:\n self._buffer = \"%s(%s)\" % (callback,self._buffer)\n self.write(self._buffer)\n\nclass NewPaperDetailHandler(BaseHandler):\n def get(self):\n imgid = self.get_argument(\"imgid\",default=None)\n _skip = self.get_argument(\"skip\",default=0)\n ctype = self.get_argument(\"type\",default=\"date\")\n showmsg = self.session.show_msg\n self.session.show_msg = None\n\n try:\n skip=int(_skip)\n if skip < 0:\n skip = 0\n _skip = None\n except:\n skip=0\n\n img=None\n read_from_cache = False\n\n try:\n if _skip:\n if ctype=='date':\n img = 
mongo.image.find(skip=skip,limit=1).sort('atime',DESCENDING)[0]\n else:\n try:\n img = self.hot_image_cache.find_one(config.Cache.hot_image_cache, skip)\n if img:\n read_from_cache = True\n img = json.loads(img)\n else:\n img = mongo.image.find(skip=skip,limit=1).sort('rank',DESCENDING)[0]\n except:\n img = mongo.image.find(skip=skip,limit=1).sort('rank',DESCENDING)[0]\n\n else:\n iid = objectid.ObjectId(imgid)\n img = mongo.image.find_one({'_id':iid})\n if not img:\n raise\n except:\n raise\n return self.notfound()\n\n front = skip - 1\n end = skip + 1\n\n if not read_from_cache:\n if end>=mongo.image.count():\n end = -1\n else:\n if not self.hot_image_cache.find_one(config.Cache.hot_image_cache, end):\n end = -1\n\n if _skip == None:\n front = -1\n end = -1\n\n referer = urllib.quote(self.request.uri)\n isfav=-1\n if self.session.uid:\n pri=mongo.private.find_one({'uid':self.session.uid,'imgid': img['_id']})\n if pri:\n isfav=1\n else:\n isfav=0\n\n tags = mongo.img2tag.find({'imgid': objectid.ObjectId(img['_id'])}).sort('num', DESCENDING)\n tags = [i for i in tags]\n self.render(\"compaper_detail.html\",\n context=self.context,\n image=img,\n front=front,\n end=end,\n isfav=isfav,\n referer=referer,\n tags=tags,\n message=showmsg,\n type=ctype,\n )\n\nclass NewLiveDetailHandler(BaseHandler):\n def get(self):\n apkid = self.get_argument(\"apkid\",default=None)\n skip = self.get_argument(\"skip\",default=0)\n ctype = self.get_argument(\"type\", default=\"date\")\n\n try:\n skip=int(skip)\n except:\n skip=0\n\n apk=None\n try:\n if apkid==None:\n if ctype=='date':\n apks=live_mongo.apk.find(skip=skip,limit=1).sort('atime',DESCENDING)\n else:\n apks=live_mongo.apk.find(skip=skip,limit=1).sort('rank',DESCENDING)\n\n try:\n apk=apks[0]\n pid=apk['_id']\n except:\n raise\n else:\n pid = objectid.ObjectId(apkid)\n apk = live_mongo.apk.find_one({'_id':pid})\n if not apk:\n raise\n except:\n return self.notfound()\n\n #cal mark\n marks = live_mongo.mark2apk.find({'apkid':apk['_id']})\n msum = 0.0\n mcount = 0\n for m in marks:\n msum += m['mark']\n mcount += 1\n score = 0\n if mcount>0:\n score = round(msum/mcount)\n score = int(score)\n\n\n front = skip-1\n end = skip+1\n if end>=live_mongo.apk.count():\n end = -1\n\n referer = urllib.quote(self.request.uri)\n isfav=-1\n if self.session.uid:\n pri=live_mongo.private.find_one({'uid':self.session.uid,'apkid':pid})\n if pri:\n isfav=1\n else:\n isfav=0\n\n self.render(\"comlive_detail.html\",\n context=self.context,\n apk=apk,\n front=front,\n end=end,\n favstate=isfav,\n referer=referer,\n score=score,\n amount=mcount,\n type=ctype,\n )\n\nclass HotCommendHandler(BaseHandler):\n def get(self):\n self.render(\"hotcommend.html\",\n context=self.context,\n )\n\nclass MoreHotPaperHandler(BaseHandler):\n def get(self):\n limit = 18\n skip = self.get_argument(\"skip\",default=0)\n try:\n skip = int(skip)\n except:\n skip = 0\n\n try:\n _imglist, length = self.hot_image_cache.find_list(config.Cache.hot_image_cache, skip, limit-1)\n if _imglist:\n imglist = [json.loads(i) for i in _imglist]\n else:\n imglist = mongo.image.find(limit=limit,skip=skip).sort('rank',DESCENDING)\n except:\n imglist = mongo.image.find(limit=limit,skip=skip).sort('rank',DESCENDING)\n\n\n images = []\n index = skip\n for i in imglist:\n if not self.session.hd:\n netimg = str(i['thumb_fobj'])\n elif self.session.net=='pc':\n netimg = str(i['fobjs']['640x480'])\n else: # self.session.net=='wifi':\n netimg = str(i['fobjs']['160x120'])\n\n images.append({\n 'id':str(i['_id']),\n 
'image':netimg,\n 'skip':index\n })\n index += 1\n\n self._buffer = json.dumps({'code':0,'resp':images})\n callback = self.get_argument('jsoncallback', default=None)\n if callback:\n self._buffer = \"%s(%s)\" % (callback,self._buffer)\n self.write(self._buffer)\n\nclass MoreHotLiveHandler(BaseHandler):\n def get(self):\n limit = 9\n skip = self.get_argument(\"skip\",default=0)\n try:\n skip = int(skip)\n except:\n skip = 0\n\n apklist = live_mongo.apk.find(skip=skip,limit=limit).sort('rank',DESCENDING)\n lives = []\n index = skip\n for i in apklist:\n lives.append({\n 'id':str(i['_id']),\n 'thumbid':str(i['thumbid'][0]),\n 'skip':index\n })\n index += 1\n\n self._buffer = json.dumps({'code':0,'resp':lives})\n callback = self.get_argument('jsoncallback', default=None)\n if callback:\n self._buffer = \"%s(%s)\" % (callback,self._buffer)\n self.write(self._buffer)\n\n","repo_name":"zytjm/tornado-mongo-based-webserver","sub_path":"mobile_server/app/commend.py","file_name":"commend.py","file_ext":"py","file_size_in_byte":10616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"28785151500","text":"import copy\nfrom typing import Optional, Union\nimport unittest\nfrom google.protobuf import descriptor\nfrom google.protobuf import message\nfrom google.protobuf import text_format\n\n\ndef _clear_field(proto: message.Message, field_path: str) -> None:\n \"\"\"Clears field_path in proto.\n\n field_path contains field names separated by '.' into the proto, e.g.,\n my_sub_message.my_repeated_field.my_field.\n A field is removed by calling ClearField.\n\n Args:\n proto: A proto message to be modified.\n field_path: The path to the field to be cleared.\n \"\"\"\n\n next_field_name, _, path_suffix = field_path.partition(\".\")\n if next_field_name not in proto.DESCRIPTOR.fields_by_name:\n raise ValueError(\n f\"Field {next_field_name} in field path {field_path} does not refer to\"\n f\" a known field for message {proto.DESCRIPTOR.full_name}.\"\n )\n\n # root case, field_path was just a field\n if not path_suffix:\n proto.ClearField(next_field_name)\n return\n\n # next_field can refer to:\n # - a submessage (or oneof of submessages)\n # - a repeated field of messages\n next_field: descriptor.FieldDescriptor = proto.DESCRIPTOR.fields_by_name[\n next_field_name\n ]\n if next_field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:\n raise ValueError(\n f\"Field {next_field_name} in field path {field_path} does not refer to\"\n f\" a message field for message {proto.DESCRIPTOR.full_name}.\"\n )\n\n if next_field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n sub_field_list = getattr(proto, next_field_name)\n for sub_message in sub_field_list:\n _clear_field(sub_message, path_suffix)\n return\n\n if not proto.HasField(next_field_name):\n return\n sub_message = getattr(proto, next_field_name)\n _clear_field(sub_message, path_suffix)\n\n\ndef _sort_repeated_fields(proto: message.Message, deduplicate: bool) -> None:\n \"\"\"Sorts all repeated fields including in submessages.\n\n This is typically called to have a canonical order of repeated fields in the\n message for comparison. 
Thus no particular order is guaranteed, but only that\n the order is deterministic for multiple calls on equal messages.\n\n Args:\n proto: A proto message to be modified.\n deduplicate: Determines if duplicate elements in repeated fields should be\n removed.\n \"\"\"\n\n # recurse first, then sort\n field: descriptor.FieldDescriptor\n for field in proto.DESCRIPTOR.fields:\n if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:\n continue\n # At this point field can be\n # - just a single message\n # - a repeated field (list) of messages\n # - a map to a scalar value\n # - a map to message values\n if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n sub_field_list = getattr(proto, field.name)\n\n if (\n field.type == descriptor.FieldDescriptor.TYPE_MESSAGE\n and field.message_type.has_options\n and field.message_type.GetOptions().map_entry\n and field.message_type.fields_by_name[\"value\"].type\n != descriptor.FieldDescriptor.TYPE_MESSAGE\n ):\n # this is a map to build in types (not to message) - nothing to recurse\n continue\n\n if (\n field.type == descriptor.FieldDescriptor.TYPE_MESSAGE\n and field.message_type.has_options\n and field.message_type.GetOptions().map_entry\n ):\n # this is a map to messages\n for _, sub_message in sub_field_list.items():\n _sort_repeated_fields(sub_message, deduplicate)\n else:\n # this is just a repeated field of messages\n for sub_message in sub_field_list:\n _sort_repeated_fields(sub_message, deduplicate)\n elif proto.HasField(field.name):\n # a single message field\n sub_message = getattr(proto, field.name)\n _sort_repeated_fields(sub_message, deduplicate)\n\n # now, sort each field, where sub-fields are already sorted (and thus\n # canonical)\n for field in proto.DESCRIPTOR.fields:\n if field.label != descriptor.FieldDescriptor.LABEL_REPEATED:\n continue\n\n if (\n field.type == descriptor.FieldDescriptor.TYPE_MESSAGE\n and field.message_type.has_options\n and field.message_type.GetOptions().map_entry\n ):\n continue # do not sort maps\n\n sub_field_list = getattr(proto, field.name)\n if not sub_field_list:\n continue\n\n if field.type == descriptor.FieldDescriptor.TYPE_MESSAGE:\n key_fn = text_format.MessageToString\n else:\n key_fn = lambda x: x\n sub_field_list.sort(key=key_fn)\n sub_field_list_no_duplicates = []\n prev = None\n for sub_msg in sub_field_list:\n if not deduplicate or (prev is None or key_fn(prev) != key_fn(sub_msg)):\n sub_field_list_no_duplicates.append(sub_msg)\n prev = sub_msg\n del sub_field_list[:]\n sub_field_list.extend(sub_field_list_no_duplicates)\n\n\ndef _floats_in_tolerance(value_a: float, value_b: float, rtol: float) -> bool:\n return abs(value_a - value_b) <= rtol * max(abs(value_a), abs(value_b))\n\n\ndef _equalize_floats_in_tolerance(\n proto_a: message.Message, proto_b: message.Message, rtol: float\n) -> None:\n \"\"\"Replaces all floats in proto_a with floats from proto_b, if both are in rtol.\n\n All equivalent floating point values (floats and doubles) in proto_a will be\n replaced by the exact values from proto_b, such that there will be no more\n difference between these two messages regarding floats within rtol. 
This is\n typically called to facilitate a readable diff including non-float fields.\n\n Args:\n proto_a: A proto message to be modified.\n proto_b: A given proto message.\n rtol: A relative tolerance defining if the floats are considered equivalent.\n rtol is considered as a proportion of the float with the larger magnitude.\n \"\"\"\n if proto_a.DESCRIPTOR != proto_b.DESCRIPTOR:\n return\n\n # Relevant fields to be handled by this function.\n # Directly:\n # - floats (float and double)\n # - repeated floats\n # - map to float\n # By recursion:\n # - message fields\n # - repeated messages\n # - map to messages\n proto_a_field_names = set(fd.name for fd, _ in proto_a.ListFields())\n proto_b_field_names = set(fd.name for fd, _ in proto_b.ListFields())\n for field_name in proto_a_field_names.intersection(proto_b_field_names):\n field: descriptor.FieldDescriptor = proto_a.DESCRIPTOR.fields_by_name[\n field_name\n ]\n\n value_a = getattr(proto_a, field.name)\n value_b = getattr(proto_b, field.name)\n\n if (\n field.type == descriptor.FieldDescriptor.TYPE_FLOAT\n or field.type == descriptor.FieldDescriptor.TYPE_DOUBLE\n ):\n if field.label != descriptor.FieldDescriptor.LABEL_REPEATED:\n # field is just a float\n if _floats_in_tolerance(value_a, value_b, rtol):\n setattr(proto_a, field.name, value_b)\n else:\n # field is a list of floats\n for index in range(min(len(value_a), len(value_b))):\n if _floats_in_tolerance(value_a[index], value_b[index], rtol):\n value_a[index] = value_b[index]\n\n if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:\n continue\n\n if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n if (\n field.message_type.has_options\n and field.message_type.GetOptions().map_entry\n ):\n value_type = field.message_type.fields_by_name[\"value\"]\n # field is a map\n for key, mapped_value_a in value_a.items():\n mapped_value_b = value_b.get(key)\n if mapped_value_b is None:\n continue\n if (\n value_type.type == descriptor.FieldDescriptor.TYPE_FLOAT\n or value_type.type == descriptor.FieldDescriptor.TYPE_DOUBLE\n ):\n # field is a map to floats\n if _floats_in_tolerance(mapped_value_a, mapped_value_b, rtol):\n value_a[key] = mapped_value_b\n elif value_type.type == descriptor.FieldDescriptor.TYPE_MESSAGE:\n # field is a map to messages - recurse\n _equalize_floats_in_tolerance(\n mapped_value_a, mapped_value_b, rtol=rtol\n )\n else:\n # field is a list of messages - recuse\n for sub_message_a, sub_message_b in zip(value_a, value_b):\n _equalize_floats_in_tolerance(sub_message_a, sub_message_b, rtol=rtol)\n else:\n # field is just a single message - recurse\n _equalize_floats_in_tolerance(value_a, value_b, rtol=rtol)\n\n\n# pylint:disable-next=invalid-name\ndef assertProto2Equal(\n testobj: unittest.case.TestCase,\n proto_a: Union[message.Message, str, bytes],\n proto_b: message.Message,\n *,\n ignored_fields: Optional[list[str]] = None,\n rtol: Optional[float] = None,\n) -> None:\n \"\"\"Asserts that two protos are equal.\n\n Args:\n testobj: The test case that called this comparison.\n proto_a: A proto to compare.\n proto_b: A proto to compare to.\n ignored_fields: List of field paths into the proto to be ignored during\n comparison.\n rtol: Relative tolerance to compare floating point values. 
If not set,\n floats are compared using string comparison.\n \"\"\"\n\n if isinstance(proto_a, str | bytes):\n proto_a = text_format.Parse(proto_a, proto_b.__class__())\n\n copied = False\n if ignored_fields is not None:\n proto_a = copy.deepcopy(proto_a)\n proto_b = copy.deepcopy(proto_b)\n copied = True\n for field_path in ignored_fields:\n _clear_field(proto_a, field_path)\n _clear_field(proto_b, field_path)\n\n if rtol is not None:\n if not copied:\n proto_a = copy.deepcopy(proto_a)\n proto_b = copy.deepcopy(proto_b)\n _equalize_floats_in_tolerance(proto_a, proto_b, rtol)\n\n txt_a = text_format.MessageToString(proto_a)\n txt_b = text_format.MessageToString(proto_b)\n testobj.assertMultiLineEqual(txt_a, txt_b)\n\n\n# pylint:disable-next=invalid-name\ndef assertProto2Contains(\n testobj: unittest.case.TestCase,\n proto_needle: Union[message.Message, str, bytes],\n proto_haystack: message.Message,\n *,\n ignored_fields: Optional[list[str]] = None,\n) -> None:\n \"\"\"Asserts that fields from proto_needle are set the same in proto_haystack.\n\n Args:\n testobj: The test case that called this comparison.\n proto_needle: A proto to compare with proto_haystack.\n proto_haystack: A proto that contains all fields in proto_needle and others.\n ignored_fields: List of field paths into the proto to be ignored during\n comparison.\n \"\"\"\n if isinstance(proto_needle, str | bytes):\n proto_needle = text_format.Parse(proto_needle, proto_haystack.__class__())\n else:\n proto_needle = copy.deepcopy(proto_needle)\n proto_haystack = copy.deepcopy(proto_haystack)\n if ignored_fields is not None:\n for field_path in ignored_fields:\n _clear_field(proto_needle, field_path)\n _clear_field(proto_haystack, field_path)\n\n proto_needle_full = copy.deepcopy(proto_haystack)\n proto_needle_full.MergeFrom(proto_needle)\n\n _sort_repeated_fields(proto_needle_full, deduplicate=True)\n _sort_repeated_fields(proto_haystack, deduplicate=True)\n\n txt_needle = text_format.MessageToString(proto_needle_full)\n txt_haystack = text_format.MessageToString(proto_haystack)\n testobj.assertMultiLineEqual(txt_needle, txt_haystack)\n\n\n# pylint:disable-next=invalid-name\ndef assertProto2SameElements(\n testobj: unittest.case.TestCase,\n proto_a: Union[message.Message, str, bytes],\n proto_b: message.Message,\n *,\n ignored_fields: Optional[list[str]] = None,\n keep_duplicate_values: Optional[bool] = None,\n) -> None:\n \"\"\"Asserts that fields from proto_a and proto_b are the same.\n\n For repeated fields, both messages must have the same items, but count or\n order does not matter.\n The semantics are similar to, e.g., absltest.assertSameElements.\n This method does not care about any duplicates unless keep_duplicate_values\n is set to true.\n\n Args:\n testobj: The test case that called this comparison.\n proto_a: A proto to compare with proto_b.\n proto_b: The proto to compare to.\n ignored_fields: List of field paths into the proto to be ignored during\n comparison.\n keep_duplicate_values: Keep duplicate values before comparing. If not set or\n set to false, duplicate values will be considered one value. 
This makes it\n possible to compare similar to set semantics.\n \"\"\"\n if isinstance(proto_a, str | bytes):\n proto_a = text_format.Parse(proto_a, proto_b.__class__())\n\n proto_a = copy.deepcopy(proto_a)\n proto_b = copy.deepcopy(proto_b)\n if ignored_fields is not None:\n for field_path in ignored_fields:\n _clear_field(proto_a, field_path)\n _clear_field(proto_b, field_path)\n\n deduplicate = True\n if keep_duplicate_values is not None and keep_duplicate_values:\n deduplicate = False\n\n _sort_repeated_fields(proto_a, deduplicate)\n _sort_repeated_fields(proto_b, deduplicate)\n\n txt_a = text_format.MessageToString(proto_a)\n txt_b = text_format.MessageToString(proto_b)\n testobj.assertMultiLineEqual(txt_a, txt_b)\n","repo_name":"intrinsic-dev/intrinsic_sdks","sub_path":"intrinsic/solutions/testing/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":12983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"3655117611","text":"import os\nimport glob\nimport subprocess\n\n\npath = os.path.join(os.getcwd(),'sudokus')\n# subprocess.Popen('\"C:\\\\Program Files (x86)\\\\CodeBlocks\\\\MinGW\\\\bin\"\\\\gcc.exe sudoku.c -o sudoku.exe',shell=True)\n\nfile_number = 1\nfor filename in glob.glob(os.path.join(path, '*.dat')):\n\tf = open(filename, 'r')\n\ti = 0\n\tsudoku_str = \"{\"\n\tsudoku_str2 =\"\"\n\tfor line in f:\n\t\tif (i>1):\n\t\t\tsudoku_str+= \"{\"\n\t\t\tline = line.split()\t\t\t\t\t\t\n\t\t\t# print(line)\n\t\t\taux = 0\n\t\t\tfor number in line:\n\t\t\t\tsudoku_str += str(number)\n\t\t\t\tsudoku_str2 += str(number)\n\t\t\t\tif(aux < 8):\n\t\t\t\t\tsudoku_str+=\",\"\n\t\t\t\taux = aux + 1\n\t\t\tsudoku_str += \"}\"\n\t\t\tif(i<10):\n\t\t\t\tsudoku_str+=\",\"\n\t\ti+=1\n\tsudoku_str += \"}\"\n\tprint(sudoku_str2)\n\tsubprocess.Popen('sudoku.exe '+sudoku_str2+'>> output\\\\output_'+str(file_number)+'.txt',shell=True)\n\t\n\tprint('\\n')\n\tf.close()\n\tfile_number += 1\n","repo_name":"GuilhermeBorges/Sudoku","sub_path":"executa.py","file_name":"executa.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"9878330565","text":"exp = []\nx = str(input('Digite uma expressão: ')).strip()\nfor c in x:\n if c == '(':\n exp.append('(')\n elif c == ')':\n if len(exp) > 0:\n exp.pop()\n else:\n exp.append(')')\n print(len(exp))\n break\nif len(exp) == 0:\n print('Expressão válida!')\nelse:\n print('Expressão invalida!')","repo_name":"Raphael-Azevedo/Exercicios_Python","sub_path":"Exercicios em Python/ex083.py","file_name":"ex083.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"74764755149","text":"import sqlite3\nfrom googletrans import Translator\n\n\ndef get_top_results(update, context):\n conn = sqlite3.connect(\"data.db\")\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT username, full_name, results FROM user_info ORDER BY results DESC LIMIT 5\")\n\n results = cursor.fetchall()\n\n conn.close()\n\n if not results:\n update.message.reply_text(text=\"Unfortunately, no one has run the test yet and has not shown any results😔Maybe you will be the first, click /quiz and test yourself🥹\", parse_mode=\"html\")\n else:\n message = \"\\n\".join([\n f\"{i + 1} {user[1]}'s result is {user[2]}\"\n for i, user in enumerate(results)\n ])\n update.message.reply_text(message, parse_mode=\"html\")\n\n\ndef get_my_level(update,context):\n conn=sqlite3.connect(\"data.db\")\n c=conn.cursor()\n c.execute(f\"WITH SortedUsers AS (SELECT username, full_name, user_id, results,ROW_NUMBER() OVER (ORDER BY results DESC) AS position FROM user_info) SELECT position, username, full_name, results FROM SortedUsers WHERE user_id = {update.message.from_user.id}\")\n\n results=c.fetchone()\n conn.close()\n if results is None:\n update.message.reply_text(\"But you haven't done the quiz yet. That's why you don't have any points. Please click the /quiz command first and collect points by starting the quiz😉\",parse_mode=\"html\")\n else:\n l=[i for i in results]\n update.message.reply_text(\n f\"Your level are {l[0]}🏆Dear {l[2]} your total score {l[3]} .Never stop🚫\",parse_mode=\"html\"\n )\ndef detect_language(text):\n translator = Translator()\n detected = translator.detect(text)\n return detected.lang\n\ndef lang_trans(update,context):\n\n if detect_language(update.message.text)=='en':\n translator = Translator()\n text=update.message.text\n # Translate text from one language to another\n result = translator.translate(f\"{text}\", src=\"en\", dest=\"uz\")\n\n # Access the translated text\n translated_text = result.text\n update.message.reply_text(text=translated_text)\n if detect_language(update.message.text)=='uz':\n translator = Translator()\n text = update.message.text\n # Translate text from one language to another\n result = translator.translate(f\"{text}\", src=\"uz\", dest=\"en\")\n\n # Access the translated text\n translated_text = result.text\n update.message.reply_text(text=translated_text)\n\n\n","repo_name":"umidyor/Quiz_bot_eng","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"73760584909","text":"import speedtest\nfrom threading import Thread\nimport time\nfrom .db.speed_table import SpeedTable\nfrom datetime import datetime\n\nclass GetSpeedData(Thread):\n def __init__(self, log = False):\n self.log = log\n\n def run(self):\n while True:\n self.getData()\n time.sleep(60)\n\n def convertToMb(self, speed):\n return \"{:.2f}\".format(speed/1048576) # Bytes to MBytes\n\n def getData(self):\n st = speedtest.Speedtest()\n downloadSpeed = float(st.download())\n uploadSpeed = float(st.upload())\n timestamp = int(time.time())\n\n downloadSpeed = self.convertToMb(downloadSpeed)\n uploadSpeed = self.convertToMb(uploadSpeed)\n SpeedTable.insert(downloadSpeed, uploadSpeed, timestamp)\n\n if(self.log):\n dt_object = datetime.fromtimestamp(timestamp)\n print('{} --- Download speed: {} Mb/s --- Upload speed: {} Mb/s'.format(dt_object, downloadSpeed, uploadSpeed))","repo_name":"Joselsneto/Internet-Speed-Tracker","sub_path":"src/get_speed_data.py","file_name":"get_speed_data.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"8625183216","text":"import cv2\nimport os\n\n\nclass Video:\n def __init__(self, path, reheight, rewidht):\n self.pathVideo = path\n self.capture = cv2.VideoCapture(path)\n self.resultsPath = \"results\"\n if not os.path.exists(self.resultsPath):\n os.makedirs(self.resultsPath)\n self.num_frames = int(self.capture.get(cv2.CAP_PROP_FRAME_COUNT))\n self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.fps = int(self.capture.get(cv2.CAP_PROP_FPS))\n self.rewidth = rewidht\n self.reheight = reheight\n\n\nif __name__ == '__main__':\n video = Video(\"videos/video_test.mp4\")\n","repo_name":"mcv-m6-video/mcv-m6-2021-team6","sub_path":"W4/Video.py","file_name":"Video.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"11006096923","text":"from django.shortcuts import render\n\n# Create your views here.\n\nimport highlighter.backend as backend\nfrom .models import SummaryEntry, LabelType\nfrom .form import SummaryForm\n\n\ndef highlighter_view(r, *args, **kwargs):\n\t\"\"\"\n\tmain discharge summary labeller view\n\t\"\"\"\n\n\t#vars: cleaned_data, labels\n\tprocessed_text = \"(Enter summary to see labels.)\"\n\tdefinition_html = \"(Enter summary to see definitions.)\"\n\tform = SummaryForm()\n\tif r.method == \"POST\":\n\t\tform = SummaryForm(r.POST)\n\t\tif form.is_valid():\n\t\t\tcleaned_data = form.cleaned_data\n\t\t\tlabels = cleaned_data.pop('labels')\n\t\t\ts = SummaryEntry.objects.create(**cleaned_data)\n\t\t\ts.labels.set(labels)\n\n\t\t\t# if using ML model, use backend.get_summary() function instead.\n\t\t\tprocessed_text, definition_html = backend.get_summary_scispacy(cleaned_data, labels)\n\t\t\ts.processed = processed_text\n\t\t\ts.save() #this step is key!!! :) saves it!\n\n\t\telse:\n\t\t\tprint (\"Post:\", r.POST, form.is_valid())\n\t\t\tform = SummaryForm()\n\t\t\tprint(\"Errors in form:\", form.errors)\n\telse:\n\t\tprint(\"Not a POST method.\")\n\n\tcontext={\n\t\t'form':form,\n\t\t'processed_text': processed_text,\n\t\t'definitions': definition_html,\n\t}\n\treturn render(r, 'highlighter_temp.html', context)\n\t# this is still relative to templates directory!!\n","repo_name":"gloriafang123/mitmlhc2020-public-discharge-labeller","sub_path":"mysite/highlighter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"32605575490","text":"\"\"\"SpaceTravels URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('registration/', include('tourists.urls')), # Showing Django where he should searching for urlpatterns \n path('', include('SpaceTravels.views')), # Pointing into urlpatterns in views.py file in main folder\n path('api-tourists/', include('tourists.api.urls')), # Pointing on our api urls in tourists app\n path('api-flights/', include('flights.api.urls')), # Pointing on our api urls in flights app\n]\n","repo_name":"lukaszkania/SpaceTravelsBackEnd","sub_path":"SpaceTravels/SpaceTravels/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"74656155468","text":"from numba import cuda\n\n\n@cuda.jit\ndef mutate(population, random_values, size_individual, mutation_rate):\n index = cuda.grid(1)\n if index < population.shape[0]:\n individual_mutate(population[index], random_values[index], size_individual, mutation_rate)\n\n\n@cuda.jit(device=True)\ndef individual_mutate(individual, random_values, size_individual, mutation_rate):\n for position_1 in range(size_individual):\n if random_values[0] < mutation_rate:\n position_2 = round(random_values[1] * (size_individual - 1))\n if not position_1 == position_2:\n swap_value = individual[position_1]\n individual[position_1] = individual[position_2]\n individual[position_2] = swap_value\n","repo_name":"TimLC/Genetic_Algorithm_GPU-CPU","sub_path":"optimized_genetic_algorithm/genetic/mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"807951755","text":"import pd\n\n\ndef py2pd(value):\n \"\"\"Convert a Python data type to a PureData type\"\"\"\n return value\n\ndef pd2py(value):\n \"\"\"Convert a PureData data type to a Python type\"\"\"\n return value\n\n\ndef pdlist2pylist(value):\n \"\"\"Convert a PureData list to a Python list\"\"\"\n # value is one list, make it a string\n try:\n s = ''\n for i in range(len(value)):\n s = s + str(value[i]) + \" \" \n s = s.replace(\" \", \",\")\n s = \"[\" + s + \"]\"\n lst = eval(s)\n return lst[0]\n except:\n pd.error(\"There is syntax error in the list\")\n return None\n\n\n\n\n\n\n\n\n\n\n","repo_name":"charlesneimog/py4pd","sub_path":"resources/scripts/src/convertion.py","file_name":"convertion.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"82"}
+{"seq_id":"70063454348","text":"import numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport collections\nimport multiprocessing\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\n\ndef dist(x,y):\n return np.sum((x-y)**2)\n\ndef ChooseInitialMeans(data,k):\n means = []\n for _ in range(k):\n random_centroid = []\n for i in range(data.shape[1]):\n a = min(data[:, i])\n b = max(data[:, i])\n random_centroid.append( np.random.uniform(a,b) )\n means.append(random_centroid)\n #means, clusters = mykmns.kmeans_main(X, k)\n return means\n\n\ndef kmeansOnceWeights(data,weights,k,n,n_per_cluster):\n means = ChooseInitialMeans(data, k)\n\n for iter in range(50):\n #print(iter)\n\n if iter>0:\n means = []\n for k0 in ids:\n indices = [i for i, cl in enumerate(closest_cluster) if cl == k0]\n if len(indices) > 0:\n cut = np.take(data, indices, axis=0)\n means.append(np.apply_along_axis(np.mean, axis=0, arr=cut))\n\n clusters = dict(enumerate(means))\n ids = list(clusters.keys())\n diffs = []\n for id in ids:\n diffs.append(np.apply_along_axis(lambda x: dist(x, clusters[id]), axis=1, arr=data))\n\n diffs = np.asarray(diffs)\n\n clust_sizes = dict(zip(ids, np.zeros(len(ids))))\n closest_cluster = []\n for i in range(n):\n row = diffs[:, i]\n w0 = weights[i]\n inds_sorted = np.argsort(row)\n for id_opt in inds_sorted:\n if clust_sizes[id_opt] < n_per_cluster:\n closest_cluster.append(id_opt)\n clust_sizes[id_opt] += w0\n break\n\n inner_diffs = []\n for k0 in ids:\n indices = [i for i, cl in enumerate(closest_cluster) if cl == k0]\n if len(indices) > 0:\n cut = np.take(diffs, indices, axis=1)\n inner_diffs.append(np.apply_along_axis(np.mean, axis=1, arr=cut)[k0])\n\n return ids, closest_cluster, sum(inner_diffs)\n\n\n\ndef kmeans(data,weights,k,n=None,n_per_cluster=None,B=10):\n\n if n is None:\n n = data.shape[0]\n\n if n_per_cluster is None:\n n_per_cluster = int(np.ceil(sum(weights) / k))\n\n results = []\n for b in range(B):\n print(b)\n results.append(kmeansOnceWeights(data,weights, k, n, n_per_cluster))\n\n inner_diffs = [r[2] for r in results]\n opt = np.argmin(inner_diffs)\n\n counter = collections.Counter(results[opt][1])\n print(counter)\n\n return results[opt][0], results[opt][1]\n\n\ndef kmeans_parallel(data,weights,k,n=None,n_per_cluster=None,B=10):\n\n if n is None:\n n = data.shape[0]\n\n if n_per_cluster is None:\n n_per_cluster = int(np.ceil(sum(weights) / k))\n\n def processInput(b):\n print(b)\n return kmeansOnceWeights(data, weights, k, n, n_per_cluster)\n\n # inputs = [(b, data, weights, k, n, n_per_cluster) for b in range(B)]\n inputs = range(B)\n num_cores = multiprocessing.cpu_count()\n\n with Pool(num_cores-1) as p:\n results = p.map(processInput, inputs)\n\n inner_diffs = [r[2] for r in results]\n opt = np.argmin(inner_diffs)\n\n counter = collections.Counter(results[opt][1])\n print(counter)\n\n return results[opt][0], results[opt][1]\n\n\nif __name__==\"__main__\":\n home_dir = \"/media/bruno/data/chatbot_project/sent2sent\"\n\n k = 2\n data = pickle.load(open(home_dir + \"/data.pickle\", \"rb\"))\n weights = pickle.load(open(home_dir + \"/weights.pickle\", \"rb\"))\n n = data.shape[0]\n\n n_per_cluster = int(np.ceil(sum(weights) / k))\n\n B = 10\n print(data.shape)\n ids, closest_cluster = kmeans_parallel(data, weights, k)\n\n for k0 in ids:\n indices = [i for i, cl in enumerate(closest_cluster) if cl == k0]\n cut = np.take(data, indices, axis=0)\n x, y = cut[:, 0], cut[:, 1]\n v = np.random.rand(3, 1)\n plt.scatter(x, y, c=tuple(v[:, 0]))\n # 
print(\"cluster \" + str(cl) + \" size = \" + str(len(clusters[cl])))\n\n plt.show()","repo_name":"BOpermanis/chatbot_project","sub_path":"sent2sent/kmeans3_weighted.py","file_name":"kmeans3_weighted.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"71720743948","text":"import cv2 \nimport numpy as np\nfrom random import randint\n\n#################################### PLACEHOLDER FUNCTIONS FOR LATER ########################################\ndef recieveMsg():\n \n x = randint(3,14)*100\n y = randint(3,9)*100\n \n return x,y,1\n\ndef getLocatorPhoto():\n \n photo = cv2.imread(\"slika0.jpg\")\n \n return photo\n\ndef extractMap(photo):\n \n MAP = cv2.imread(\"mapa.png\")\n \n return MAP\n\n\n\n#################################### REAL FUNCTIONS IN USE CURRENTLY ########################################\n\n# to know which jetcar is being traced, each ID is connected to its roof-marker color\ndef getColor(ID):\n \n if ID == 1:\n return [0,255,255]\n \n \n\n# coordinates recieved are extracted from a wide-angle lens camera. They need to be adjusted accordingly (un-fisheyed) \ndef undistortCoords(x,y):\n\n # makes an image with one white px, and un-distorts it\n img = np.zeros((2464,3264,3))\n img[y-1:y+1,x-1:x+1,:] = [255,255,255]\n img = undistort(img)\n \n #finds the position of the white px\n img = img[:,:,0]\n horizontal = img.sum(axis=0)\n vertical = img.sum(axis=1)\n \n x = np.argmax(horizontal)\n y = np.argmax(vertical)\n \n return x,y\n \n \n \ndef visualizeMarker(x,y,ID):\n \n x,y = undistortCoords(x,y)\n \n color = getColor(ID)\n marker = np.zeros_like(MAP)\n marker[y-5:y+5,x-5:x+5,:] = color\n \n return marker, x,y\n\n\n\ndef undistort(img, balance=1, dim2=(816,616), dim3=(1632,1332)):\n \n K=np.array([[403.5072678987361, 0.0, 390.5537285576421], [0.0, 403.056903943273, 303.0726428457018], [0.0, 0.0, 1.0]])\n D=np.array([[-0.02877771348636789], [-0.012216466999853827], [0.020949602322686396], [-0.015176688869367766]])\n \n\n dim1 = img.shape[:2][::-1] #dim1 is the dimension of input image to un-distort\n assert dim1[0]/dim1[1] == dim2[0]/dim2[1], \"Image to undistort needs to have same aspect ratio as the ones used in calibration\"\n if not dim2:\n dim2 = dim1\n if not dim3:\n dim3 = dim1\n scaled_K = K * dim1[0] / dim2[0] # The values of K is to scale with image dimension.\n scaled_K[2][2] = 1.0 # Except that K[2][2] is always 1.0\n \n # This is how scaled_K, dim2 and balance are used to determine the final K used to un-distort image. OpenCV document failed to make this clear!\n new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)\n map1, map2 = cv2.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)\n return cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)\n\n\n\n\n\n\n\n\nphoto = getLocatorPhoto() #FTTP\nphoto = undistort(photo)\nMAP = extractMap(photo)\n\nwhile True:\n \n x,y,ID = recieveMsg()\n marker,x,y = visualizeMarker(x,y,ID)\n \n cv2.imshow(\"Map with marker(s):\",marker+MAP)\n cv2.waitKey(1)\n print(x,y,end='\\r')\ncv2.destroyAllWindows()\n ","repo_name":"duspic/SmartCity_Model","sub_path":"L2S_communication/locator2server_server.py","file_name":"locator2server_server.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"42272491474","text":"import pygame as pg\nfrom parameters import screen, W, H\nimport player\nimport globals\n\npg.init()\n\nclock = pg.time.Clock()\n\nplayer = player.Player(50, 50, 0.8)\n\ncolours = {'bg': \"#F2EDD7\", 'ground': \"#755139\"}\n\nrun = True\n\nwhile run:\n\n clock.tick(60)\n\n screen.fill(colours['bg'])\n\n pg.draw.rect(screen, colours['ground'], pg.Rect(0, globals.GROUND_LEVEL, W, H - globals.GROUND_LEVEL))\n\n player.update()\n player.draw()\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n run = False\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_RIGHT:\n globals.moving_right = True\n stay = False\n if event.key == pg.K_LEFT:\n globals.moving_left = True\n stay = False\n if event.key == pg.K_UP:\n globals.jumping = True\n if event.type == pg.KEYUP:\n if event.key == pg.K_RIGHT:\n globals.moving_right = False\n stay = True\n if event.key == pg.K_LEFT:\n globals.moving_left = False\n stay = True\n\n pg.display.flip()\n","repo_name":"Oksana515/Platformer_walking_n_jumping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"1363686842","text":"from outrankingDigraphs import *\nt = PerformanceTableau('zeitRanking2005')\n\ninput('Performance tableau')\nt.showHTMLPerformanceHeatmap(colorLevels=5,\\\n rankingRule=None,\\\n pageTitle='Performance Tableau \\'Zeit Ranking 2005\\'')\n\nfrom sortingDigraphs import *\nqs = QuantilesSortingDigraph(t,limitingQuantiles=7,LowerClosed=False)\ninput('7-tiles sorting')\nqs.showSorting()\ninput('7-tiles qunatile ordering')\nqs.showQuantileOrdering(strategy='average')\n\ninput('Ranking with heatmap')\nt.showHTMLPerformanceHeatmap(colorLevels=5,rankingRule='NetFlows',\n Correlations=True,pageTitle='Performance Tableau \\'Zeit Ranking 2006\\'')\n\n# absolute quantiles rating\nfrom performanceQuantiles import *\npq = PerformanceQuantiles(t,numberOfBins=9,LowerClosed=False)\nnqs = NormedQuantilesRatingDigraph(pq,t)\ninput('9-tiled rating heatmap')\nnqs.showHTMLRatingHeatmap(ndigits=0,colorLevels=5,Correlations=True,pageTitle='3-tiled rating of the universities')\n\n# best choice from preranked digraph\nfrom sparseOutrankingDigraphs import *\nprg = PreRankedOutrankingDigraph(t,5)\ninput('5-tiles preranked relation map')\nprg.showHTMLRelationMap()\ninput('Preranked Best choice recommendation')\nprg.showBestChoiceRecommendation()\n","repo_name":"rbisdorff/Digraph3","sub_path":"examples/zeit2005Demo.py","file_name":"zeit2005Demo.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"82"}
+{"seq_id":"74814861067","text":"import requests\nimport json\nfrom hackernews.celery import app\nfrom pprint import pprint\nimport os\nfrom news.models import Item, Author\nfrom django.db.models import Max\nfrom datetime import datetime\nfrom pytz import timezone\nfrom django.db import IntegrityError\n\n\nutc = timezone(\"UTC\")\n\n\ndef get_max_item():\n resp = requests.get(\"https://hacker-news.firebaseio.com/v0/maxitem.json\")\n return resp.json()\n\n\n@app.task\ndef get_history():\n max_item_id = get_max_item()\n max_item_no_db = Item.objects.aggregate(max_item_id=Max(\"item_id\"))[\"max_item_id\"]\n print(f\"Max Item ID from API = {max_item_id}\")\n print(f\"Current Max Item ID from DB = {max_item_no_db}\")\n print(f\"Catching up with {max_item_id - max_item_no_db}\")\n stories_left = 100\n while max_item_no_db < max_item_id and stories_left > 0:\n max_item_no_db += 1\n get_item.delay(max_item_no_db)\n\n\n@app.task\ndef get_latest():\n resp = requests.get(\"https://hacker-news.firebaseio.com/v0/jobstories.json\")\n ids = resp.json()\n for id in ids:\n get_item.delay(id)\n\n\n@app.task\ndef get_item(id):\n resp = requests.get(f'https://hacker-news.firebaseio.com/v0/item/{id}.json')\n item = resp.json()\n\n parent = None\n if \"parent\" in item:\n try:\n parent = Item.objects.get(item_id=item[\"parent\"])\n except Item.DoesNotExist:\n get_item(item[\"parent\"])\n\n try:\n item_db = Item.objects.get(item_id=item[\"id\"])\n if item_db.category == \"story\" and item[\"type\"] != \"story\":\n item_db.category = item[\"type\"]\n \n except Item.DoesNotExist:\n item_db = Item(\n item_id = item[\"id\"],\n category = item[\"type\"],\n created_date = utc.localize(datetime.utcfromtimestamp(item[\"time\"])) if item.get(\"time\") else None, \n )\n \n item_db.parent = parent\n item_db.text = item.get(\"text\", \"\")\n item_db.url = item.get(\"url\")\n item_db.title = item.get(\"title\", \"\")\n item_db.score = item.get(\"score\")\n \n if \"by\" in item:\n item_db.author = get_user(item[\"by\"])\n \n try:\n item_db.save()\n except IntegrityError:\n pass\n\n # kids = []\n for kid_id in item.get(\"kids\", []):\n subitem = get_item(kid_id)\n # kids.append(subitem)\n # item[\"kids\"] = kids\n return item\n\n\ndef get_user(user_id):\n resp = requests.get(f'https://hacker-news.firebaseio.com/v0/user/{user_id}.json')\n data = resp.json()\n username = data[\"id\"]\n try:\n author = Author.objects.get(username=username)\n except Author.DoesNotExist:\n author = Author(\n username = username,\n created = utc.localize(datetime.utcfromtimestamp(data[\"created\"])),\n karma = data[\"karma\"],\n no_submitted = len(data.get(\"submitted\", []))\n )\n try:\n author.save()\n except IntegrityError:\n pass\n return author\n","repo_name":"adebisit/hacker-news-app","sub_path":"news/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"20829653956","text":"class Solution:\n def brute_force(self, heights):\n \"\"\"\n TC:O(n^2) TLE\n SC:O(1)\n \"\"\"\n n=len(heights)\n max_area=0\n \n for i in range(n):\n min_height=float('inf')\n for j in range(i,n):\n min_height=min(min_height, heights[j])\n max_area=max(max_area, min_height * (j-i+1))\n return max_area\n def divide_and_conquer_helper(self,heights, start, end):\n \"\"\"\n TC:O(nlogn) TLE\n SC:O(n)\n \"\"\"\n if start>end:\n return 0\n \n min_index=start\n for i in range(start, end+1):\n if heights[min_index]>heights[i]:\n min_index=i\n \n res1=heights[min_index]*(end-start+1)\n res2=self.divide_and_conquer_helper(heights,start,min_index-1)\n res3=self.divide_and_conquer_helper(heights,min_index+1,end)\n \n max_area=max(res1, max(res2, res3))\n \n return max_area\n \n def divide_and_conquer(self, heights):\n return self.divide_and_conquer_helper(heights, start=0, end=len(heights)-1)\n def stack_helper(self, heights):\n \"\"\"\n TC: O(N)\n SC: O(N)\n \"\"\"\n n=len(heights)\n stack=list()\n max_area=0\n stack.append(-1)\n \n for i in range(n):\n while (stack[-1]!=-1 and heights[i]<=heights[stack[-1]]):\n temp_area=heights[stack.pop()] * (i-stack[-1]-1)\n max_area=max(max_area,temp_area)\n stack.append(i)\n \n while stack[-1]!=-1:\n max_area=max(max_area, heights[stack.pop()] * (n-stack[-1]-1))\n \n return max_area\n def largestRectangleArea(self, heights: List[int]) -> int:\n if not heights or len(heights)==0:\n return 0\n #return self.brute_force(heights)\n #return self.divide_and_conquer(heights)\n return self.stack_helper(heights)","repo_name":"akshatakulkarni98/ProblemSolving","sub_path":"DataStructures/stacks/histogram_heights.py","file_name":"histogram_heights.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"24221493863","text":"\n__DEBUGING__ = False\n\nif not __DEBUGING__: \n from smbus2 import SMBus\n bus = SMBus(1)\n\nimport time\nimport threading\nimport random\n\n# Open i2c bus 1 and read one byte from address 80, offset 0\n\ntime.sleep(2)\n\n\nlocal_callback = None\n\nkeys = [ '1', '2', '3', 'A', '4', '5', '6', 'B', '7', '8', '9', 'C', '*', '0', '#', 'D' ]\nstates = [False] * len(keys)\n\ndef set_callback(callback):\n global local_callback\n\n # print(\"setting callback\")\n local_callback = callback\n\ndef reset_keys():\n global states\n\n states = [False] * len(keys)\n\n\ndef get_keys():\n global states\n\n # print(\"wheres the keys\")\n return states\n\n\ndef async_key_check():\n global states\n\n while True:\n if __DEBUGING__:\n theres_a_change = check_simulation_keys()\n else:\n theres_a_change = check_keys()\n if theres_a_change:\n local_callback(states)\n \n if __DEBUGING__:\n time.sleep(1)\n else:\n time.sleep(0.1)\n\n\ndef check_keys():\n global states\n\n try:\n b = bus.read_byte_data(0x2a, 0)\n if b != 0:\n char = chr(b)\n index = keys.index(char)\n states[index] = not states[index]\n # print(char, states[index])\n return True\n else:\n return False\n except:\n sad = \"No mames Hugo\"\n return False\n \n\ndef reset_key(index):\n global states\n\n states[index] = False\n\n\ndef check_simulation_keys():\n global states\n # print(\"checking keys\")\n\n theres_a_change = random.randint(0, 1)\n\n if theres_a_change:\n index = random.randint(0, len(states) - 1)\n char = keys[index]\n states[index] = not states[index]\n # print(char, states[index])\n return True\n else:\n return False\n","repo_name":"elastra21/ffa-controller","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"20829680856","text":"# https://leetcode.com/problems/palindrome-permutation/\n# TC:O(N)\n# SC:O(N)\n\nclass Solution:\n def canPermutePalindrome(self, s: str) -> bool:\n if not s:\n return False\n \n n=len(s)\n hash_map=dict()\n count=0\n \n for ch in s:\n hash_map[ch]=hash_map.get(ch,0)+1\n \n for k,v in hash_map.items():\n count = count + (v%2)\n \n return count<=1\n \n \n \n","repo_name":"akshatakulkarni98/ProblemSolving","sub_path":"DataStructures/strings/can_permute_palindrome.py","file_name":"can_permute_palindrome.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"41263746856","text":"import argparse\nimport logging\nimport requests\nimport sys\n\nfrom crlite_query import CRLiteDB, CRLiteQuery, IntermediatesDB, parse_hosts_file\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nlog = logging.getLogger(\"query_cli\")\n\n\ncrlite_collection_prod = (\n \"https://firefox.settings.services.mozilla.com/v1/buckets/security-state\"\n + \"/collections/cert-revocations/records\"\n)\ncrlite_collection_stage = (\n \"https://settings.stage.mozaws.net/v1/buckets/security-state\"\n + \"/collections/cert-revocations/records\"\n)\nintermediates_collection_prod = (\n \"https://firefox.settings.services.mozilla.com/v1/buckets/security-state\"\n + \"/collections/intermediates/records\"\n)\n\n\ndef find_attachments_base_url(urlstring):\n url = urlparse(urlstring)\n base_rsp = requests.get(f\"{url.scheme}://{url.netloc}/v1/\")\n return base_rsp.json()[\"capabilities\"][\"attachments\"][\"base_url\"]\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Query CRLite data\",\n epilog=\"\"\"\n The --db option should point to a folder containing a single filter file of\n the form \"YYYYMMDDnn.filter\" along with a collection of files of the form\n \"YYYYMMDDnn.stash\" which contain updates from that original filter. By\n default, if this tool believes it is out-of-date based on the local\n database, it will attempt to update itself before performing its checks.\n To avoid that behavior, pass --no-update on the command line.\n \"\"\",\n )\n parser.add_argument(\n \"--hosts\",\n help=\"Hosts to check, in the form host[:port] where \"\n + \"port is assumed 443 if not provided. Can be specified multiple times.\",\n action=\"append\",\n nargs=\"+\",\n default=[],\n metavar=\"host[:port]\",\n )\n parser.add_argument(\n \"--hosts-file\",\n help=\"File of hosts to check, in the form of 'host[:port]' each line, \"\n + \"where port is assumed 443 if not provided. 
Can be specified multiple \"\n + \" times.\",\n action=\"append\",\n default=[],\n type=Path,\n )\n parser.add_argument(\n \"files\", help=\"PEM files to load\", type=argparse.FileType(\"r\"), nargs=\"*\"\n )\n parser.add_argument(\n \"--db\",\n type=Path,\n default=Path(\"~/.crlite_db\"),\n help=\"Path to CRLite database folder\",\n )\n parser.add_argument(\n \"--no-update\", help=\"Do not attempt to update the database\", action=\"store_true\"\n )\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--force-update\", help=\"Force an update to the database\", action=\"store_true\"\n )\n group.add_argument(\n \"--use-filter\",\n help=\"Use this specific filter file, ignoring the database\",\n type=Path,\n )\n parser.add_argument(\n \"--check-freshness\",\n help=\"Set exit code 0 if the database is more than this many hours old\",\n type=int,\n )\n parser.add_argument(\n \"--check-not-revoked\",\n help=\"Set exit code 0 if none of the supplied certificates are revoked\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--no-delete\",\n help=\"Do not attempt to delete old database files\",\n action=\"store_true\",\n )\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--crlite-url\",\n default=crlite_collection_prod,\n help=\"URL to the CRLite records at Remote Settings.\",\n )\n group.add_argument(\n \"--crlite-staging\",\n action=\"store_true\",\n help=\"Use the staging URL for CRLite\",\n )\n parser.add_argument(\n \"--intermediates-url\",\n default=intermediates_collection_prod,\n help=\"URL to the CRLite records at Remote Settings.\",\n )\n parser.add_argument(\n \"--download-intermediates\",\n action=\"store_true\",\n help=\"Download all intermediate PEM files to the database\",\n )\n parser.add_argument(\n \"--verbose\", \"-v\", help=\"Be more verbose\", action=\"count\", default=0\n )\n parser.add_argument(\n \"--structured\",\n help=\"Emit log entries intended for structured loggers\",\n action=\"store_true\",\n )\n\n args = parser.parse_args()\n\n if args.crlite_staging:\n args.crlite_url = crlite_collection_stage\n\n if args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG)\n if args.verbose > 2:\n from pyasn1 import debug\n\n debug.setLogger(debug.Debug(\"all\"))\n else:\n logging.basicConfig(level=logging.INFO)\n\n db_dir = args.db.expanduser()\n\n if not db_dir.is_dir():\n db_dir.expanduser().mkdir()\n\n last_updated_file = (db_dir / \".last_updated\").expanduser()\n if last_updated_file.exists() and not args.force_update:\n updated_file_timestamp = datetime.fromtimestamp(\n last_updated_file.stat().st_mtime\n )\n grace_time = datetime.now() - timedelta(hours=6)\n if last_updated_file.is_file() and updated_file_timestamp > grace_time:\n log.info(f\"Database was updated at {updated_file_timestamp}, skipping.\")\n log.debug(\n f\"Database was last updated {datetime.now() - updated_file_timestamp} ago.\"\n )\n args.no_update = True\n\n attachments_base_url = find_attachments_base_url(args.crlite_url)\n\n intermediates_db = IntermediatesDB(\n db_path=db_dir, download_pems=args.download_intermediates\n )\n crlite_db = CRLiteDB(db_path=args.db)\n\n try:\n if args.force_update or not args.no_update:\n if args.download_intermediates:\n log.info(\n \"Downloading all intermediate certificates. 
Look in \"\n + f\"{intermediates_db.intermediates_path}\"\n )\n\n intermediates_db.update(\n collection_url=args.intermediates_url,\n attachments_base_url=attachments_base_url,\n )\n crlite_db.update(\n collection_url=args.crlite_url,\n attachments_base_url=attachments_base_url,\n )\n last_updated_file.touch()\n except KeyboardInterrupt:\n log.warning(\"Interrupted.\")\n sys.exit(1)\n\n if args.use_filter:\n crlite_db.load_filter(path=args.use_filter)\n\n if not args.no_delete:\n crlite_db.cleanup()\n\n log.info(f\"Status: {intermediates_db}, {crlite_db}\")\n\n if args.check_freshness:\n freshness_limit = timedelta(hours=args.check_freshness)\n if crlite_db.age() > freshness_limit:\n log.error(\n f\"Database age is {crlite_db.age()}, which is larger than {freshness_limit}, \"\n + \"aborting!\"\n )\n sys.exit(1)\n\n query = CRLiteQuery(intermediates_db=intermediates_db, crlite_db=crlite_db)\n\n if not args.files and not args.hosts and not args.hosts_file:\n log.info(\"No PEM files or hosts specified to load. Run with --help for usage.\")\n\n to_test = list()\n\n for file in args.files:\n to_test.append((file.name, query.gen_from_pem(file)))\n\n host_strings = []\n for host_list in args.hosts:\n host_strings.extend(host_list)\n\n for path in args.hosts_file:\n with path.open(\"r\") as fd:\n host_strings.extend(parse_hosts_file(fd))\n\n for host_str in host_strings:\n parts = host_str.split(\":\")\n hostname = parts[0]\n port = 443\n if len(parts) > 1:\n port = int(parts[1])\n to_test.append((f\"{hostname}:{port}\", query.gen_from_host(hostname, port)))\n\n failures = list()\n\n for (name, generator) in to_test:\n for result in query.query(name=name, generator=generator):\n if args.structured:\n result.log_query_result()\n else:\n result.print_query_result(verbose=args.verbose)\n\n if args.check_not_revoked and result.is_revoked():\n failures.append(result)\n\n if failures:\n log.error(f\"{len(failures)} failures logged:\")\n for result in failures:\n log.error(result)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"leplatrem/moz_crlite_query","sub_path":"crlite_query/query_cli.py","file_name":"query_cli.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"82"}
+{"seq_id":"21480201032","text":"import sys\nfrom collections import deque\ninput=sys.stdin.readline\n# 1시간 이상\n# bfs(x좌표,y좌표,지나온 흔적(str))\nh,w=map(int,input().split())\ngrid=[list(map(str,input().rstrip())) for _ in range(h)]\ndisc=[[0]*w for _ in range(h)]\ns_x,s_y,l_x,l_y=0,0,0,0\ntrans={}\ntrans['W']=[-1,0];trans['S']=[1,0];trans['A']=[0,-1];trans['D']=[0,1]\ndirection={}\ndirection[(-1,0)]='W';direction[(1,0)]='S';direction[(0,-1)]='A';direction[(0,1)]='D'\nfor i in range(h):\n for j in range(w):\n if grid[i][j]=='D':\n s_x,s_y=i,j\n if grid[i][j]=='Z':\n l_x,l_y=i,j\n\norder={}\nn=int(input())\nfor i in range(n):\n l=input().split()\n order[i+1]=[]\n for c in l:\n order[i+1].append(trans[c])\n\nans=[]\nq=deque()\nq.append([s_x,s_y,\"\"])\ntime=0\nwhile q:\n time+=1\n for _ in range(len(q)):\n a,b,ans=q.popleft()\n if a==l_x and b==l_y:\n print('YES')\n print(ans)\n sys.exit()\n if time>n:continue\n for x,y in order[time]:\n sam=ans[:]\n aa,bb=a+x,b+y\n if 0<=aaHow Much is Your Collector Car Worth?
\", unsafe_allow_html=True)\n st.subheader('Use our API and predict a sale price for your vehicle!')\n with headercol2:\n st.image('https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTe1SBdlXWtJ96-zcUnN05YMaumzpJ-q2ei-A&usqp=CAU', width=500)\nselected_navbar = option_menu(None, [\"Predict\", \"FAQ\", \"API\"], orientation=\"horizontal\")\n\n \ndataset = st.container()\nmodel = st.container()\nyears = range(1980, 2023)\nchart = st.container()\n\n \nif selected_navbar == \"FAQ\":\n with st.container():\n with st.expander(\"What is Cars and Bids?\"):\n st.write('Cars and Bids is an online enthusiast car sales platform created by the automotive Youtuber Doug DeMuro. Most listings on the platform are sold in auction format.')\n with st.expander(\"What model is being used to predict sale price?\"):\n st.write('We are using a Gradient Boosted Regression Tree to predict sale price')\n with st.expander(\"How was the data collected?\"):\n st.write('All of the past listings from CarsAndBids.com were collected using webscraping via selenium. We collected estimated market price for each vehicle from VinAudit.com as well as overall market conditions at the time of sale via Yahoo Finance')\n with st.expander(\"How accurate are the predictions?\"):\n st.write('On our best model, we obtain an accuracy of about 75% (MSE .75)')\n with st.expander(\"Can I use this site commercially?\"):\n st.write('This site is not intended to be used commercially and should not be used commercially')\n with st.expander(\"Is the car price prediction sound financial advice?\"):\n st.write('No. This is a purely academic exercise; use the model output at your own discretion')\n\nif selected_navbar == \"Predict\":\n with st.container():\n st.text('CarsAndBids.com is a new auction website for collector cars from the 80s until now. With a rich history of auctions, we wanted to learn if we could predict\\nwhich cars would be good deals on the site by using features of the vehicle like Make, Model, Year, Engine (etc.) as well as car market data on the vehicle\\nand general market data, we fit a gradient boosted decision tree to predict the selling price of the car. To determine whether its a good deal, we compare the\\npredicted sale price against the market average for similar vehicles. 
Car market data comes from the VinAudit API\\n')\n form = st.form(key='uinput')\n with form:\n form_columns = st.columns(4)\n text_arr = [['Make', 'Model', 'Year'], ['Engine', 'Title', 'Drive'], ['Body Style', 'Reserve', 'Transmission'], ['Vin', 'Mileage']]\n options_arr = [[make_df, model_df, years], [engine_df, title_status_df, drive_train_df], [bodyStyle_df, reserve_df, transmission_df]]\n first_make = make_df.iloc[0][\"make\"]\n columns = []\n for i, col in enumerate(form_columns):\n if i < 3:\n for j in range(len(text_arr[i])):\n newcol = col.selectbox(text_arr[i][j], options_arr[i][j], key=(i*3)+j, index=0)\n columns.append(newcol)\n else:\n for j in range(len(text_arr[i])):\n newcol = col.text_input(text_arr[i][j], key=(i*3)+j)\n columns.append(newcol)\n newcol = col.selectbox('ML Model', model_list, key=(i*3)+j+1, index=0)\n columns.append(newcol)\n\n \n sp500 = fetch_market_data()\n \n m = st.markdown(\"\"\"\n \"\"\", unsafe_allow_html=True)\n \n \n \n button = st.form_submit_button(label=\"Submit\", use_container_width=True)\n \n if button:\n try:\n req = get_vin_info(columns[9])\n with st.spinner('Running Prediction...'):\n time.sleep(5)\n newres= rq.post(SERVER_URI, json={\"rows\": [{ \"make\": columns[0],\n \"model\": columns[1],\n \"mileage\": columns[10],\n \"status\": columns[4], \n \"engine\":columns[3],\n \"drivetrain\": columns[5],\n \"transmission\" :columns[8],\n \"bodystyle\": columns[6],\n \"y_n_reserve\":columns[7],\n \"year\":columns[2],\n 'market_value_mean': req[\"mean\"], \n 'market_value_std':req['stdev'], \n 'count_over_days':str(float(req['count']) / 90), \n 'Adj Close':sp500,\n 'tree_model': columns[11]}]}) \n response = newres.json()\n newres = response[0][0]\n shaps = pd.DataFrame(pd.Series(response[1]))\n shaps = pd.melt(shaps.reset_index(), id_vars=[\"index\"])\n st.subheader('Dollar Contribution of Each Feature to the Predicted Sale Price')\n chart = (\n alt.Chart(shaps)\n .mark_bar()\n .encode(\n x=alt.X(\"value\", type=\"quantitative\", title=\"Dollars\"),\n y=alt.Y(\"index\", type=\"nominal\", title=\"Features\"),\n color=alt.Color(\"variable\", type=\"nominal\", title=\"\", legend=None),\n order=alt.Order(\"variable\", sort=\"descending\")))\n st.altair_chart(chart, use_container_width=True)\n st.markdown(f\"# Predicted Price on CarsAndBids.com: **${round(newres)}**\")\n except:\n st.write('Unable to gather information from VIN. Please try a different vehicle')\n\napi_column1, api_column2, api_column3 = st.columns(3) \nif selected_navbar == \"API\":\n st.subheader(\"Our API is free to use and available via a POST request to http://collectorcarpricing.com:8080/predict\")\n st.write('The post request must include the following parameters:')\n api_data = { \"Name\": ['make', 'model', 'mileage', 'status', 'engine', 'bodystyle', 'y_n_reserve','year', 'drivetrain', 'transmission', 'vin'],\n \"Required\": ['yes', 'yes', 'yes', 'yes', 'yes', 'yes', 'yes','yes', 'yes', 'yes', 'yes'],\n \"Data Type\": ['string', 'string', 'float', 'string', 'string', 'string', 'string','int', 'string', 'string', 'string'],\n \"Accepted Values\": [\"Any brand of auto manufacturer. If the brand doesnt exist in the training data make will not contribute to the prediction\",\n \"Any model from an auto manufacturer. 
If the model doesnt exist in the training data it will use the average price for the chosen make\",\n \"Any positive number (without commas)\",\n \"Clean, Salvage, Other\",\n \"One of the following: (P9, P8, V1, I6, Electric, I2, H6, I3, I5, Flat-2, I4, Flat-4, R6, H4, V6, W8, V2, Flat-6, V8). If not in this list the model will use the average price for the chosen make\",\n \"One of the following: (SUV/Crossover, Hatchback, Convertible, Van/Minivan, Sedan, Wagon, Truck, Coupe)\",\n \"One of the following: (Reserve, No Reserve)\",\n \"Any year from 1980 - present\",\n \"One of the following: (Rear-wheel drive, 4WD/AWD, Front-wheel drive)\",\n \"One of the following: (Manual, Automatic)\",\n \"Any valid VIN number\"]}\n st.table(pd.DataFrame(api_data))\n st.subheader('Ex:')\n st.text('''curl -d '{\"rows\": [{\"make\": \"Porsche\",\"model\": \"Cayenne\",\"mileage\": \"167500.0\",\"status\": \"Clean\" , \"engine\":\"3.6L V6\",\"drivetrain\": \"4WD/AWD\",\"transmission\" :\"Manual (6-Speed)\",\"bodystyle\":\" SUV/Crossover\", \"y_n_reserve\":\" No Reserve\",\"year\":\"2012.0\", \"vin\": \"5YJSA1DP4CFF00027\"}]}' -X POST http://collectorcarpricing.com:8080/predict''')\n \n\nst.write(\"Developed by Adam Lang and David Kim [Github Repo]('https://github.com/CodeSmithDSMLProjects/CarSalesModel')\")\nst.write(\"Contact us at adamglang96@gmail.com and koyykdy@gmail.com\")\n","repo_name":"CodeSmithDSMLProjects/CarSalesModel","sub_path":"public/stream_lit.py","file_name":"stream_lit.py","file_ext":"py","file_size_in_byte":12710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"3251391737","text":"import telegram\n\nfrom dbdriver import DBDriver\n\ndatabase = DBDriver()\ndatabase.setup()\n\nclass ToDoBot:\n\n class UserData:\n def __init__(self, text, chat_id, items):\n self.text = text\n self.chat_id = chat_id\n self.items = items\n\n def __init__(self, todo_queue):\n \"\"\" The data in queue contains tuples with the text of the message received from\n the user and user's chat id in (text, chat_id) format \"\"\"\n self.queue = todo_queue\n self.calls = {\n '/list': self.call_list,\n '/done': self.call_delete_keyboard,\n '/start': self.call_start,\n '/clear': self.call_clear\n }\n\n def run(self):\n while not self.queue.empty():\n text, chat_id = self.queue.get()\n items = database.get_items(chat_id)\n userdata = self.UserData(text, chat_id, items)\n\n if userdata.text in self.calls:\n self.calls[userdata.text](userdata)\n\n elif userdata.text.startswith('/'):\n continue\n\n elif userdata.text in userdata.items:\n self.delete_item(userdata)\n\n else:\n database.add_item(userdata)\n\n\n def call_start(self, userdata):\n telegram.send_message(\"Welcome to your personal To Do list. Send any text to me and I'll store it as an\"\n \" item. Send /done to remove items\", userdata.chat_id)\n\n def call_list(self, userdata):\n if userdata.items:\n text_of_items = '\\n'.join(userdata.items)\n telegram.send_message(text_of_items, userdata.chat_id)\n else:\n telegram.send_message('The list is empty, type anything you want to add', userdata.chat_id)\n\n def call_clear(self, userdata):\n if userdata.items:\n database.clear_items(userdata)\n telegram.send_message('The list has been cleared', userdata.chat_id)\n else:\n telegram.send_message('The list is empty', userdata.chat_id)\n\n def call_delete_keyboard(self, userdata):\n if userdata.items:\n keyboard = telegram.build_keyboard(userdata.items)\n telegram.send_message('Select an item to delete', userdata.chat_id, keyboard)\n else:\n telegram.send_message('The list is empty', userdata.chat_id)\n\n def delete_item(self, userdata):\n database.delete_item(userdata)\n userdata.items.remove(userdata.text)\n self.call_delete_keyboard(userdata)\n\n","repo_name":"tonybruhh/ToDoBot","sub_path":"todobot.py","file_name":"todobot.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"6183160869","text":"import cv2\r\nimport glob\r\nimport numpy as np\r\nimport pickle\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Activation, Dense, Conv2D , Flatten, MaxPool2D, BatchNormalization, Dropout, GlobalMaxPool2D\r\nfrom tensorflow.keras.optimizers import Adam, RMSprop\r\nfrom sklearn.metrics import precision_recall_fscore_support, accuracy_score\r\nfrom sklearn.utils import shuffle\r\n\r\n\r\n#Data Augmentation\r\nfromPath = '../test/'\r\naugPath = '../aug_test/'\r\nclassName = {0:'Aavad',1:'Chikoo',2:'Jamun',3:'Raat_Rani',4:'Umbaro'}\r\n\r\n\r\ndatagen = ImageDataGenerator(\r\n rotation_range=40,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest'\r\n)\r\n\r\nfor key,values in className.items() :\r\n for image in glob.glob(fromPath + values + '/*.jpg'):\r\n img = cv2.imread(image)\r\n # cv2.imshow('image',g)\r\n da_img = img.reshape(1,img.shape[0], img.shape[1], 3)\r\n print('new img shape: ',da_img.shape)\r\n i=0\r\n for batch in datagen.flow(da_img,save_to_dir=augPath + values,save_format='jpg'):\r\n i += 1\r\n if i>20:\r\n break\r\n\r\n#Saving Image Matrix into pickles\r\nli = []\r\nlabels =[]\r\nfor key,values in className.items() :\r\n for img in glob.glob(augPath + values + '/*.jpg'):\r\n g = cv2.imread(img)\r\n print(g.shape)\r\n # cv2.imshow('image',g)\r\n g = cv2.resize(g,(224,224))\r\n g = g.reshape(g.shape[0], g.shape[1], 3)\r\n print('g: ',g.shape)\r\n li.append(g)\r\n labels.append(key)\r\n\r\n\r\nfeatures = 'test'+\".pkl\"\r\nclass_labels = 'TestClassLabels'+\".pkl\"\r\n\r\nli = np.array(li)\r\nlabels = np.array(labels)\r\n\r\nfo = open(features, \"wb\")\r\npickle.dump(li, fo)\r\nfo.close()\r\n\r\nfo = open(class_labels, \"wb\")\r\npickle.dump(labels, fo)\r\nfo.close()\r\n\r\nprint(labels)\r\nprint(li.shape)\r\n'''\r\n\r\n\r\n\r\ngpus = tf.config.list_physical_devices('GPU')\r\nif gpus:\r\n # Restrict TensorFlow to only allocate 1GB of memory on the first GPU\r\n try:\r\n tf.config.set_logical_device_configuration(gpus[0],[tf.config.LogicalDeviceConfiguration(memory_limit=4500)])\r\n logical_gpus = tf.config.list_logical_devices('GPU')\r\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\r\n except RuntimeError as e:\r\n # Virtual devices must be set before GPUs have been initialized\r\n print(e)\r\n\r\n#Loading pickle files\r\nfp = open('train.pkl', \"rb\")\r\ntrain_features = pickle.load(fp)\r\nfp.close()\r\n\r\nfp = open('TrainClassLabels.pkl', \"rb\")\r\ntrain_cls_labels = pickle.load(fp)\r\nfp.close()\r\n\r\nfp = open('test.pkl', \"rb\")\r\ntest_features = pickle.load(fp)\r\nfp.close()\r\n\r\nfp = open('TestClassLabels.pkl', \"rb\")\r\ntest_cls_labels = pickle.load(fp)\r\nfp.close()\r\n\r\n#Normalizng data\r\nX_train = train_features/255\r\nX_test = test_features/255\r\nY_train = train_cls_labels\r\nY_test = test_cls_labels\r\nX_train, Y_train = shuffle(X_train, Y_train)\r\nX_test, Y_test = shuffle(X_test, Y_test)\r\nprint(X_train.shape, X_test.shape)\r\nprint(Y_train.shape, Y_test.shape)\r\n\r\n#Training of CNN model\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(224, 224, 3)))\r\nmodel.add(MaxPool2D(pool_size=(2, 2)))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(100, 
activation='relu'))\r\nmodel.add(Dense(5, activation='softmax'))\r\n\r\nmodel.compile(loss='sparse_categorical_crossentropy',optimizer=RMSprop(learning_rate=0.01),metrics=['accuracy'])\r\nmodel.fit(X_train,Y_train,epochs=10,validation_data=(X_test,Y_test))\r\nmodel.save('model.h5')\r\n\r\n#Checking accuracy\r\nY_pred = model.predict(X_test)\r\nY_pred = np.argmax(Y_pred,axis=1)\r\nacc = accuracy_score(Y_test, Y_pred)\r\nprint('testing accuracy: ',acc)\r\n'''","repo_name":"RajPanjwani-2001/Plant-Classification","sub_path":"Codes/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"42074343385","text":"#Crie um programa que tenha uma tupla totalmente preenchida com uma contagem por extenso, de zero até vinte. Seu programa deverá ler um número pelo teclado (entre 0 e 20) e mostrá-lo por extenso\n\nextenso = ('Zero', 'Um', 'Dois', 'Tres', 'Quatro', 'Cinco', 'Seis', 'Sete', 'Oito', 'Nove', 'Dez', 'Onze', 'Doze', 'Treze', 'Quatorze', 'Quinze', 'Dezesseis', 'Dezessete','Dezoito', 'Dezenove', 'Vinte')\nwhile True:\n num = int(-1)\n while num not in range(0,len(extenso)):\n num = int(input('Digite um número de 0 a 20 para saber seus nome por extenso:\\n>>> '))\n \n print(f'O número {num} por extenso é: {extenso[num]}')\n \n answer = str(input('Deseja saber outro número? [S/N]\\n>>> '))[0]\n if answer in 'nN':\n break","repo_name":"LeonardoSextare/Curso-Python","sub_path":"Curso em Video - Guanabara/Mundo 3/!Exercicios/ex072 - Numero por Extenso.py","file_name":"ex072 - Numero por Extenso.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"25402384958","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExtract certificate stored in the APK as PEM\n\"\"\"\n\n\nimport sys\nimport argparse\nfrom apk_parse.apk import APK\n\n\ndef main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(description='Extracts PEM certificates from APK files')\n parser.add_argument('files', nargs=argparse.ZERO_OR_MORE, default=[], help='APK files')\n parser.add_argument('-t', dest='text', default=False, action='store_const', const=True,\n help='show also text representation')\n args = parser.parse_args()\n\n for file_name in args.files:\n apkf = APK(file_name)\n if args.text:\n print(apkf.cert_text)\n\n pem = apkf.cert_pem\n print(pem)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"ph4r05/codesign-analysis","sub_path":"codesign/android/apk2cert.py","file_name":"apk2cert.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"40310655475","text":"import uuid\nimport os\nimport speech_recognition as sr\nfrom pydub import AudioSegment\nimport tempfile\n\n\ndef decorator_remove_file(func):\n def wrapper(*args, **kwargs):\n rez = func(*args, **kwargs)\n try:\n os.remove('voice_message.ogg')\n os.remove('voice_message.wav')\n except:\n pass\n return rez\n return wrapper\n#\ndef convert_ogg_wav(file):\n wfn = file.replace('.ogg', '.wav')\n x = AudioSegment.from_file(file)\n x.export(wfn, format='wav')\n\n\nlanguage='ru_RU'\n\n\n@decorator_remove_file\ndef audio_to_text(file):\n r = sr.Recognizer()\n with sr.AudioFile(file) as source:\n audio = r.record(source)\n text = r.recognize_google(audio_data=audio, language=language)\n return text\n\n\n\ndef convert_and_recognize(file_path):\n # Создаем временный файл, который будет автоматически удаляться после закрытия\n with tempfile.NamedTemporaryFile(delete=True) as temp_wav:\n audio = AudioSegment.from_ogg(file_path)\n audio.export(temp_wav.name, format=\"wav\") # Экспортируем аудио в wav-формате во временный файл\n\n recognizer = sr.Recognizer()\n with sr.AudioFile(temp_wav.name) as source:\n # Записываем аудио из файла\n audio_file = recognizer.record(source)\n # Применяем распознавание речи с помощью Google Speech Recognition\n try:\n result = recognizer.recognize_google(audio_file, language='ru-RU')\n print('Распознан текст:', result)\n return result\n except sr.UnknownValueError:\n print(\"Google Speech Recognition не смог понять аудио\")\n except sr.RequestError:\n print(\"Could not request results from Google Speech Recognition service\")\n\n\nasync def dewnload_and_converted_audio_text(event):\n if event.message.voice:\n # Получаем голосовое сообщение\n voice_message = await event.message.download_media()\n\n # Создаем временный файл, который будет автоматически удаляться после закрытия\n with tempfile.NamedTemporaryFile(delete=True) as temp_ogg:\n # Копируем голосовое сообщение во временный файл\n with open(voice_message, 'rb') as file:\n temp_ogg.write(file.read())\n\n # Преобразуем и распознаем речь\n text = convert_and_recognize(temp_ogg.name)\n return f'👺 Voice\\n{text}'\n\nasync def esli_voice_to_text_ili_text_text(event):\n return f'💥🔊💭 {await dewnload_and_converted_audio_text(event)}\\n{event.message.message}' if event.message.voice else event.message.message\n # if event.message.voice:#если сообщение голосовое\n # text =f'💥🔊💭 {await dewnload_and_converted_audio_text(event)}'\n # else:\n # text = event.message.message # достаем только текст сообщени","repo_name":"nasket-it/sanchos","sub_path":"audio_text.py","file_name":"audio_text.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"38685876642","text":"# -*- coding: utf-8 -*-\n\n\"\"\"the zimbra module provides an interface to interact with zimbra\n\"\"\"\n\nimport re\nimport json\n\nfrom bs4 import BeautifulSoup\n\nfrom dhbw.util import ImporterSession, reqget, reqpost, url_get_fqdn\nfrom dhbw.util import ServiceUnavailableException, LoginRequiredException\n\n#------------------------------------------------------------------------------#\n# H E L P E R - F U N C T I O N S\n#------------------------------------------------------------------------------#\n\ndef _entity_list(in_list, out_list, in_type):\n \"\"\"Adds entities to a list while converting an entity string to a dict.\n\n Parameters\n ----------\n in_list : List[str]\n Description\n out_list : List[Dict[str, str]]\n Description\n in_type : str\n Description\n Returns\n -------\n List[Dict[str, str]]\n\n \"\"\"\n\n if in_type == \"recipient\":\n temp = \"t\"\n elif in_type == \"cc\":\n temp = \"c\"\n else:\n temp = \"b\"\n\n for account in in_list:\n temp_dict = {}\n temp_dict[\"t\"] = temp\n temp_dict[\"a\"] = account\n out_list.insert(0, temp_dict)\n\n return out_list\n\n\ndef _fill_contacts_dict_elem(contact):\n \"\"\"Checks for existing keys inside the response contact dict and creates contact dict.\n\n Parameters\n ----------\n contact : Dict[str, str]\n\n Returns\n -------\n Dict\n\n \"\"\"\n temp = {}\n if \"email\" in contact.keys():\n temp[\"email\"] = contact[\"email\"]\n temp[\"id\"] = contact[\"id\"]\n temp[\"firstName\"] = None\n temp[\"lastName\"] = None\n temp[\"jobTitle\"] = None\n if \"firstName\" in contact.keys():\n temp[\"firstName\"] = contact[\"firstName\"]\n if \"lastName\" in contact.keys():\n temp[\"lastName\"] = contact[\"lastName\"]\n if \"jobTitle\" in contact.keys():\n temp[\"jobTitle\"] = contact[\"jobTitle\"]\n\n return temp\n\n#------------------------------------------------------------------------------#\n# Z I M B R A - H A N D L E R\n#------------------------------------------------------------------------------#\n\nclass ZimbraHandler(ImporterSession):\n \"\"\"Handler for interacting with zimbra.\n\n Attributes\n ----------\n url : str\n the given url for zimbra\n accountname : str\n the dhbw mail account\n contacts : List[Dict[str, str]]\n a list representing all contacts from zimbra\n realname : str\n the real name of the logged in user\n signatures : List[str]\n a list of all available signatures to the user\n\n Methods\n -------\n login(self): None\n creates a session for the user\n logout(self): None\n sends a logout request\n scrape(self): None\n scrape the wanted data from the website\n get_contacts(self): None\n import contacts from the default \"contact\" book\n new_contact(self, contact_dict): None\n create a new contact inside the default contact book\n remove_contact(self, contact_id): None\n remove an existing contact from the default contact book\n _create_entities_list(self, recipients, rec_cc, rec_bcc): List[Dict[str, str]]\n create a list with dictionary elements\n _generate_mail(self, mail_dict): Dict[str, Any]\n build the mail in the needed format for zimbra\n send_mail(self, mail_dict): None\n sends a mail to the soap backend of zimbra\n \"\"\"\n\n url = \"https://studgate.dhbw-mannheim.de/zimbra/\"\n\n __slots__ = (\"accountname\", \"contacts\", \"realname\", \"signatures\",)\n\n def __init__(self):\n super().__init__()\n self.accountname = \"\"\n self.contacts = []\n self.headers[\"Host\"] = url_get_fqdn(ZimbraHandler.url)\n self.realname = \"\"\n self.signatures = []\n\n async def 
login(self, username, password):\n \"\"\"Authenticate the user against zimbra.\n\n Parameters\n ----------\n username: str\n the username for the authentication process\n password: str\n the password for the authentication process\n\n Returns\n -------\n ZimbraHandler\n \"\"\"\n url = ZimbraHandler.url\n\n # add accountname\n self.accountname = username\n\n # set headers for post request\n self.headers[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n self.headers[\"Cookie\"] = \"ZM_TEST=true\"\n\n # form data\n payload = {\n \"client\": \"preferred\",\n \"loginOp\": \"login\",\n \"username\": username,\n \"password\": password\n }\n\n # LOGIN - POST REQUEST\n try:\n r_login = reqpost(\n url=url,\n headers=self.headers,\n payload=payload,\n allow_redirects=False,\n return_code=302\n )\n except ServiceUnavailableException as service_err:\n raise service_err\n finally:\n # drop content-type header\n self.drop_header(\"Content-Type\")\n\n # add authentication cookie to the headers\n self.auth_token = r_login.headers[\"Set-Cookie\"].split(\";\")[0]\n self.headers[\"Cookie\"] = self.headers[\"Cookie\"] + \"; \" + self.auth_token\n\n return self\n\n async def scrape(self):\n # TODO documentation?\n \"\"\"Scrape the selected data from zimbra.\n\n Returns\n -------\n None\n \"\"\"\n url = ZimbraHandler.url\n\n try:\n r_home = reqget(\n url=url,\n headers=self.headers,\n )\n except ServiceUnavailableException as service_err:\n raise service_err\n\n content_home = BeautifulSoup(r_home.text, \"lxml\")\n\n # improvement idea -> let it loop reversed, since needed content\n # is inside the last / one of the last script tag(s)\n try:\n tag_script_all = content_home.find_all(\"script\")\n except AttributeError as attr_err:\n raise LoginRequiredException() from attr_err\n\n for tag_script in tag_script_all:\n if \"var batchInfoResponse\" in str(tag_script.string):\n temp = re.search(\n r\"var\\ batchInfoResponse\\ =\\ \\{\\\"Header\\\":.*\\\"_jsns\\\":\\\"urn:zimbraSoap\\\"\\};\",\n str(tag_script.string)\n )\n break\n temp_json = json.loads(\n re.sub(r\"(var\\ batchInfoResponse\\ =\\ )|(;$)\", \"\", temp.group(0))\n )\n\n self.realname = temp_json[\"Body\"][\"BatchResponse\"][\"GetInfoResponse\"][0][\"attrs\"][\"_attrs\"][\"cn\"]\n\n self.scraped_data = temp_json\n\n def get_contacts(self):\n \"\"\"Import contacts from the default contact book.\n\n Returns\n -------\n None\n \"\"\"\n url = ZimbraHandler.url\n origin = \"https://\" + url_get_fqdn(url)\n\n self.headers[\"Content-Type\"] = \"application/soap+xml; charset=utf-8\"\n self.headers[\"Referer\"] = url\n self.headers[\"Origin\"] = origin\n\n # TODO query is limited to 100 contact entities --> query all contact entities\n\n query = {\n \"Header\": {\n \"context\": {\n \"_jsns\": \"urn:zimbra\",\n \"account\": {\n \"_content\": self.accountname,\n \"by\": \"name\"\n }\n }\n },\n \"Body\": {\n \"SearchRequest\": {\n \"_jsns\": \"urn:zimbraMail\",\n \"sortBy\": \"nameAsc\",\n \"offset\": 0,\n \"limit\": 100,\n \"query\": \"in:contacts\",\n \"types\": \"contact\"\n }\n }\n }\n\n try:\n r_contacts = reqpost(\n url=origin + \"/service/soap/SearchRequest\",\n headers=self.headers,\n payload=json.dumps(query)\n ).json()\n except ServiceUnavailableException as service_err:\n raise service_err\n finally:\n self.drop_header(\"Content-Type\")\n\n try:\n contacts = r_contacts[\"Body\"][\"SearchResponse\"][\"cn\"]\n except KeyError:\n contacts = []\n\n for contact in contacts:\n cnt = contact[\"_attrs\"]\n cnt[\"id\"] = contact[\"id\"]\n temp = 
_fill_contacts_dict_elem(cnt)\n if temp:\n self.contacts.append(temp)\n\n def new_contact(self, contact_dict):\n \"\"\"Create a new contact inside the default contact book.\n\n Parameters\n ----------\n contact_dict : Dict\n\n Returns\n -------\n None\n \"\"\"\n url = ZimbraHandler.url\n origin = \"https://\" + url_get_fqdn(url)\n\n self.headers[\"Content-Type\"] = \"application/soap+xml; charset=utf-8\"\n self.headers[\"Referer\"] = url\n self.headers[\"Origin\"] = origin\n\n contact_details = []\n for key, value in contact_dict.items():\n if value:\n contact_details.append(\n {\n \"n\": key,\n \"_content\": value\n }\n )\n\n contact = {\n \"Header\": {\n \"context\": {\n \"_jsns\": \"urn:zimbra\",\n \"account\": {\n \"_content\": self.accountname,\n \"by\": \"name\"\n },\n \"auth_token\": self.auth_token\n }\n },\n \"Body\": {\n \"CreateContactRequest\": {\n \"_jsns\": \"urn:zimbraMail\",\n \"cn\": {\n \"l\": \"7\",\n \"a\": contact_details\n }\n }\n }\n }\n\n try:\n r_contact = reqpost(\n url=origin + \"/service/soap/CreateContactRequest\",\n headers=self.headers,\n payload=json.dumps(contact),\n ).json()\n except ServiceUnavailableException as service_err:\n raise service_err\n finally:\n self.drop_header(\"Content-Type\")\n\n try:\n contact_dict[\"id\"] = r_contact[\"Body\"][\"CreateContactResponse\"][\"cn\"][0][\"id\"]\n except AttributeError as attr_err:\n raise LoginRequiredException() from attr_err\n\n self.contacts.append(contact_dict)\n\n def remove_contact(self, contact_id):\n \"\"\"remove an existing contact from the default contact book\n\n Parameters\n ----------\n contact_id : str\n\n \"\"\"\n url = ZimbraHandler.url\n origin = \"https://\" + url_get_fqdn(url)\n\n self.headers[\"Content-Type\"] = \"application/soap+xml; charset=utf-8\"\n self.headers[\"Referer\"] = url\n self.headers[\"Origin\"] = origin\n\n del_contact = {\n \"Header\": {\n \"context\": {\n \"_jsns\": \"urn:zimbra\",\n \"account\": {\n \"_content\": self.accountname,\n \"by\": \"name\"\n },\n \"auth_token\": self.auth_token\n }\n },\n \"Body\": {\n \"ContactActionRequest\": {\n \"_jsns\": \"urn:zimbraMail\",\n \"action\": {\n \"id\": contact_id,\n \"l\": \"3\",\n \"op\": \"move\"\n }\n }\n }\n }\n\n try:\n reqpost(\n url=origin + \"/service/soap/ContactActionRequest\",\n headers=self.headers,\n payload=json.dumps(del_contact)\n )\n except ServiceUnavailableException as service_err:\n raise service_err\n finally:\n self.drop_header(\"Content-Type\")\n\n i = 0\n while i < len(self.contacts):\n if self.contacts[i][\"id\"] == contact_id:\n break\n i += 1\n\n del self.contacts[i]\n\n def _create_entities_list(self, recipients, rec_cc, rec_bcc):\n \"\"\"Create a list with dictionary elements.\n\n Parameters\n ----------\n recipients : List[str]\n\n rec_cc : List[str]\n\n rec_bcc : List[str]\n\n\n Returns\n -------\n List[Dict[str, str]]\n \"\"\"\n entities_list = [\n {\n \"t\": \"f\",\n \"a\": self.accountname,\n \"p\": self.realname\n }\n ]\n\n entities_list = _entity_list(rec_bcc, entities_list, \"bcc\")\n entities_list = _entity_list(rec_cc, entities_list, \"cc\")\n entities_list = _entity_list(recipients, entities_list, \"recipient\")\n\n return entities_list\n\n def _generate_mail(self, mail_dict):\n \"\"\"build the mail in the needed format for zimbra\n\n Parameters\n ----------\n mail_dict : Dict\n\n Returns\n -------\n Dict[str, Any]\n \"\"\"\n header_dict = {\n \"context\": {\n \"_jsns\": \"urn:zimbra\",\n \"account\": {\n \"_content\": self.accountname,\n \"by\": \"name\"\n },\n \"auth_token\": 
self.auth_token\n }\n }\n\n entities = self._create_entities_list(\n mail_dict[\"recipients\"],\n mail_dict[\"rec_cc\"],\n mail_dict[\"rec_bcc\"]\n )\n\n message_dict = {\n \"_jsns\": \"urn:zimbraMail\",\n \"m\": {\n \"e\": entities,\n \"su\": {\n \"_content\": mail_dict[\"subject\"]\n },\n \"mp\": {\n \"ct\": mail_dict[\"cttype\"],\n \"content\": {\n \"_content\": mail_dict[\"content\"]\n }\n }\n }\n }\n\n # join the dicts to create the whole mail\n mail = {\n \"Header\": header_dict,\n \"Body\": {\n \"SendMsgRequest\": message_dict\n }\n }\n\n return mail\n\n def send_mail(self, mail_dict):\n \"\"\"Sends a mail to the soap backend of zimbra.\n\n Parameters\n ----------\n mail_dict: SendMailDict\n a dictionary containing recipients, subject, content-type and the actual content\n\n Returns\n -------\n None\n \"\"\"\n # create mail\n mail = self._generate_mail(mail_dict)\n\n # IMPROVEMENT IDEA:\n # store mail_dict somewhere, in case that the service is unavailable\n\n url = ZimbraHandler.url\n origin = \"https://\" + url_get_fqdn(url)\n\n self.headers[\"Content-Type\"] = \"application/soap+xml; charset=utf-8\"\n self.headers[\"Referer\"] = url\n self.headers[\"Origin\"] = origin\n\n try:\n reqpost(\n url=origin + \"/service/soap/SendMsgRequest\",\n headers=self.headers,\n payload=json.dumps(mail),\n return_code=200\n )\n except ServiceUnavailableException as service_err:\n raise service_err\n finally:\n self.drop_header(\"Content-Type\")\n\n def logout(self):\n \"\"\"sends a logout request\n\n Returns\n -------\n None\n \"\"\"\n url = ZimbraHandler.url\n\n try:\n reqget(\n url=url,\n headers=self.headers,\n params={\"loginOp\": \"logout\"},\n return_code=200\n )\n except ServiceUnavailableException as service_err:\n raise service_err\n\n self.auth_token = \"\"\n","repo_name":"Software-Engineering-DHBW/BonoboBoard","sub_path":"bonobo-board/modules/dhbw/zimbra.py","file_name":"zimbra.py","file_ext":"py","file_size_in_byte":15644,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"82"}
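The `get_contacts` method in the zimbra.py record above carries a TODO about the 100-entity query limit. A hypothetical pagination sketch follows, not the project's own code: it assumes a `requests.Session` that already carries the ZM_AUTH_TOKEN cookie, the same `/service/soap/SearchRequest` endpoint the module posts to, and Zimbra's documented `more` flag on SearchResponse.

```python
# Hypothetical sketch: page through Zimbra's SOAP SearchRequest instead of
# stopping at the first 100 contacts. `session`, `soap_url`, and `account`
# are assumptions supplied by the caller.
import json
import requests

def fetch_all_contacts(session: requests.Session, soap_url: str, account: str):
    contacts, offset, limit = [], 0, 100
    while True:
        query = {
            "Header": {"context": {"_jsns": "urn:zimbra",
                                   "account": {"_content": account, "by": "name"}}},
            "Body": {"SearchRequest": {"_jsns": "urn:zimbraMail",
                                       "sortBy": "nameAsc",
                                       "offset": offset, "limit": limit,
                                       "query": "in:contacts", "types": "contact"}},
        }
        resp = session.post(soap_url, data=json.dumps(query),
                            headers={"Content-Type": "application/soap+xml; charset=utf-8"})
        body = resp.json()["Body"]["SearchResponse"]
        contacts.extend(body.get("cn", []))
        # Zimbra's SearchResponse sets "more" when another page exists.
        if not body.get("more"):
            return contacts
        offset += limit
```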
+{"seq_id":"34336940367","text":"from pdfminer3.pdfpage import PDFPage\nfrom pdfminer3.pdfinterp import PDFResourceManager\nfrom pdfminer3.pdfinterp import PDFPageInterpreter\nfrom pdfminer3.converter import TextConverter\nimport io\nimport os\nimport shutil\nfrom PyPDF2 import PdfFileMerger\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\n\ndef scan_folder(parent, keyword):\n lista = []\n # iterate over all the files in directory 'parent'\n for file_name in os.listdir(parent):\n resource_manager = PDFResourceManager()\n handle = io.StringIO()\n converter = TextConverter(resource_manager, handle)\n page_interpreter = PDFPageInterpreter(resource_manager, converter)\n if file_name.endswith(\".pdf\"):\n # if it's a txt file, print its name (or do whatever you want)\n arquivo = open(parent + \"/\" + file_name, 'rb')\n with arquivo as fh:\n\n for page in PDFPage.get_pages(fh,\n caching=True,\n check_extractable=True):\n page_interpreter.process_page(page)\n text = handle.getvalue()\n if (text.find(keyword) != -1):\n # print(file_name + \" TEEM\")\n lista.append(parent + \"/\" + file_name)\n # else:\n # print(file_name + \" NAOOOO\")\n converter.close()\n handle.close()\n else:\n current_path = \"\".join((parent, \"/\", file_name))\n if os.path.isdir(current_path):\n # if we're checking a sub-directory, recall this method\n scan_folder(current_path)\n return lista\n\n\ndef merger(output_path, input_paths):\n pdf_merger = PdfFileMerger()\n file_handles = []\n\n for path in input_paths:\n pdf_merger.append(path)\n\n with open(output_path, 'wb') as fileobj:\n pdf_merger.write(fileobj)\n\n\ndef searchPDF(parent, keyword):\n lista = []\n # iterate over all the files in directory 'parent'\n for file_name in parent:\n resource_manager = PDFResourceManager()\n handle = io.StringIO()\n converter = TextConverter(resource_manager, handle)\n page_interpreter = PDFPageInterpreter(resource_manager, converter)\n arquivo = open(file_name, 'rb')\n with arquivo as fh:\n for page in PDFPage.get_pages(fh, caching=True, check_extractable=True):\n page_interpreter.process_page(page)\n text = handle.getvalue()\n if (text.find(keyword) != -1):\n # print(file_name + \" TEEM\")\n lista.append(file_name)\n # else:\n # print(\"NAO\")\n converter.close()\n handle.close()\n return lista\n\n\ndef splitter(path, output_folder):\n for x in path:\n fname = os.path.splitext(os.path.basename(x))[0]\n pdf = PdfFileReader(x)\n for page in range(pdf.getNumPages()):\n pdf_writer = PdfFileWriter()\n pdf_writer.addPage(pdf.getPage(page))\n pages = page + 1\n if page >= 99:\n pagename = str(pages)\n elif page >= 9:\n pagename = \"0\" + str(pages)\n else:\n pagename = \"00\" + str(pages)\n output_filename = output_folder + '/{}_page_{}.pdf'.format(\n fname, pagename)\n with open(output_filename, 'wb') as out:\n pdf_writer.write(out)\n # print('Created: {}'.format(output_filename))\n\n\ndef splitterCustom(path, output_folder, doublepageslist,originalfile):\n for x in path:\n fname = os.path.splitext(os.path.basename(x))[0]\n pdf = PdfFileReader(x)\n print(x)\n filenumber = find_between(x, \"_file_\", \".pdf\")\n if filenumber not in originalfile:\n doublepageslist2 = []\n else:\n doublepageslist2 = doublepageslist\n b = True\n for page in range(pdf.getNumPages()):\n pagenamemerged = str(page + 1) + \";\" + filenumber\n print(pagenamemerged)\n pdf_writer = PdfFileWriter()\n if b:\n if pagenamemerged not in doublepageslist2:\n pdf_writer.addPage(pdf.getPage(page))\n pages = page + 1\n if page >= 99:\n pagename = str(pages)\n elif page >= 
9:\n pagename = \"0\" + str(pages)\n else:\n pagename = \"00\" + str(pages)\n output_filename = output_folder + '/{}_page_{}.pdf'.format(fname, pagename)\n with open(output_filename, 'wb') as out:\n pdf_writer.write(out)\n # print('Created: {}'.format(output_filename))\n b = True\n else:\n pdf_writer.addPage(pdf.getPage(page))\n pdf_writer.addPage(pdf.getPage(page + 1))\n pages = page + 1\n if page >= 99:\n pagename = str(pages)\n elif page >= 9:\n pagename = \"0\" + str(pages)\n else:\n pagename = \"00\" + str(pages)\n output_filename = output_folder + '/{}_page_{}.pdf'.format(\n fname, pagename)\n with open(output_filename, 'wb') as out:\n pdf_writer.write(out)\n # print('Created: {}'.format(output_filename))\n b = False\n else:\n b = True\n\n\ndef splitterNew(path, output_folder):\n for x in path:\n name = os.path.splitext(os.path.basename(x))[0]\n print(*name)\n pdf = PdfFileReader(x)\n for page in range(pdf.getNumPages()):\n pdf_writer = PdfFileWriter()\n pdf_writer.addPage(pdf.getPage(page))\n output_filename = output_folder + '/{}_{}.pdf'.format(\n page + 1, name)\n with open(output_filename, 'wb') as out:\n pdf_writer.write(out)\n # print('Created: {}'.format(output_filename))\n\n\ndef list_files_mac(dir):\n names = []\n for root, dirs, files in os.walk(dir):\n for file in files:\n if file.endswith('.pdf'):\n names.append(dir + \"/\" + file)\n return names\n\n\ndef list_files_win(dir):\n names = []\n for root, dirs, files in os.walk(dir):\n for file in files:\n if file.endswith('.pdf'):\n names.append(dir + \"/\" + file)\n return names\n\n\ndef list_files_walk(directory):\n fu = [os.path.join(dp, f) for dp, dn, filenames in os.walk(directory) for f in filenames if\n os.path.splitext(f)[1].lower() == '.pdf']\n return (fu)\n\n\ndef newScan(parent):\n lista = []\n f = open(\"designs.txt\", \"w+\")\n g = open(\"paths.txt\", \"w+\")\n # iterate over all the files in directory 'parent'\n for file_name in parent:\n resource_manager = PDFResourceManager()\n handle = io.StringIO()\n converter = TextConverter(resource_manager, handle)\n page_interpreter = PDFPageInterpreter(resource_manager, converter)\n arquivo = open(file_name, 'rb')\n with arquivo as fh:\n for page in PDFPage.get_pages(fh, caching=True, check_extractable=True):\n page_interpreter.process_page(page)\n text = handle.getvalue()\n word = find_between(text, \"SKUPrice1\", \"$\")\n print(word)\n # number = has_sequence(word)\n # stringnumber = ''.join(map(str, number))\n # artwork = find_between(word,\"1\",stringnumber)\n\n f.write(word + \"\\n\")\n g.write(file_name + \"\\n\")\n converter.close()\n handle.close()\n f.close()\n g.close()\n return lista\n\n\ndef scanDoublePages(parent, galleryprices, dailyprices):\n daily = open(\"daily.txt\", \"w+\")\n gal = open(\"gallery.txt\", \"w+\")\n sweet = open(\"sweet.txt\", \"w+\")\n duplicatestest = open(\"duplicatestest.txt\", \"w+\")\n number = 0\n numberlist = []\n originalfile = []\n folder = \"temp\"\n cleanFolder(folder)\n # listing the files inside the folder\n parentnew = list_files_walk(parent)\n # creating a temporary folder\n os.mkdir(folder)\n # splitting the temporary files\n splitter(parentnew, folder)\n # getting the temporary files\n parentnew2 = list_files_walk(folder)\n # sorting the files by name\n parentnew2.sort()\n # iterate over all the files in directory 'parent'\n for file_name in parentnew2:\n resource_manager = PDFResourceManager()\n handle = io.StringIO()\n converter = TextConverter(resource_manager, handle)\n page_interpreter = 
PDFPageInterpreter(resource_manager, converter)\n arquivo = open(file_name, 'rb')\n if \"page_001.pdf\" in file_name:\n number = 0\n with arquivo as fh:\n for page in PDFPage.get_pages(fh, caching=True, check_extractable=True):\n booleangal = True\n booleanSweet = True\n page_interpreter.process_page(page)\n text = handle.getvalue()\n text = text[:-1]\n text = text + \"¬¬¬\"\n #print(text)\n # searching the reference number\n search = find_between(text, \"#\", \"Order\")\n # searching the order number\n search2 = find_between(text, \"# \", \"Order Date\")\n # Searching the design name\n name = find_between(text, \"SKUPrice1\", \"$\")\n # Prices\n price = find_between(text, name, \",\")\n # Products\n products = find_between(text, \"SKUPrice1\", \"¬¬¬\")\n #print(products)\n originalfilenumber = find_between(file_name, \"_file_\", \"_page\")\n print(originalfilenumber)\n if search == \"\":\n numberlist.append(str(number) + \";\" + originalfilenumber)\n originalfile.append(originalfilenumber)\n # print(result[number-1])\n # f.write(result[number - 1] + \"\\n\")\n else:\n duplicatestest.write(search2 + \"\\n\")\n for daprices in dailyprices:\n if products.find(daprices) != -1:\n print(search2 + \" Daily Shirt\")\n booleangal = False\n daily.write(name + \"^\" + file_name + \"^\" + search2 + \"\\n\")\n break\n if booleangal:\n for gaprices in galleryprices:\n if products.find(gaprices) != -1:\n print(search2 + \" Gallery Shirt\")\n gal.write(name + \"^\" + file_name + \"^\" + search2 + \"\\n\")\n booleanSweet = False\n break\n if booleanSweet:\n sweet.write(name + \"^\" + file_name + \"^\" + search2 + \"\\n\")\n print(search2 + \" Sweet Deal\")\n number = number + 1\n converter.close()\n handle.close()\n daily.close()\n gal.close()\n duplicatestest.close()\n sweet.close()\n cleanFolder(folder)\n print(originalfile)\n print(\"Files with double pages: \")\n print(numberlist)\n os.mkdir(folder)\n splitterCustom(parentnew, folder, numberlist,originalfile)\n\n\ndef find_between(s, first, last):\n try:\n start = s.index(first) + len(first)\n end = s.index(last, start)\n return s[start:end]\n except ValueError:\n return \"\"\n\n\ndef has_sequence(s):\n val = []\n number = []\n length = len(s)\n for x in range(length):\n try:\n prov = int(s[x])\n val.append(prov)\n\n except ValueError:\n val.append(\"%\")\n\n for x in range(length):\n if val[x] == \"%\":\n 1\n else:\n if val[x + 1] == \"%\":\n 1\n else:\n number.append(val[x])\n return number\n\n\ndef sortFiles(file_name):\n f = open(file_name + \".txt\", \"r\")\n contents = f.readlines()\n contents.sort()\n with open(file_name + \"_sorted.txt\", \"w+\") as g:\n for item in contents:\n g.write(item)\n f.close()\n g.close()\n\n\ndef cleanFolder(path):\n if os.path.exists(path):\n shutil.rmtree(path)\n\ndef checkIfDuplicates(listOfElems):\n for elem in listOfElems:\n if listOfElems.count(elem) > 1:\n return True\n return False","repo_name":"williamzu/PDF_Miner","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":12733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
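The recursive branch of `scan_folder` in the record above calls `scan_folder(current_path)` without the `keyword` argument (a TypeError at runtime) and discards the result. A corrected sketch, with the pdfminer3 text check injected as a callable so the sketch stays self-contained; the `contains` parameter is an assumption, not part of the original:

```python
import os

def scan_folder_fixed(parent, keyword, contains, matches=None):
    """contains(path, keyword) -> bool, e.g. the pdfminer3 text search above."""
    if matches is None:
        matches = []
    for name in os.listdir(parent):
        path = os.path.join(parent, name)
        if os.path.isdir(path):
            scan_folder_fixed(path, keyword, contains, matches)  # recurse with all args
        elif name.endswith(".pdf") and contains(path, keyword):
            matches.append(path)
    return matches
```

Sharing one accumulator list means the recursive calls need no return-value plumbing at all.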
+{"seq_id":"1999871848","text":"# -*- coding: utf-8 -*-\nfrom .models import Item\nfrom django.utils import timezone\nfrom django.db.models import Q\nfrom django.core import serializers\nimport json\n\ndef create_item(name, checklist):\n\tnewItem = Item(name=name, checklist_id=checklist)\n\tnewItem.save()\n\tmapped_item = item_mapper(newItem)\n\treturn json.dumps(mapped_item)\n\ndef get_all_items():\n\tstartdate = timezone.now() - timezone.timedelta(hours=1)\n\tenddate = timezone.now()\n\tall_items = Item.objects.filter(Q(endtime__range=[startdate, enddate]) | Q(endtime=None))\n\tall_items_orderd = all_items.order_by('createdtime')\n\tall_items_serialized = serializers.serialize('json', all_items_orderd)\n\treturn all_items_serialized\n\ndef get_all_items_by_checklist_id(checklist_id):\n\tstartdate = timezone.now() - timezone.timedelta(hours=1)\n\tenddate = timezone.now()\n\tall_items = Item.objects.filter(Q(\n\t\t\tQ(checklist_id=float(checklist_id))\n\t\t\t& Q(\n\t\t\t\tQ(endtime__range=[startdate, enddate]) | Q(endtime=None))\n\t\t\t)\n\t)\n\tall_items_orderd = all_items.order_by('createdtime')\n\tall_items_orderd = all_items_orderd.order_by('done')\n\tmappedItemList = []\n\tfor item in all_items_orderd:\n\t\tmappedItem = item_mapper(item)\n\t\tmappedItemList.append(mappedItem)\n\tall_items_json = json.dumps(mappedItemList)\n\t#all_items_serialized = serializers.serialize('json', all_items_orderd)\n\treturn all_items_json\n\ndef update_item(id, value):\n\tif value == \"1\":\n\t\tendt = timezone.now()\n\telse:\n\t\tendt = None\n\titem = Item.objects.filter(id=id)\n\titem.update(done = value)\n\titem.update(endtime = endt)\n\ndef remove_item(id):\n\titem = Item.objects.filter(id=id)\n\titem.delete()\n\ndef get_highest_soring_order():\n\tstartdate = timezone.now() - timezone.timedelta(hours=1)\n\tenddate = timezone.now()\n\tall_items = Item.objects.filter(Q(endtime__range=[startdate, enddate]) | Q(endtime=None))\n\tindex = all_items.order_by(\"-ordernumber\")[0]\n\treturn index.ordernumber\n\n\ndef update_item_order(newvalue, id):\n\treorder_items(newvalue)\n\titem = Item.objects.filter(id=id)\n\titem.update(ordernumber=newvalue)\n\ndef reorder_items(i):\n\tstartdate = timezone.now() - timezone.timedelta(hours=1)\n\tenddate = timezone.now()\n\tfor item in items:\n\t\titem.update(ordernumber = F('ordernumber') + 1)\n\ndef item_mapper(rawItem):\n\titem = {}\n\titem['id'] = rawItem.id\n\titem['name'] = rawItem.name\n\titem['done'] = rawItem.done\n\treturn item","repo_name":"andreasastrom/mysocialclub","sub_path":"hello/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"27589884887","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\nimport traceback\nfrom datetime import datetime\nfrom typing import List\n\nfrom fastapi import APIRouter, Request, WebSocket\nfrom starlette.responses import HTMLResponse\n\nfrom current_log.RedisClient import RedisClient\n\nlog_router = APIRouter()\n\nhtml = \"\"\"\n\n\n \n \n 实时日志
\n \n \n \n\"\"\"\n\nredis_client = RedisClient()\n\n\nclass ConnectionManager:\n def __init__(self):\n self.active_connections: List[WebSocket] = []\n\n async def broadcast(self, system_name):\n while True:\n message = redis_client.lpop(system_name)\n if message:\n # await asyncio.gather(\n # *[ws.send_text(message) for ws in self.active_connections],\n # return_exceptions=False,\n # )\n for ws in self.active_connections:\n try:\n await ws.send_text(message)\n except:\n pass\n await asyncio.sleep(0.2)\n\n def start_broadcast(self, system_name):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n asyncio.get_event_loop().run_until_complete(manager.broadcast(system_name))\n # asyncio.get_event_loop().run_forever()\n\n\nmanager = ConnectionManager()\n\n\n@log_router.get(path='/logs')\ndef get_log(request: Request):\n try:\n run_host = str(request.url.netloc)\n user = datetime.now().strftime('%f')\n user_html = html\n user_html = user_html.format(host=run_host, user=user)\n return HTMLResponse(user_html)\n except:\n return {\"error\": traceback.format_exc()}\n\n\n@log_router.websocket(path=\"/log_connect/{user}\")\nasync def broadcast_log_redis(ws: WebSocket, user: str):\n await ws.accept()\n manager.active_connections.append(ws)\n try:\n while True:\n await ws.receive_text()\n await ws.send_text(\"pong\")\n except:\n pass\n finally:\n manager.active_connections.remove(ws)\n\n\n@log_router.get(path=\"/start_generate_log/{system_name}\")\ndef start_generate_log(system_name: str):\n import threading\n threading.Thread(target=manager.start_broadcast, args=(system_name,)).start()\n return {\"message\": \"success\"}\n\n\n@log_router.get(path='/')\ndef test():\n print(\"测试\")\n return {\"message\": \"aaaa\"}\n","repo_name":"cooldowntime/curentlog","sub_path":"current_log/current_log_router.py","file_name":"current_log_router.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
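The `html` template in the record above lost its markup during text extraction; only the page title survives ("实时日志", "real-time log"), yet the `/logs` route still calls `user_html.format(host=..., user=...)`. A hypothetical minimal stand-in with those two placeholders; the original also shipped the websocket JavaScript, whose exact content is not recoverable, so everything below is a reconstruction:

```python
html = """<!DOCTYPE html>
<html>
  <head><title>Real-time log</title></head>
  <body>
    <pre id="log"></pre>
    <script>
      var ws = new WebSocket("ws://{host}/log_connect/{user}");
      // doubled braces survive str.format(); "\\n" reaches JS as a newline
      ws.onmessage = function (ev) {{ document.getElementById("log").append(ev.data + "\\n"); }};
    </script>
  </body>
</html>"""
```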
+{"seq_id":"13883538647","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nimport datetime\n\n\n# Create your views here.\n@api_view()\ndef get_detail(request):\n\n slack_name = request.GET.get('slack_name', None)\n\n track = request.GET.get('track', None)\n\n current_date = datetime.date.today()\n\n\n \n # Get the current UTC time\n current_utc_time = datetime.datetime.utcnow()\n\n # Define a time range of +/- 2 hours\n time_range = datetime.timedelta(hours=2)\n\n # Calculate the minimum and maximum allowed times\n min_time = current_utc_time - time_range\n max_time = current_utc_time + time_range\n\n # Get the current UTC time as a string\n min_time_str = min_time.strftime('%Y-%m-%d %H:%M:%S')\n max_time_str = max_time.strftime('%Y-%m-%d %H:%M:%S')\n\n# Get the name of the day of the week\n day_of_week = current_date.strftime('%A')\n\n detail = { \n \"slack_name\": slack_name,\n \"current_days\": day_of_week,\n \"utc_time\": \"Min. =>\"+ min_time_str + \" - Man =>\" + max_time_str,\n \"track\": track,\n \"github_file_url\": \"https://github.com/hussain4me/zuri-first-assignment/blob/main/api/views.py\",\n \"github_repo_url\": \"https://github.com/hussain4me/zuri-first-assignment\",\n \"status_code\": 200\n }\n\n return Response(detail)\n","repo_name":"hussain4me/zuri-first-assignment","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"14461442187","text":"import tensorflow as tf\nimport helper\n \nmodel = tf.keras.models.load_model(\"pets\")\n\nwhile True:\n url = input(\"Please enter an image url:\")\n try:\n image = tf.keras.utils.get_file(origin=url)\n image = tf.keras.utils.load_img(image)\n break\n except:\n print(\"That is not a valid link\")\n\nhelper.show_predictions(url, model)","repo_name":"Powerlax/ImageSegmentation","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"31564645195","text":"import sys\nfrom turtle import back\nfrom load import *\n\n# this function creates a 2D matrix with pre-determined scores for certain rows\ndef scoreMatrix(x, y, score):\n matrix = [[0] * (len(y)+1) for i in range(len(x)+1)]\n for i in range(len(x)+1):\n matrix[i][0] = i * score\n for j in range(len(y)+1):\n matrix[0][j] = j * score\n return matrix\n\n# this function creates a 2D matrix to backtrack previous locations\ndef backMatrix(x, y):\n matrix = [[0] * (len(y)+1) for i in range(len(x)+1)]\n for i in range(len(x)+1):\n matrix[i][0] = \"up\"\n for j in range(len(y)+1):\n matrix[0][j] = \"left\"\n matrix[0][0] = 0\n return matrix\n\n# this function decides which score to use for DNA scoring\ndef DNAscore(match, mismatch, seq1, seq2):\n if seq1 == seq2:\n return match\n else: \n return mismatch\n\n# this function calculates identities between 2 sequences\ndef identities(seq1, seq2):\n total = len(seq1)\n count = 0\n for i in range(len(seq1)):\n if seq1[i] == seq2[i]:\n count += 1\n \n num = str(count) + \"/\" + str(total)\n percent = \"(\" + str(int(count/total * 100)) + \"%)\"\n return num + \" \" + percent\n\n# this function takes 2 DNA sequences and use global alignment to decide the optimal score\ndef DNAglobal(scores, seq1, seq2):\n\n # get scores from the dnaMatrix file\n matchScore = scores.get(\"match score\")\n mismatchScore = scores.get(\"mismatch score\")\n gapPenalty = scores.get(\"gap penalty\")\n\n # create a 2D matrix using list of list and fill in the basic gap penalties \n matrix = scoreMatrix(seq1, seq2, gapPenalty)\n\n # create another 2D matrix to save the information for backtracking\n backtrack = backMatrix(seq1, seq2)\n\n # fill in all the scores and backtracking info\n for i in range(1, len(seq1)+1):\n for j in range(1, len(seq2)+1):\n diagonal = matrix[i-1][j-1] + DNAscore(matchScore, mismatchScore, seq1[i-1], seq2[j-1])\n left = matrix[i][j-1] + gapPenalty\n up = matrix[i-1][j] + gapPenalty\n matrix[i][j] = max(diagonal, left, up)\n\n if matrix[i][j] == diagonal:\n backtrack[i][j] = \"diagonal\"\n elif matrix[i][j] == left:\n backtrack[i][j] = \"left\"\n else:\n backtrack[i][j] = \"up\"\n\n final_score = matrix[len(seq1)][len(seq2)]\n return backtrack, final_score\n\n# this function takes 2 DNA sequences and use semi-global alignment to decide the optimal score\ndef DNAsemi_global(scores, seq1, seq2):\n\n # get scores from the dnaMatrix file\n matchScore = scores.get(\"match score\")\n mismatchScore = scores.get(\"mismatch score\")\n gapPenalty = scores.get(\"gap penalty\")\n\n matrix = scoreMatrix(seq1, seq2, 0)\n backtrack = backMatrix(seq1, seq2)\n\n for i in range(1, len(seq1)+1):\n for j in range(1, len(seq2)+1):\n diagonal = matrix[i-1][j-1] + DNAscore(matchScore, mismatchScore, seq1[i-1], seq2[j-1])\n\n if i == len(seq1):\n left = matrix[i][j-1]\n else:\n left = matrix[i][j-1] + gapPenalty\n\n if j == len(seq2):\n up = matrix[i-1][j]\n else:\n up = matrix[i-1][j] + gapPenalty\n \n matrix[i][j] = max(diagonal, left, up)\n\n if matrix[i][j] == diagonal:\n backtrack[i][j] = \"diagonal\"\n elif matrix[i][j] == left:\n backtrack[i][j] = \"left\"\n else:\n backtrack[i][j] = \"up\"\n \n final_score = matrix[len(seq1)][len(seq2)]\n return backtrack, final_score\n\n# this function takes 2 DNA sequences and uses local alignment to decide the optimal score\ndef DNAlocal(scores, seq1, seq2):\n matchScore = scores.get(\"match score\")\n mismatchScore = scores.get(\"mismatch score\")\n gapPenalty = scores.get(\"gap penalty\")\n\n matrix 
= scoreMatrix(seq1, seq2, 0)\n backtrack = backMatrix(seq1, seq2)\n max_score = 0\n\n for i in range(1, len(seq1)+1):\n for j in range(1, len(seq2)+1):\n diagonal = matrix[i-1][j-1] + DNAscore(matchScore, mismatchScore, seq1[i-1], seq2[j-1])\n left = matrix[i][j-1] + gapPenalty\n up = matrix[i-1][j] + gapPenalty\n matrix[i][j] = max(diagonal, left, up)\n\n if matrix[i][j] == diagonal:\n backtrack[i][j] = \"diagonal\"\n elif matrix[i][j] == left:\n backtrack[i][j] = \"left\"\n else:\n backtrack[i][j] = \"up\"\n\n if matrix[i][j] < 0:\n matrix[i][j] = 0\n\n if matrix[i][j] > max_score:\n max_score = matrix[i][j]\n max_index = [i,j]\n\n final_score = matrix[len(seq1)][len(seq2)]\n return backtrack, final_score, max_score, max_index, matrix\n\n# this function uses backtracking info to create new aligned sequences\ndef aligned(seq1, seq2, src, type):\n l1 = len(seq1)\n l2 = len(seq2)\n back_matrix = src[0]\n s1 = \"\"\n s2 = \"\"\n \n if type == \"G\" or type == \"S\":\n score = src[1]\n while (l1 > 0 or l2 > 0):\n if back_matrix[l1][l2] == \"diagonal\":\n s1 = seq1[l1-1] + s1\n s2 = seq2[l2-1] + s2\n l1 -= 1\n l2 -= 1\n elif back_matrix[l1][l2] == \"left\":\n s1 = \"-\" + s1\n s2 = seq2[l2-1] + s2\n l2 -= 1\n else:\n s1 = seq1[l1-1] + s1\n s2 = \"-\" + s2\n l1 -= 1\n else:\n score = src[2]\n max_start = src[3]\n l1 = max_start[0]\n l2 = max_start[1]\n score_matrix = src[4]\n while (score_matrix[l1][l2] != 0):\n if back_matrix[l1][l2] == \"diagonal\":\n s1 = seq1[l1-1] + s1\n s2 = seq2[l2-1] + s2\n l1 -= 1\n l2 -= 1\n elif back_matrix[l1][l2] == \"left\":\n s1 = \"-\" + s1\n s2 = seq2[l2-1] + s2\n l2 -= 1\n else:\n s1 = seq1[l1-1] + s1\n s2 = \"-\" + s2\n l1 -= 1\n \n return s1, s2, score, l1, l2\n\n# this function determines which score to use from the BLOSUM file\ndef proteinScore(blosum, seq1, seq2):\n score = blosum.get(seq1).get(seq2)\n return score\n\n# this function creates a scoring matrix using global alignment for 2 protein sequences\ndef proteinGlobal(scores, seq1, seq2):\n gapPenalty = scores.get(\"gap penalty\")\n matrix = scoreMatrix(seq1, seq2, gapPenalty)\n backtrack = backMatrix(seq1, seq2)\n \n # fill in all the scores and backtracking info\n for i in range(1, len(seq1)+1):\n for j in range(1, len(seq2)+1):\n diagonal = matrix[i-1][j-1] + proteinScore(scores, seq1[i-1], seq2[j-1])\n left = matrix[i][j-1] + gapPenalty\n up = matrix[i-1][j] + gapPenalty\n matrix[i][j] = max(diagonal, left, up)\n\n if matrix[i][j] == diagonal:\n backtrack[i][j] = \"diagonal\"\n elif matrix[i][j] == left:\n backtrack[i][j] = \"left\"\n else:\n backtrack[i][j] = \"up\"\n\n final_score = matrix[len(seq1)][len(seq2)]\n return backtrack, final_score\n\n# this function creates a scoring matrix using semi-global alignment for 2 protein sequences\ndef proteinSemi_global(scores, seq1, seq2):\n matrix = scoreMatrix(seq1, seq2, 0)\n backtrack = backMatrix(seq1, seq2)\n gapPenalty = scores.get(\"gap penalty\")\n\n for i in range(1, len(seq1)+1):\n for j in range(1, len(seq2)+1):\n diagonal = matrix[i-1][j-1] + proteinScore(scores, seq1[i-1], seq2[j-1])\n\n if i == len(seq1):\n left = matrix[i][j-1]\n else:\n left = matrix[i][j-1] + gapPenalty\n\n if j == len(seq2):\n up = matrix[i-1][j]\n else:\n up = matrix[i-1][j] + gapPenalty\n \n matrix[i][j] = max(diagonal, left, up)\n\n if matrix[i][j] == diagonal:\n backtrack[i][j] = \"diagonal\"\n elif matrix[i][j] == left:\n backtrack[i][j] = \"left\"\n else:\n backtrack[i][j] = \"up\"\n \n final_score = matrix[len(seq1)][len(seq2)]\n return backtrack, 
final_score\n\n# this function takes 2 DNA sequences and uses local alignment to decide the optimal score\ndef proteinLocal(scores, seq1, seq2):\n matrix = scoreMatrix(seq1, seq2, 0)\n backtrack = backMatrix(seq1, seq2)\n gapPenalty = scores.get(\"gap penalty\")\n max_score = 0\n\n for i in range(1, len(seq1)+1):\n for j in range(1, len(seq2)+1):\n diagonal = matrix[i-1][j-1] + proteinScore(scores, seq1[i-1], seq2[j-1])\n left = matrix[i][j-1] + gapPenalty\n up = matrix[i-1][j] + gapPenalty\n matrix[i][j] = max(diagonal, left, up)\n\n if matrix[i][j] == diagonal:\n backtrack[i][j] = \"diagonal\"\n elif matrix[i][j] == left:\n backtrack[i][j] = \"left\"\n else:\n backtrack[i][j] = \"up\"\n\n if matrix[i][j] < 0:\n matrix[i][j] = 0\n\n if matrix[i][j] > max_score:\n max_score = matrix[i][j]\n max_index = [i,j]\n\n final_score = matrix[len(seq1)][len(seq2)]\n return backtrack, final_score, max_score, max_index, matrix\n\n# this function deals with all command-line arguments and put them in an ordered list\ndef param(seq1, seq2, output, proseq, atype):\n argv = sys.argv[1:]\n pars = [\"-i\", \"-j\", \"-o\", \"-p\", \"-atype\"]\n alist = [seq1, seq2, output, proseq, atype]\n\n for i in range(len(argv)):\n for j in range(len(pars)):\n if argv[i] == pars[j]:\n try:\n argv[i+1]\n except IndexError:\n print(\"Missing argument after the last index \" + pars[j] + \". Can't run the program!\")\n else:\n if argv[i+1] not in pars:\n alist[j] = argv[i+1]\n\n for i in range(len(alist)):\n if alist[i] == \"\":\n print(\"Missing \" + pars[i] + \" or missing argument after \" + pars[i] + \". Can't run the program!\")\n return None\n if alist[3] != 'T' and alist[3] != 'F':\n print(\"Wrong argument, can't run the program! It should be either T or F after '-p'.\")\n return None\n break\n if alist[4] != 'G' and alist[4] != 'S' and alist[4] != 'L':\n print(\"Wrong argument, can't run the program! 
It should be either G or S after '-atype'.\")\n return None\n break\n\n return alist\n\ndef main():\n alist = param(None, None, None, None, None)\n # print(alist)\n i = loadSeq(alist[0])\n j = loadSeq(alist[1])\n output = alist[2]\n proseq = alist[3]\n atype = alist[4]\n\n if alist != None:\n fout = open(output, \"w\")\n fout.write(\"\\n \\n\")\n if proseq == \"F\":\n scoring = loadMatrix(\"dnaMatrix.txt\")\n if atype == \"G\":\n source = DNAglobal(scoring, i, j)\n elif atype == \"S\":\n source = DNAsemi_global(scoring, i, j)\n else:\n source = DNAlocal(scoring, i, j)\n else:\n scoring = loadBLOSUM(\"BLOSUM45.txt\")\n if atype == \"G\":\n source = proteinGlobal(scoring, i, j)\n elif atype == \"S\":\n source = proteinSemi_global(scoring, i, j)\n else: \n source = proteinLocal(scoring, i, j)\n\n result = aligned(i, j, source, atype)\n align1 = result[0]\n align2 = result[1]\n score = str(result[2])\n if atype == \"G\" or atype == \"S\":\n fout.write(\"seq1: \" + str(1) + \" \" + align1 + \" \" + str(len(i)) + \"\\n\")\n fout.write(\"\\n\")\n fout.write(\"seq2: \" + str(1) + \" \" + align2 + \" \" + str(len(j)) + \"\\n\") \n else:\n last_idx = source[3]\n fout.write(\"seq1: \" + str(result[3]+1) + \" \" + align1 + \" \" + str(last_idx[0]+1) + \"\\n\")\n fout.write(\"\\n\")\n fout.write(\"seq2: \" + str(result[4]+1) + \" \" + align2 + \" \" + str(last_idx[1]+1) + \"\\n\") \n fout.write(\"\\n\")\n fout.write(\"Score: \" + score + \"\\n\")\n fout.write(\"Identities: \" + identities(align1, align2) + \"\\n\")\n fout.close()\nmain()","repo_name":"ninavu/pairwise_alignment","sub_path":"align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":12452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
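The `param()` function in the align.py record above has two quirks: the `break` statements after `return None` are unreachable, and the `-atype` error message says "G or S" even though "L" (local alignment) is also accepted. A tightened sketch of the same validation; the function name and message wording are my own:

```python
def validate(alist):
    pars = ["-i", "-j", "-o", "-p", "-atype"]
    for flag, value in zip(pars, alist):
        if not value:
            print(f"Missing {flag} or its argument. Can't run the program!")
            return None
    if alist[3] not in ("T", "F"):
        print("Wrong argument: '-p' must be followed by T or F.")
        return None
    if alist[4] not in ("G", "S", "L"):
        print("Wrong argument: '-atype' must be followed by G, S, or L.")
        return None
    return alist
```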
+{"seq_id":"71211003149","text":"# cook your dish here\ntry:\n import math as mt\n def great(a,b):\n if(a>=b):\n return a\n else:\n return b\n t=int(input())\n while(t):\n a,b=map(int,input().split())\n g=great(a,b)\n power=int((mt.log(g)/mt.log(2)))+1\n mx=a^b\n \n rotation=0\n mx_rotation=0\n for i in range(1,power):\n end=b&1\n b=b>>1\n b=b|int((end*(2**(power-1))))\n rotation+=1\n if((a^b)>mx):\n mx=a^b\n mx_rotation=rotation\n # print(power)\n # print(mx)\n print(mx_rotation,mx)\n t-=1\nexcept:\n pass\n","repo_name":"iamvedant/Campus-Chapters-1.0","sub_path":"Another Game Of Numbers (GAMENUM).py","file_name":"Another Game Of Numbers (GAMENUM).py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"32624230813","text":"import turtle\nfrom turtle import Turtle, Screen\n\n\nclass LeftPaddle(Turtle):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.penup()\n\t\tself.setpos(-400, 0)\n\t\tself.setheading(90)\n\t\tself.speed(0)\n\t\tself.color(61, 84, 103)\n\t\tself.shape(\"square\")\n\t\tself.shapesize(0.8, 4)\n\t\tself.score = 0\n\n\tdef up(self):\n\t\tif self.ycor() < 290:\n\t\t\tself.setheading(90)\n\t\t\tself.forward(20)\n\n\tdef down(self):\n\t\tif self.ycor() > -290:\n\t\t\tself.setheading(270)\n\t\t\tself.forward(20)\n\n\nclass RightPaddle(Turtle):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.penup()\n\t\tself.setpos(400, 0)\n\t\tself.setheading(90)\n\t\tself.speed(0)\n\t\tself.color(61, 84, 103)\n\t\tself.shape(\"square\")\n\t\tself.shapesize(0.8, 4)\n\t\tself.score=0\n\n\tdef up(self):\n\t\tif self.ycor() < 290:\n\t\t\tself.setheading(90)\n\t\t\tself.forward(20)\n\n\tdef down(self):\n\t\tif self.ycor() > -290:\n\t\t\tself.setheading(270)\n\t\t\tself.forward(20)\n\n\nclass Ball(Turtle):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.penup()\n\t\tself.shape(\"circle\")\n\t\tself.speed(8)\n\t\tself.dx = 10\n\t\tself.dy = 10\n\n\tdef move(self):\n\t\tself.goto((self.xcor() + self.dx), (self.ycor() + self.dy))\n\n\tdef reset(self):\n\t\tself.goto(0, 0)\n\n\nclass ScoreBoard(Turtle):\n\tdef __init__(self, player, score, cord):\n\t\tsuper().__init__()\n\t\tself.player = player\n\t\tself.cord = cord\n\t\tself.score = score\n\t\tself.penup()\n\t\tself.hideturtle()\n\t\tself.goto(cord)\n\t\tself.color(219, 84, 97)\n\t\tself.write(f\"{self.player}: {self.score}\", True, align=\"center\", font=(\"Arial\", 30, \"normal\"))\n\n\nclass HalfCourt(Turtle):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.penup()\n\t\tself.setheading(90)\n\t\tself.color(138, 162, 158)\n\t\tself.shape(\"square\")\n\t\tself.shapesize(0.5, 1)\n","repo_name":"xalxnder/pong","sub_path":"pong_classes.py","file_name":"pong_classes.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"14372327952","text":"import csv\nimport operator\nfrom random import choice\n\nfrom .classes import Node, Link, Network, Agent\n\n\ndef read_nodes(input_dir, node_list, internal_node_seq_no_dict,\n external_node_id_dict, zone_to_nodes_dict):\n \"\"\" step 1: read input_node \"\"\"\n with open(input_dir+'/node.csv', 'r', encoding='utf-8') as fp:\n reader = csv.DictReader(fp)\n node_seq_no = 0\n for line in reader:\n node = Node(node_seq_no, line['node_id'], line['zone_id'])\n node_list.append(node)\n internal_node_seq_no_dict[node.external_node_id] = node_seq_no\n external_node_id_dict[node_seq_no] = node.external_node_id\n if node.zone_id not in zone_to_nodes_dict.keys():\n zone_to_nodes_dict[int(node.zone_id)] = list()\n zone_to_nodes_dict[int(node.zone_id)].append(\n node.external_node_id\n )\n else:\n zone_to_nodes_dict[int(node.zone_id)].append(\n node.external_node_id\n )\n node_seq_no += 1\n print('the number of nodes is', node_seq_no)\n fp.close()\n\n\ndef read_links(input_dir, link_list, node_list, internal_node_seq_no_dict):\n \"\"\" step 2: read input_link \"\"\"\n with open(input_dir+'/link.csv', 'r', encoding='utf-8') as fp:\n reader = csv.DictReader(fp)\n link_seq_no = 0\n for line in reader:\n from_node_no = internal_node_seq_no_dict[int(line['from_node_id'])]\n to_node_no = internal_node_seq_no_dict[int(line['to_node_id'])]\n link = Link(link_seq_no, \n from_node_no, \n to_node_no,\n int(line['from_node_id']),\n int(line['to_node_id']),\n line['length'],\n line['lanes'],\n line['free_speed'],\n line['capacity'],\n line['link_type'],\n line['VDF_alpha1'],\n line['VDF_beta1'])\n node_list[link.from_node_seq_no].outgoing_link_list.append(link)\n node_list[link.to_node_seq_no].incoming_link_list.append(link)\n link_list.append(link)\n link_seq_no += 1\n print('the number of links is', link_seq_no)\n fp.close()\n \n\ndef read_agents(input_dir,\n agent_list,\n agent_td_list_dict,\n zone_to_nodes_dict):\n \"\"\" step 3:read input_agent \"\"\"\n with open(input_dir+'/demand.csv', 'r', encoding='utf-8') as fp:\n reader = csv.DictReader(fp)\n agent_id = 1\n agent_type = 'v'\n agent_seq_no = 0\n for line in reader:\n volume = line['volume']\n volume_agent_size = int(float(volume) + 1)\n \n # only test up to 10k\n if agent_id >= 10000 :\n break \n \n for i in range(volume_agent_size):\n agent = Agent(agent_id,\n agent_seq_no,\n agent_type,\n line['o_zone_id'], \n line['d_zone_id'])\n\n # step 3.1 generate o_node_id and d_node_id randomly according \n # to o_zone_id and d_zone_id \n if zone_to_nodes_dict.get(agent.o_zone_id, -1) == -1 : \n continue\n if zone_to_nodes_dict.get(agent.d_zone_id, -1) == -1 : \n continue \n \n agent.o_node_id = choice(zone_to_nodes_dict[agent.o_zone_id])\n agent.d_node_id = choice(zone_to_nodes_dict[agent.d_zone_id])\n \n # step 3.2 update agent_id and agent_seq_no\n agent_id += 1\n agent_seq_no += 1 \n\n # step 3.3: update the g_simulation_start_time_in_min and \n # g_simulation_end_time_in_min \n if agent.departure_time_in_min < g_simulation_start_time_in_min:\n g_simulation_start_time_in_min = agent.departure_time_in_min\n if agent.departure_time_in_min > g_simulation_end_time_in_min:\n g_simulation_end_time_in_min = agent.departure_time_in_min\n\n #step 3.4: add the agent to the time dependent agent list \n if agent.departure_time_in_simu_interval not in agent_td_list_dict.keys():\n agent_td_list_dict[agent.departure_time_in_simu_interval] = list()\n agent_td_list_dict[agent.departure_time_in_simu_interval].append(agent.agent_seq_no)\n else:\n 
agent_td_list_dict[agent.departure_time_in_simu_interval].append(agent.agent_seq_no)\n agent_list.append(agent)\n\n print('the number of agents is', len(agent_list))\n\n #step 3.6:sort agents by the departure time\n sort_fun = operator.attrgetter(\"departure_time_in_min\")\n agent_list.sort(key=sort_fun)\n for i, agent in enumerate(agent_list):\n agent.agent_seq_no = i\n\n\ndef read_network(input_dir='./'):\n network = Network()\n\n read_nodes(input_dir,\n network.node_list,\n network.internal_node_seq_no_dict,\n network.external_node_id_dict,\n network.zone_to_nodes_dict)\n\n read_links(input_dir, \n network.link_list,\n network.node_list,\n network.internal_node_seq_no_dict)\n\n read_agents(input_dir,\n network.agent_list,\n network.agent_td_list_dict,\n network.zone_to_nodes_dict)\n\n network.update()\n\n return network","repo_name":"asu-trans-ai-lab/Path4GMNS","sub_path":"path4gmns/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"82"}
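In `read_agents` above, `g_simulation_start_time_in_min` and `g_simulation_end_time_in_min` are compared before they are ever assigned inside the function, so the first agent raises UnboundLocalError. A sketch of the usual fix: seed a local running window, then fold each departure time into it (the sample departure times are stand-ins):

```python
simulation_start_in_min = float('inf')
simulation_end_in_min = float('-inf')

for departure_time_in_min in (7.0, 6.25, 7.5):  # stand-in departure times
    simulation_start_in_min = min(simulation_start_in_min, departure_time_in_min)
    simulation_end_in_min = max(simulation_end_in_min, departure_time_in_min)

print(simulation_start_in_min, simulation_end_in_min)  # 6.25 7.5
```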
+{"seq_id":"24251040917","text":"\"\"\" Introduction to the Python Protocol \"\"\"\n\n# Suppose you have a function that calculates the total value of a product list, \n# where each product has the name, quantity, and price attributes:\nfrom typing import List\nclass Product:\n def __init__(self, name, quantity, price):\n self.name = name\n self.quantity = quantity\n self.price = price\n\ndef calculate_total(items: List[Product]) -> int:\n return sum([prod.quantity * prod.price for prod in items])\n\nitems = [\n Product('Mouse', 2, 250),\n Product('Keyboard', 3, 550)\n]\n\nprint(calculate_total(items))\n\n# In this example, the calculate_total() function accepts a list of Product objects and \n# returns the total value.\n\n# When writing this function, you may want to calculate the total of a product list. But you \n# likely want to use it for other lists such as inventory lists in the future.\n\n# If you look closely at the calculate_total() function, it only uses the quantity and price \n# attributes.\n\n# To make the calculate_total() more dynamic while leveraging type hints, you can use the \n# Protocol from the typing module. The Protocol class has been available since Python 3.8, \n# described in PEP 544.\n\nfrom pprint import pprint\nfrom typing import Protocol\n# First, define an Item class that inherits from the Protocol with two \n# attributes: quantity and price:\nclass Item(Protocol):\n quantity: int\n price: float\n\nclass Product:\n def __init__(self, name, quantity, price):\n self.name = name\n self.quantity = quantity\n self.price = price\n\nclass Inventory:\n def __init__(self, name, quantity, price):\n self.name = name\n self.quantity = quantity\n self.price = price\n\n# Second, change the calculate_total() function that accepts a list of Item objects \n# instead of a list of Product objects:\ndef calculate_total(items: List[Item]):\n return sum([item.quantity * item.price for item in items])\n\n# By doing this, you can pass any list of Item objects to the calculate_total() function \n# with the condition that each item has two attributes quantity and price.\ntotal = calculate_total([\n Product('Keyboard', 2, 800),\n Product('Mouse', 3, 250)\n])\nprint(total)\n\ntotal = calculate_total([\n Inventory('Food', 25, 150),\n Inventory('Stones', 150, 250)\n])\nprint(total)\n\n# In this example, the Product and Inventory class don’t need to subclass the Item \n# class but still can be used in the calculate_total() function.\n\n# This is called duck typing in Python. In duck typing, the behaviors and properties of an \n# object determine the object type, not the explicit type of the object.\n\n# For example, an object with the quantity and price will follow the Item protocol, \n# regardless of its explicit type.\n\n\"\"\" Summary \"\"\"\n# Use Python Protocol to define implicit interfaces.\n\n","repo_name":"Engr-Asad-Hussain/oop","sub_path":"single_inheritance/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"40581670455","text":"import os\nimport random\n\n\ndef search_file(directory, file, list_p):\n\n for i_elem in os.listdir(directory):\n path = os.path.join(directory, i_elem)\n if file == i_elem:\n list_p.append(path)\n elif os.path.isdir(path):\n search_file(path, file, list_p)\n\n return list_p\n\n\nlist_paths = list()\nmy_dir = 'Skillbox'\ndir_path = os.path.abspath(os.path.join('..', '..', '..', my_dir))\n\nprint('Ищем в: ', dir_path)\nfile_name = input('Имя файла: ')\n\n\nresult = search_file(dir_path, file_name, list_paths)\n\nif not result:\n print('Указанный файл в системе не найден.')\nelse:\n print('Найдены следующие пути:')\n for i_path in result:\n print(i_path)\n\nrandom_file = random.choice(result)\n\nfile = open(random_file, 'r', encoding='utf-8')\n\nprint('Вывод случайного файла из найденных, его путь', random_file)\nfor i_line in file:\n print(i_line, end='')\n\n","repo_name":"surma623/Module-Skillbox","sub_path":"22.3.2.py","file_name":"22.3.2.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"12559472397","text":"import sys\r\nimport heapq\r\n\r\ninput = sys.stdin.readline\r\nn, k = map(int, input().split())\r\njewel_data = [tuple(map(int, input().split())) for _ in range(n)]\r\nbag_data = [int(input()) for _ in range(k)]\r\n\r\njewel_data.sort(reverse=True)\r\nbag_data.sort()\r\n\r\nh = []\r\n\r\nresult = 0\r\nfor c in bag_data:\r\n while jewel_data and jewel_data[-1][0] <= c:\r\n jewel = jewel_data.pop()\r\n heapq.heappush(h, -jewel[1])\r\n if h:\r\n result += -heapq.heappop(h)\r\n\r\nprint(result)","repo_name":"Charmull/Algorithm_Python","sub_path":"백준/Gold/1202. 보석 도둑/보석 도둑.py","file_name":"보석 도둑.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"25864527041","text":"import pymongo\nfrom pymongo import MongoClient\nimport requests\nfrom bs4 import BeautifulSoup as soup\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup as BS\nimport re\n\nc = MongoClient()\ndb=c[\"mydatabase\"]\narticle = db.articles\n\ndef insertIntoDB(date,title, content, source, url):\n post_Data ={'date': date, 'title':title,'content':content,'source':source,'url':url,'score':'NA'}\n \"\"\"if(article.find({'title':title,'date':date,'source':source}).count()>0):\n print(\"already present\")\n else:\"\"\"\n result = article.insert_one(post_Data)\n\ndef updateScore():\n article.find({'score':'NA'})\n\t\ndef getGoodNetworkNews():\n url = 'https://www.goodnewsnetwork.org/'\n html = requests.get(url)\n soup = BS(html.text)\n table = soup.find_all('h3',{'class':\"entry-title td-module-title\"})\n for i in table:\n k=i.find('a')['href']\n #print(\"url\",k)\n browser = webdriver.PhantomJS(executable_path=\"D:/sw/phantomjs-2.1.1-windows/bin/phantomjs\")\n browser.get(k)\n html = browser.page_source\n soup = BS(html, 'html.parser')\n time = soup.find('time')\n #print(time.string)\n try:\n title_article=soup.find('h1',{'class':\"entry-title\"})\n #print(\"TITLE\",title_article.string)\n para = soup.find('div',{'class':'td-post-content'}).find_all('p')\n content=''\n for i in para:\n #if not(i.string is None):\n content=content+i.string\n insertIntoDB(time.string,title_article.string, content, \"Good\", content)\n except:\n print(\"removing video articles\") #article\n\t\t\t\ndef newsFromGuardian():\n main_url = \"https://newsapi.org/v2/everything?sources=the-guardian-uk&apiKey=fa6d77b861bc48c2a4bfd93ef6ceaeba\"\n open_bbc_page = requests.get(main_url).json()\n article = open_bbc_page[\"articles\"]\n browser = webdriver.PhantomJS(executable_path=\"D:/sw/phantomjs-2.1.1-windows/bin/phantomjs\")\n try:\n for ar in article:\n print(\"TITLE:\",ar[\"title\"])\n browser.get(ar[\"url\"])\n ans=''\n html = browser.page_source\n soup = BS(html, 'html.parser')\n table = soup.find('div',{'class':re.compile('content__article-body')}).find_all('p')\n for k in table:\n if k.string is not None:\n ans=ans+k.string\n insertIntoDB(ar['publishedAt'],ar[\"title\"], ans, \"the-guardian-uk\", ar[\"url\"])\n except:\n print(\"removing video articles\") #article\n \n \ndef newsFromBBC():\n main_url = \" https://newsapi.org/v2/everything?sources=bbc-news&apiKey=95465951cbf447369c10a005ded49a0b\"\n open_bbc_page = requests.get(main_url).json()\n article = open_bbc_page[\"articles\"]\n results = []\n links = []\n browser = webdriver.PhantomJS(executable_path=\"D:/sw/phantomjs-2.1.1-windows/bin/phantomjs\")\n for ar in article:\n print(\"TITLE:\",ar[\"title\"])\n print(\"DATE:\",ar['publishedAt'])\n try:\n browser.get(ar[\"url\"])\n ans=''\n html = browser.page_source\n soup = BS(html, 'html.parser')\n table = soup.find_all('div',{'class':\"story-body__inner\"})[0].find_all('p',{'class':\"aria-hidden\"})\n for div in table:\n div.decompose()\n table=soup.find_all('div',{'class':\"story-body__inner\"})[0].find_all('p')\n for k in table:\n #if k.string is not None:\n ans=ans+k.string\n insertIntoDB(ar['publishedAt'],ar[\"title\"], ans, \"BBC\", ar[\"url\"])\n except:\n print(\"removing video articles\")\n\t\t\n\t\t\t\ndef newsFromCNBC():\n main_url = \"https://newsapi.org/v2/everything?sources=cnbc&apiKey=cb28b795dd1e469ebbc02ea19535898a\"\n open_cnbc_page = requests.get(main_url).json()\n article = open_cnbc_page[\"articles\"]\n browser = 
webdriver.PhantomJS(executable_path=\"D:/sw/phantomjs-2.1.1-windows/bin/phantomjs\")\n for ar in article:\n browser.get(ar[\"url\"])\n ans=''\t\t\n html = browser.page_source\n soup = BS(html, 'html.parser')\n try:\n table = soup.find_all('div',{'class':'group-container'})[1].find_all('p')\n for k in table:\n if k.string is not None:\n ans=ans+k.string\n insertIntoDB(ar['publishedAt'],ar[\"title\"], ans, \"CNBC\", ar[\"url\"])\n except:\n print(\"removing video articles\")\n\n\t\t\t\nnewsFromBBC()\t\n#newsFromGuardian()\ngetGoodNetworkNews()\n#newsFromCNBC()\n","repo_name":"nivedita104/PosNews","sub_path":"mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
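The scraper above carries a commented-out duplicate check in `insertIntoDB` ("already present"). An upsert makes the idea concrete, so re-running the scraper never stores the same article twice. A sketch using pymongo's `update_one`; the field names mirror the record's own schema, and the helper name is mine:

```python
from pymongo import MongoClient

articles = MongoClient()["mydatabase"].articles

def upsert_article(date, title, content, source, url):
    articles.update_one(
        {"title": title, "date": date, "source": source},  # identity of an article
        {"$set": {"content": content, "url": url, "score": "NA"}},
        upsert=True,  # insert if no matching document exists
    )
```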
+{"seq_id":"36108116979","text":"import numpy as np\n\n\nclass HillClimbing:\n \"\"\"\n Maximization Problem.\n \"\"\"\n\n def __init__(self, method='vanilla', noise_scale=0.1, n_candidates=1, up_rate=2, down_rate=0.5, max_noise=2,\n min_noise=0.001):\n\n assert method in ['vanilla', 'steepest_ascent', 'simulated_annealing', 'adaptive_noise_scaling']\n\n self.x_best = None\n self.f_best = -np.inf\n self.noise_scale = noise_scale\n self.n_candidates = n_candidates\n self.down_rate = down_rate\n self.up_rate = up_rate\n self.max_noise = max_noise\n self.min_noise = min_noise\n self.method = method\n\n def step(self, xs, fs):\n\n xs_new = None\n\n if self.method == 'vanilla':\n if fs[0] > self.f_best:\n self.x_best = xs[0]\n self.f_best = fs[0]\n xs_new = [self.x_best + np.random.normal(loc=0, scale=self.noise_scale, size=xs[0].shape)]\n\n if self.method == 'steepest_ascent':\n best_indx = np.argmax(fs)\n\n if fs[best_indx] > self.f_best:\n self.x_best = xs[best_indx]\n self.f_best = fs[best_indx]\n xs_new = [self.x_best + np.random.normal(0, self.noise_scale, size=xs[0].shape) for _ in\n range(self.n_candidates)]\n\n if self.method == 'simulated_annealing':\n best_indx = np.argmax(fs)\n\n if fs[best_indx] > self.f_best:\n self.x_best = xs[best_indx]\n self.f_best = fs[best_indx]\n self.noise_scale /= self.down_rate\n xs_new = [self.x_best + np.random.normal(0, self.noise_scale, size=xs[0].shape) for _ in\n range(self.n_candidates)]\n\n if self.method == 'adaptive_noise_scaling':\n best_indx = np.argmax(fs)\n\n if fs[best_indx] > self.f_best:\n self.x_best = xs[best_indx]\n self.f_best = fs[best_indx]\n self.noise_scale = max(self.noise_scale * self.down_rate, self.min_noise)\n else:\n self.noise_scale = min(self.noise_scale * self.up_rate, self.max_noise)\n\n xs_new = [self.x_best + np.random.normal(0, self.noise_scale, size=xs[0].shape) for _ in\n range(self.n_candidates)]\n\n return xs_new\n","repo_name":"m-fili/CartPole_HillClimbing","sub_path":"gradient_free/hill_climbing.py","file_name":"hill_climbing.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"29466654927","text":"import argparse\nimport logging\nimport re\nimport os\nimport shutil\nfrom logging import getLogger\n\nimport MeCab\nimport wikipedia\nfrom Levenshtein import distance as D\nfrom pykakasi import kakasi\n\n\nlogging.basicConfig(level=logging.INFO)\nlog = getLogger(__name__)\n\n\nclass Gorgeous:\n \"\"\"\n 君のハートに、レボ☆リューション\n\n gorgeous = Gorgeous()\n gorgeous.revolution(\"まだ助かる\")\n\n >>> マダガスカル\n \"\"\"\n def __init__(self, **kwargs) -> None:\n k = kakasi()\n k.setMode('K', 'a')\n self.conv = k.getConverter()\n self.tagger = MeCab.Tagger()\n self.nations = self.read_nations(**kwargs)\n self.nations_roman = [\n self.romanize(nation) for nation in self.nations]\n self.nations_roman_vowel = [self.extract_vowel(\n self.romanize(nation)) for nation in self.nations]\n self.recent_answer = \"\"\n return\n\n def read_nations(self, fname=\"data/nations.csv\", **kwargs) -> list:\n \"\"\"\n Read csv file \n published on 『国コード一覧CSV ISO 3166-1』\n https://qiita.com/tao_s/items/32b90a2751bfbdd585ea\n \"\"\"\n assert os.path.exists(fname), f\"{fname} is not found\"\n with open(fname, \"r\") as f:\n nations = f.read().split(\"\\n\")\n\n nations = [re.split(\"[,|]\", nation)[0].replace(\"\\\"\", \"\") for nation in nations]\n nations.pop(0)\n return nations\n\n def read_csv_data(self, filepath, **kwargs) -> list:\n with open(filepath, \"r\") as f:\n data = f.read().split(\"\\n\")\n data = [re.split(\"[,|]\", area)[0].replace(\"\\\"\", \"\") for area in data]\n data.pop(0)\n return data\n\n def clean_str(self, s: str) -> str:\n return re.sub(r'[*\\s\\t\\n.,]', \"\", s)\n\n def katakanize(self, s: str, morph=False, **kwargs) -> str:\n \"\"\"\n convert \"kanji\" to \"katakana\"\n \"\"\"\n morphed = [re.split(r\"[,\\t\\s\\n]\", w) for w in self.tagger.parse(s).split(\"\\n\")]\n morphed.remove([\"\"])\n morphed.remove([\"EOS\"])\n \n k = [morph[-1] if morph[-1] != \"*\" else morph[0] for morph in morphed]\n\n if morph: # morphlogical analysed output\n return k\n\n return \"\".join(k)\n\n def romanize(self, s, **kwargs) -> list:\n \"\"\"\n convert \"katakana\" to \"romaji\" via kakasi\n (kanji - kana simple inverter)\n \"\"\"\n s = self.katakanize(s, **kwargs)\n if type(s) == str:\n s = [s]\n return [self.conv.do(w) for w in s]\n\n def extract_vowel(self, word: str, **kwargs) -> str:\n \"\"\"\n extract vowels from romanized words\n \"\"\"\n if type(word) == list:\n return [self.extract_vowel(w) for w in word]\n\n return \"\".join([l for l in word if l in [\"a\", \"i\", \"u\", \"e\", \"o\", \"n\"]])\n\n def revolution(self, sentence: str, app_use=False ,**kwargs):\n \"\"\"\n Revolution: Get Similar Nation Name from Word\n\n gorgeous.revolution(\"まだ助かる\")\n >>> マダガスカル\n\n args\n ----\n n_result : default=5 : lines of result print\n vowel : default=False : if true, word-distance will be calculated based on vowels\n app_use : default=False, if true, returns value of dict with some info\n \"\"\"\n\n # default kargs\n n_result = kwargs.get('n_result', 3)\n vowel = kwargs.get('vowel', False)\n\n answer = dict()\n\n log.info(f\"INPUT: {sentence}\")\n answer[\"input\"] = sentence\n # sentence -> [words] -> [katakana] -> [roman]\n word_roman = self.romanize(sentence, **kwargs)\n log.info(f\"ROMAN: {word_roman}\")\n answer[\"roman\"] = word_roman\n\n if vowel:\n word_vowel = self.extract_vowel(word_roman)\n log.info(f\"VOWEL: {word_vowel}\")\n answer[\"vowel\"] = word_vowel\n dists = [D(word_vowel[-1], nation[0]) for nation in self.nations_roman_vowel]\n else:\n dists = [D(word_roman[-1], 
nation[0]) for nation in self.nations_roman]\n answer[\"vowel\"] = \"\"\n idx = sorted(range(len(dists)), key=lambda k: dists[k])\n\n # logging\n log.info(\"RESULT:\")\n answer[\"results\"] = []\n for i in range(n_result):\n rank = idx[i]\n nation = self.nations[rank]\n dist = dists[rank]\n roman = self.nations_roman_vowel[rank] if vowel else self.nations_roman[rank]\n # calc score\n roman = roman[0] if type(roman) == list else roman # list -> str\n word_roman = word_roman[0] if type(word_roman) == list else word_roman # list -> str\n score = (len(word_roman) - int(dist)) / len(roman) if len(roman) != 0 else 0\n score = round(100 * score, 2)\n # build message for log and line bot\n msg = f\"No.{i+1} : {nation} ({roman}) : ({dist} : {score}%)\"\n log.info(\"\\t\" + msg)\n answer[\"results\"].append([nation, roman, dist, score])\n\n self.recent_answer = self.nations[idx[0]]\n answer[\"result\"] = self.nations[idx[0]]\n\n # Get meta info\n map_url = self.googlemap()\n log.info(f\"ここ!({map_url})\")\n answer[\"map\"] = map_url\n print(\"-\" * shutil.get_terminal_size()[0]) # draw line\n \n wiki = self.wikipedia()\n log.info(f\"{wiki[1]}!!\\n\")\n _, answer[\"wiki_summary\"], answer[\"wiki_url\"] = wiki\n print(u\"☆\" * shutil.get_terminal_size()[0]) # draw line\n\n # Answer\n if app_use: # returns dict value\n return answer\n return self.recent_answer\n\n def googlemap(self, place=None) -> str:\n \"\"\"generate Google Map Link\"\"\"\n if place is None:\n place = self.recent_answer\n return f\"https://www.google.com/maps/search/{place}/\"\n\n def wikipedia(self, place=None) -> tuple:\n \"\"\"Generate Wikipedia Link\"\"\"\n if place is None:\n place = self.recent_answer\n wikipedia.set_lang(\"ja\")\n p = wikipedia.page(wikipedia.search(place)[0])\n return (p.title, p.summary, p.url)\n\n def showtime(self, **kwargs) -> None:\n print(\"【ゴー☆ジャスのショータイム!】\")\n print(f\"\\n- 【お題】を入力してくれよな!\\n- ランキングを{kwargs.get('n_result', 3)}件表示するぞ!\\n- 地球義ではなく、GoogleMapとWikipediaの情報を出力するぞ!\")\n print(u\"☆\" * shutil.get_terminal_size()[0]) # draw line\n while True:\n place = input(\"\\n【お題】を入力: \")\n if place in [\"終了\", \"end\", \"終わり\"]:\n break\n self.revolution(place, **kwargs)\n print(\"また遊んでくれよな!\")\n return\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description='キミも、ゴー☆ジャスになろう!')\n \n parser.add_argument('-N', '--n_line', help=\"結果表示数\", default=3, type=int)\n parser.add_argument('-F', '--file', help=\"nations.csv ファイルパス\",\n default='nations.csv')\n parser.add_argument('-V', '--vowel', help=\"母音モード\", action='store_true')\n\n args = parser.parse_args()\n gorgeous = Gorgeous(fname=args.file)\n gorgeous.showtime(vowel=args.vowel, n_result=args.n_line)\n","repo_name":"atsukoba/GorgeousApp","sub_path":"app/gorgeous.py","file_name":"gorgeous.py","file_ext":"py","file_size_in_byte":7351,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"82"}
+{"seq_id":"40065351764","text":"import math\nl = input()\nwhile l:\n try:\n r,x,y = map(float, l.split())\n area = math.pi*r**2\n d = math.sqrt(x**2 + y**2)\n if d > r:\n print(\"miss\")\n else:\n #are of sector - area of triangle\n #area of circle - area of segment\n tmp = r-d\n o = 2*math.acos(d/r)\n seg = 0.5*(o - math.sin(o))*(r**2)\n print(area - seg,seg)\n\n l = input() \n\n except EOFError:\n break\n","repo_name":"nigelandrewquinn/Kattis","sub_path":"halfacookie.py","file_name":"halfacookie.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"24916151396","text":"#Demonstrates a simple GUI\n\nfrom tkinter import *\n\n#Base window\nroot = Tk()\n\n#Editing the window\nroot.title(\"Простейший GUI\")\nroot.geometry(\"200x100\")\n\n#Frame\napp = Frame(root)\napp.grid()\n\n#A Label inside the Frame\nlbl = Label(app, text=\"Это я!\")\nlbl.grid()\n\n#Button1 inside the Frame\nbttn1 = Button(app, text=\"Я ничего не делаю!\")\nbttn1.grid()\n\n#Button2 inside the Frame\nbttn2 = Button(app)\nbttn2.grid()\nbttn2.configure(text=\"Я тоже!\")\n\n#Button3 inside the Frame\nbttn3 = Button(app)\nbttn3.grid()\nbttn3[\"text\"] = \"И я тоже!\"\n\n\nroot.mainloop()","repo_name":"Xomer89/LearningPy","sub_path":"untitled/GUI/simpleGUI.py","file_name":"simpleGUI.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"37443747793","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom app.libs.utils import safe_int\nfrom app.exception.http_error import MultiValidationException\n\nINPUT = {\n \"X\": [10, 20, 30, 40, 50, 60, 70],\n \"Y\": [12, 21, 46, 65, 90, 111, 148]\n}\n\nclass PolinomialNewton(object):\n xinput: int\n\n def __init__(self, data: dict) -> object:\n self.xinput = safe_int(data.get(\"xinput\", 0))\n \n def prevalidate(self) -> MultiValidationException:\n error = MultiValidationException()\n\n if self.xinput == 0 or \\\n (self.xinput < 10 or self.xinput > 70):\n error.push_error(\"input\", \"Invalid input. Range input: 10-70\")\n\n return error\n\n def to_dict(self) -> dict:\n return self.__polinom_newton()\n\n def __polinom_newton(self) -> dict:\n x = INPUT[\"X\"]\n y = INPUT[\"Y\"]\n n = len(x)-1\n ST = np.zeros((n+1, n+1))\n ST[:, 0] = y\n\n for k in range(1, n+1):\n for i in range(0, n-k+1):\n ST[i, k] = round((ST[i+1, k-1] - ST[i, k-1])/(x[i+k]-x[i]), 5)\n\n p = ST[0,0]\n for i in range(1, n+1):\n a = ST[0, i]\n\n for k in range(0, i):\n a = a * (self.xinput-x[k])\n\n p = p + a\n \n return {\n \"calculate\": ST.tolist(),\n \"result\": p,\n }\n","repo_name":"agprsty-utdi/metode-numerik-app","sub_path":"domain/polinomial_newton.py","file_name":"polinomial_newton.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"16813782546","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import, division\n\nimport os\nimport sys\nimport time\nfrom pprint import pprint\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\nfrom opt import Options\n\nfrom model import LinearModel, weight_init\nfrom train import DatasetTrain, DatasetTest\nimport util\nimport log\n\n\n\ndef main(opt):\n start_epoch = 0\n err_best = 1000\n glob_step = 0\n lr_now = opt.lr\n\n # save options\n\n # create model\n print(\">>> creating model\")\n model = LinearModel()\n model = model.cuda()\n model.apply(weight_init)\n print(\">>> total params: {:.2f}M\".format(sum(p.numel() for p in model.parameters()) / 1000000.0))\n criterion = nn.MSELoss(size_average=True).cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)\n\n # load ckpt\n if opt.load:\n print(\">>> loading ckpt from '{}'\".format(opt.load))\n # import pdb; pdb.set_trace()\n ckpt = torch.load(opt.load)\n start_epoch = ckpt['epoch']\n err_best = ckpt['err']\n glob_step = ckpt['step']\n lr_now = ckpt['lr']\n model.load_state_dict(ckpt['state_dict'])\n optimizer.load_state_dict(ckpt['optimizer'])\n print(\">>> ckpt loaded (epoch: {} | err: {})\".format(start_epoch, err_best))\n \n\n # list of action(s)\n \n\n # data loading\n # test\n if opt.test:\n \n test_loader = DataLoader(DatasetTest('test.npy','label.npy'), batch_size =128,drop_last=False)\n \n hh=test(test_loader, model, criterion)\n \n print (\">>>>>> TEST results:\")\n \n sys.exit()\n\n # load dadasets for training\n test_loader = DataLoader(DatasetTest('train.npy','label.npy'), batch_size=128,drop_last=False)\n train_loader = DataLoader(DatasetTrain('train.npy','label.npy'), batch_size=128,drop_last=False, shuffle=True)\n print(\">>> data loaded !\")\n\n cudnn.benchmark = True\n for epoch in range(start_epoch, opt.epochs):\n print('==========================')\n print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))\n\n # per epoch\n glob_step, lr_now, loss_train = train(\n train_loader, model, criterion, optimizer,\n lr_init=opt.lr, lr_now=lr_now, glob_step=glob_step, lr_decay=opt.lr_decay, gamma=opt.lr_gamma,\n max_norm=opt.max_norm)\n loss_test = test(test_loader, model, criterion)\n\n # update log file\n \n\n # save ckpt\n is_best = loss_test < err_best\n err_best = min(loss_test, err_best)\n if is_best:\n \tlog.save_ckpt({'epoch': epoch + 1,\n 'lr': lr_now,\n 'step': glob_step,\n 'err': err_best,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()},\n ckpt_path=opt.ckpt,\n is_best=True)\n \n\n \n\n\ndef train(train_loader, model, criterion, optimizer,\n lr_init=None, lr_now=None, glob_step=None, lr_decay=None, gamma=None,\n max_norm=True):\n losses = util.AverageMeter()\n\n model.train()\n\n start = time.time()\n batch_time = 0\n \n for i, (inps, tars) in enumerate(train_loader):\n glob_step += 1\n if glob_step % lr_decay == 0 or glob_step == 1:\n lr_now = util.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)\n inputs = Variable(inps.cuda())\n targets = Variable(tars.cuda())\n\n outputs = model(inputs)\n\n # calculate loss\n optimizer.zero_grad()\n loss = criterion(outputs, targets)\n losses.update(loss.item(), inputs.size(0))\n loss.backward()\n if max_norm:\n nn.utils.clip_grad_norm(model.parameters(), max_norm=1)\n optimizer.step()\n print (\">>> train error: 
{} <<<\".format(losses.avg))\n\n # update summary\n\n \n return glob_step, lr_now, losses.avg\n\n\ndef test(test_loader, model, criterion):\n losses = util.AverageMeter()\n\n model.eval()\n\n all_dist = []\n start = time.time()\n batch_time = 0\n results = list()\n for i, (inps, tars) in enumerate(test_loader):\n inputs = Variable(inps.cuda())\n targets = Variable(tars.cuda())\n\n \n outputs = model(inputs)\n results.append(outputs)\n\n # calculate loss\n outputs_coord = outputs\n loss = criterion(outputs_coord, targets)\n\n losses.update(loss.item(), inputs.size(0))\n\n final=torch.cat(results,dim=0)\n final=1000000*final.detach().cpu().numpy()[:,0]\n results_txt = open(\"results.txt\", \"w+\")\n for i in range(final.shape[0]):\n \tresults_txt.write(str(final[i]) + '\\n')\n # update summary\n \n print (\">>> error: {} <<<\".format(losses.avg))\n return losses.avg\n\n\nif __name__ == \"__main__\":\n option = Options().parse()\n main(option)\n\n","repo_name":"WendyBaiYunwei/CS5228-project","sub_path":"task1/nn_approach/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"2278701279","text":"import matplotlib\nmatplotlib.use('agg')\nimport numpy as np\nimport pickle\nimport numpy\nfrom astropy.io import fits\nfrom galpy.util import bovy_conversion, bovy_coords, save_pickles, bovy_plot\nfrom galpy.potential import MWPotential2014, turn_physical_off, vcirc\nimport astropy.units as u\nfrom galpy.orbit import Orbit\nimport random\nimport pal5_util_MWfit\nimport MWPotential2014Likelihood\nimport os, os.path\nimport re\nimport glob\nimport pickle\nimport csv\nfrom optparse import OptionParser\n_REFR0, _REFV0= MWPotential2014Likelihood._REFR0, MWPotential2014Likelihood._REFV0\nro, vo= _REFR0, _REFV0\n\n\ndef get_options():\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage=usage)\n \n parser.add_option(\"--ind\",dest='ind',default=None,\n type='int',\n help=\"index of potential\")\n return parser\n\n\ndef determine_nburn(filename='../pal5_mcmc/mwpot14-fitsigma-0.dat',\n threshold=0.1,skip=50,\n return_nsamples=False):\n \"\"\"Function to detemrine an appropriate nburn for a given chain\"\"\"\n # Load the data\n data= numpy.loadtxt(filename,comments='#',delimiter=',')\n lndata= numpy.reshape(data[:,-1],(len(data[:,5])//nwalkers,nwalkers))\n # Perform a running diff wrt skip less\n diff= (lndata-numpy.roll(lndata,skip,axis=0))\n diff[:skip]= -100. # Make sure it's not within the first hundred\n maxln= numpy.nanmax(lndata)\n try:\n indx= (numpy.fabs(numpy.median(diff,axis=1)) < threshold)\\\n *((maxln-numpy.nanmax(lndata,axis=1)) < 1.25)\n if maxln > -22.5:\n indx*= numpy.std(lndata,axis=1) < 3.\n if return_nsamples:\n return len(data)-numpy.arange(len(lndata))[indx][0]*nwalkers\n else:\n return numpy.arange(len(lndata))[indx][0]*nwalkers\n except IndexError:\n if return_nsamples: return 100.\n else: return numpy.prod(lndata.shape)-100\n\n\nnwalkers= 12\n\n#from each MCMC chain file, pick nsamples\nnsamples= 2000\n\npot_ind=np.arange(0,32,1)\npot_ind=np.delete(pot_ind,14)\n\nt_age= np.linspace(0.,5.,1001)/bovy_conversion.time_in_Gyr(vo,ro)\n\nperi_all=[]\n\nparser= get_options()\noptions,args= parser.parse_args()\n\npindx=pot_ind[options.ind]\n\ncsvfo= open('pal5_mcmc_selected_chains_pot{}.dat'.format(pindx),'w')\nfowriter= csv.writer(csvfo,delimiter=',')\n\n# Load this potential\nfn= 'mwpot14-fitsigma-%i.dat' % pindx\nwith open(fn,'rb') as savefile:\n line1= savefile.readline()\npotparams= [float(s) for s in (line1.split(':'.encode())[1].split(','.encode()))]\n\ntnburn= determine_nburn(fn)\ntdata= numpy.loadtxt(fn,comments='#',delimiter=',')\ntdata= tdata[tnburn::]\n\nrand_indx=random.sample(range(len(tdata)),nsamples)\n\nperi=[]\n\nfor jj in rand_indx:\n \n tvo= tdata[jj][1]*_REFV0\n pot= MWPotential2014Likelihood.setup_potential(potparams,tdata[jj][0],False,False,\n pal5_util_MWfit._REFR0,tvo)\n\n # Now compute the stream model for this setup\n dist= tdata[jj][2]*22.\n pmra= -2.296+tdata[jj][3]+tdata[jj][4]\n pmdecpar= 2.257/2.296\n pmdecperp= -2.296/2.257\n pmdec= -2.257+tdata[jj][3]*pmdecpar+tdata[jj][4]*pmdecperp\n vlos= -58.7\n sigv= 0.4*numpy.exp(tdata[jj][5])\n \n \n prog= Orbit([229.018,-0.124,dist,pmra,pmdec,vlos],\n radec=True,ro=ro,vo=tvo,\n solarmotion=[-11.1,24.,7.25]).flip()\n \n prog.integrate(t_age,pot)\n peri=prog.rperi()\n \n out=[peri,tdata[jj][3],tdata[jj][0],tdata[jj][1],sigv]\n out.extend([229.018,-0.124,dist,pmra,pmdec,vlos])\n \n fowriter.writerow(out)\n 
\ncsvfo.flush()\ncsvfo.close()\n","repo_name":"nbanik/Baryonic-effects-on-Pal5","sub_path":"select_mcmc_chains.py","file_name":"select_mcmc_chains.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"}
+{"seq_id":"2763084199","text":"from fastapi import FastAPI as FA\nfrom fastapi.logger import logger\nfrom aioredis import create_redis_pool, Redis\n\nfrom .auth.routes import auth_router, user_router\nfrom .budget.routes import budget_router, transactions_router\n\n\nclass FastAPI(FA):\n def __init__(self) -> None:\n super().__init__()\n self.redis: Redis\n\n\napp = FastAPI()\n\n\n@app.on_event('startup')\nasync def on_start():\n logger.info('App init')\n logger.info('Connecting to redis database ... ')\n redis = await create_redis_pool('redis://redis', db=3)\n app.redis = redis\n\n\n@app.on_event('shutdown')\nasync def on_shutdown():\n app.redis.close()\n await app.redis.wait_closed()\n\n\napp.include_router(auth_router)\napp.include_router(user_router)\napp.include_router(budget_router)\napp.include_router(transactions_router)\n","repo_name":"bensondomingo/budget-backend","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"36396016914","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.rst')).read()\n\nrequires = [\n 'pyramid>=1.4',\n 'PyBrowserID',\n 'requests>=1.0',\n 'MarkupSafe',\n ]\n\nsetup(name='pyramid_persona',\n version='1.6.1',\n description='pyramid_persona',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n author='Georges Dubus',\n author_email='georges.dubus@gmail.com',\n url='https://github.com/madjar/pyramid_persona',\n keywords='web pyramid pylons authentication persona',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n tests_require=requires,\n test_suite=\"pyramid_persona\",\n )\n","repo_name":"madjar/pyramid_persona","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"82"}
+{"seq_id":"38799631444","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom blog import views\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('register/',views.register,name = 'register'),\n path('login/', views.login, name='login'),\n path('blog/',include('blog.urls')),\n path('home/', views.home_unlog,name=\"home_unlog\"),\n path('home/unlog',views.log_out,name=\"log_out\"),\n path('summernote/', include('django_summernote.urls')),\n path('jet/', include('jet.urls', 'jet')),\n path('jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')),\n]","repo_name":"githubfqy/EasyDown","sub_path":"EasyDown/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"}
+{"seq_id":"29673402947","text":"import time\nimport sys\n\ndef timecount(fu):\n def counttime():\n fu()\n t=time.process_time()\n print(t)\n return counttime\n\n@timecount\n\ndef test():\n print('hello')\n time.sleep(1)\n \ntest()\n\ndef fibnq(n):\n a,b,count=0,1,0\n while count HTD Player (Output Interface) Help:
Hello World!
\"\n\n\n@app.route(\"/get-red\")\n@cross_origin()\ndef get_red_counter():\n '''\n Accesses the red counter from the database.\n @return - the red counter document\n '''\n query = {'name': 'red'} # Query for PyMongo\n counter = db.counters.find_one(query, {\"_id\": False}) # Search with the query, removing the _id field\n return counter # Return the database entry\n\n@app.route(\"/get-blue\")\n@cross_origin()\ndef get_blue_counter():\n '''\n Accesses the blue counter from the database.\n @return - the blue counter document\n '''\n query = {'name': 'blue'} # Query for PyMongo\n counter = db.counters.find_one(query, {\"_id\": False}) # Search with the query, removing the _id field\n return counter # Return the database entry\n\n@app.put(\"/set-red\")\n@cross_origin()\ndef set_red_counter():\n '''\n Updates the red counter value in the database.\n Request should contain a body with a 'redCount' field that has\n the value to set the red counter to.\n '''\n\n # Get the PUT request body and access the 'redCount' field\n body = request.json\n redCount = body['redCount']\n\n # Update 1 document matching the query by setting the value of count\n query = {'name': 'red'}\n db.counters.update_one(query, {'$set': {'count': redCount}})\n\n # Return nothing\n return \"\"\n\n@app.put(\"/set-blue\")\n@cross_origin()\ndef set_blue_counter():\n '''\n Updates the blue counter value in the database.\n Request should contain a body with a 'blueCount' field that has\n the value to set the blue counter to.\n '''\n\n # Get the PUT request body and access the 'blueCount' field\n body = request.json\n blueCount = body['blueCount']\n\n # Update 1 document matching the query by setting the value of count\n query = {'name': 'blue'}\n db.counters.update_one(query, {'$set': {'count': blueCount}})\n\n # Return nothing\n return \"\"\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"AlexWills37/Fullstack-Tutorial-Code","sub_path":"backend-flask/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"74252363469","text":"\"\"\"148. Sort List\n\nSort a linked list in O(n log n) time using constant space complexity.\n\nExample 1:\n\nInput: 4->2->1->3\nOutput: 1->2->3->4\n\nExample 2:\n\nInput: -1->5->3->4->0\nOutput: -1->0->3->4->5\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def sortList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n # merge-sort\n # See LC 21 merge two lists\n \n # Nothing needs to be done for linked list with <= 1 nodes\n if not head or not head.next:\n return head\n \n # Use turtle-rabbit two pointers to find the mid-point\n turtle = head\n rabbit = head.next\n \n while rabbit.next:\n rabbit = rabbit.next\n turtle = turtle.next\n if rabbit.next:\n rabbit = rabbit.next\n \n # []------> ... ------> []--------->[]----------> ... -------->[]\n # head turtle turtle.next \n # <=========lower========> <===========upper===========> \n # ^\n # mid-point\n \n # step 1:\n # recurisvely sort `upper`:\n #\n # upper: []----------> ... -------->[]---->None\n \n # step 2:\n # disconnect `lower` from `upper`\n #\n # []------> ... ------> []--->None []----------> ... -------->[]\n # head turtle turtle.next \n # <=========lower========> <===========upper===========> \n \n # step 3:\n # recursively sort `lower`:\n #\n # lower: []------> ... 
------> []--->None \n \n # step 4:\n # merged the two sorted list `lower` and `upper`\n \n upper = self.sortList(turtle.next) \n turtle.next = None\n lower = self.sortList(head)\n \n merged = self.mergeTwoLists(lower, upper)\n return merged\n \n def mergeTwoLists(self, l1, l2):\n if not l1:\n return l2\n elif not l2:\n return l1\n \n # `dummy.next` points to the start of a merged list (initially empty)\n #\n # `node` points the end of merged list (initially empty)\n \n \n # [] =======> [] =====> ... ====> [] ======> None\n # dummy node\n \n # [] ===> .... ===> [] ===> ... ===>[] ===> None\n # l1\n #\n # [] ===> .... ===> [] ===> ... ===>[] ===> None\n # l2 \n node = dummy = ListNode(0)\n \n while l1 and l2:\n # `l1` and `l2` points to the head of the two linked lists\n # We pick whichever is smaller, and step the corresponding pointer, `l1` or `l2`\n \n if l1.val < l2.val:\n node.next = l1\n l1 = l1.next\n else:\n node.next = l2\n l2 = l2.next\n \n # We always step `node` \n node = node.next\n \n if l1:\n node.next = l1\n if l2:\n node.next = l2\n \n return dummy.next \n","repo_name":"chao-ji/LeetCode","sub_path":"python/linked_list/lc148.py","file_name":"lc148.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"82"} +{"seq_id":"42470768684","text":"from statsmodels.tsa.seasonal import seasonal_decompose\n\n\ndef seasonal_decomp(df,column,model=\"additive\"):\n sea_decomp_df = df.copy(deep=True)\n sea_decomp_df.index=sea_decomp_df.index.to_timestamp()\n\n s_dec_add = seasonal_decompose(sea_decomp_df[f'{column}'],model=model, period=1).plot()\n s_dec_add.set_size_inches(20, 8)\n return s_dec_add","repo_name":"obaidagh/crypto-analysis-forcasting","sub_path":"analysis/seasonal_decomp.py","file_name":"seasonal_decomp.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"82"} +{"seq_id":"17153360160","text":"import traceback\nfrom typing import Any, NamedTuple, Optional\n\nimport graphviz\nimport torch\nimport torch.fx\n\n\nclass TensorMetadata(NamedTuple):\n # TensorMetadata is a structure containing pertinent information\n # about a tensor within a PyTorch program.\n\n # General Tensor metadata\n shape: torch.Size\n dtype: torch.dtype\n requires_grad: bool\n memory_format: Optional[torch.memory_format]\n\n def __repr__(self):\n return \"×\".join(map(str, self.shape))\n\n\ndef _extract_tensor_metadata(result: torch.Tensor) -> TensorMetadata:\n \"\"\"\n Extract a TensorMetadata NamedTuple describing `result`.\n \"\"\"\n shape = result.shape\n dtype = result.dtype\n requires_grad = result.requires_grad\n\n memory_formats = {\n torch.contiguous_format,\n torch.channels_last,\n torch.channels_last_3d,\n }\n\n memory_format = None\n\n for query_format in memory_formats:\n if result.is_contiguous(memory_format=query_format):\n memory_format = query_format\n break\n\n return TensorMetadata(shape, dtype, requires_grad, memory_format)\n\n\nclass ResultProbe(torch.fx.Interpreter):\n def run_node(self, n: torch.fx.Node) -> Any:\n try:\n result = super().run_node(n)\n except Exception:\n traceback.print_exc()\n raise RuntimeError(\n f\"ShapeProp error for: \"\n f\"node={n.format_node()} with \"\n f\"meta={n.meta}\"\n )\n find_tensor_in_result = False\n\n def extract_tensor_meta(obj):\n if isinstance(obj, torch.Tensor):\n nonlocal find_tensor_in_result\n find_tensor_in_result = True\n return _extract_tensor_metadata(obj)\n else:\n return 
obj\n\n n.meta[\"result\"] = torch.fx.node.map_aggregate(result, extract_tensor_meta)\n n.meta[\"find_tensor_in_result\"] = find_tensor_in_result\n return result\n\n\ndef html_table(*content, **kwargs):\n kwargs_pairs = [f'{k}=\"{v}\"' for k, v in kwargs.items()]\n return f'