repo_name (string, 8–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
Nstats/cs_capsule | [
"e45a8518a41117d4b5f105bcc2c96a3d621e40ea"
] | [
"examples/evaluate.py"
] | [
"#*#*#*./examples/evaluate.py\n\"\"\"Official evaluation script for SQuAD version 2.0.\n\nIn addition to basic functionality, we also compute additional statistics and\nplot precision-recall curves if an additional na_prob.json file is provided.\nThis file is expected to map question ID's to the model's predicted probability\nthat a question is unanswerable.\n\"\"\"\nimport argparse\nimport collections\nimport json\nimport numpy as np\nimport os\nimport re\nimport string\nimport sys\n\nOPTS = None\ndef parse_args():\n parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')\n parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')\n parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')\n parser.add_argument('--out-file', '-o', metavar='eval.json',\n help='Write accuracy metrics to file (default is stdout).')\n parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',\n help='Model estimates of probability of no answer.')\n parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,\n help='Predict \"\" if no-answer probability exceeds this (default = 1.0).')\n parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,\n help='Save precision-recall curves to directory.')\n parser.add_argument('--verbose', '-v', action='store_true')\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\n\ndef make_qid_to_has_ans(dataset):\n qid_to_has_ans = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid_to_has_ans[qa['id']] = bool(qa['answers'])\n return qid_to_has_ans\n\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\ndef get_tokens(s):\n if not s: return []\n return normalize_answer(s).split()\n\ndef compute_exact(a_gold, a_pred):\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\n\ndef compute_f1(a_gold, a_pred):\n gold_toks = get_tokens(a_gold) #答案list\n pred_toks = get_tokens(a_pred)\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)###c.Counter([1,2,33])& c.Counter([2,3,4]) res: Counter({2: 1})\n num_same = sum(common.values()) ###所有出现的相同词的总个数\n if len(gold_toks) == 0 or len(pred_toks) == 0: ##无答案问题直接对比\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks) #准确率\n recall = 1.0 * num_same / len(gold_toks) #召回率\n f1 = (2 * precision * recall) / (precision + recall) #f1\n return f1\n\ndef get_raw_scores(dataset, preds):\n exact_scores = {}\n f1_scores = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid = qa['id']\n gold_answers = [a['text'] for a in qa['answers']\n if normalize_answer(a['text'])]\n if not gold_answers:\n # For unanswerable questions, only correct answer is empty string\n gold_answers = ['']\n if qid not in preds:\n print('Missing prediction for %s' % qid)\n continue\n a_pred = preds[qid]\n # Take max over all gold answers\n 
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)\n f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)\n return exact_scores, f1_scores\n\ndef apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):\n new_scores = {}\n for qid, s in scores.items():\n pred_na = na_probs[qid] > na_prob_thresh ## case where the prediction is converted to no-answer\n if pred_na:\n new_scores[qid] = float(not qid_to_has_ans[qid])\n else:\n new_scores[qid] = s\n return new_scores\n\ndef make_eval_dict(exact_scores, f1_scores, qid_list=None):\n if not qid_list:\n total = len(exact_scores)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores.values()) / total),\n ('f1', 100.0 * sum(f1_scores.values()) / total),\n ('total', total),\n ])\n else:\n total = len(qid_list)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),\n ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),\n ('total', total),\n ])\n\ndef merge_eval(main_eval, new_eval, prefix):\n for k in new_eval:\n main_eval['%s_%s' % (prefix, k)] = new_eval[k]\n\ndef plot_pr_curve(precisions, recalls, out_image, title):\n plt.step(recalls, precisions, color='b', alpha=0.2, where='post')\n plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.xlim([0.0, 1.05])\n plt.ylim([0.0, 1.05])\n plt.title(title)\n plt.savefig(out_image)\n plt.clf()\n\ndef make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,\n out_image=None, title=None):\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\n true_pos = 0.0\n cur_p = 1.0\n cur_r = 0.0\n precisions = [1.0]\n recalls = [0.0]\n avg_prec = 0.0\n for i, qid in enumerate(qid_list):\n if qid_to_has_ans[qid]:\n true_pos += scores[qid]\n cur_p = true_pos / float(i+1)\n cur_r = true_pos / float(num_true_pos)\n if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:\n # i.e., if we can put a threshold after this point\n avg_prec += cur_p * (cur_r - recalls[-1])\n precisions.append(cur_p)\n recalls.append(cur_r)\n if out_image:\n plot_pr_curve(precisions, recalls, out_image, title)\n return {'ap': 100.0 * avg_prec}\n\ndef run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,\n qid_to_has_ans, out_image_dir):\n if out_image_dir and not os.path.exists(out_image_dir):\n os.makedirs(out_image_dir)\n num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)\n if num_true_pos == 0:\n return\n pr_exact = make_precision_recall_eval(\n exact_raw, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_exact.png'),\n title='Precision-Recall curve for Exact Match score')\n pr_f1 = make_precision_recall_eval(\n f1_raw, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_f1.png'),\n title='Precision-Recall curve for F1 score')\n oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}\n pr_oracle = make_precision_recall_eval(\n oracle_scores, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_oracle.png'),\n title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)')\n merge_eval(main_eval, pr_exact, 'pr_exact')\n merge_eval(main_eval, pr_f1, 'pr_f1')\n merge_eval(main_eval, pr_oracle, 'pr_oracle')\n\ndef histogram_na_prob(na_probs, qid_list, image_dir, name):\n if not qid_list:\n return\n x = [na_probs[k] for k in qid_list]\n weights = np.ones_like(x) / float(len(x))\n plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))\n plt.xlabel('Model probability of no-answer')\n plt.ylabel('Proportion of dataset')\n plt.title('Histogram of no-answer probability: %s' % name)\n plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))\n plt.clf()\n\ndef find_best_thresh(preds, scores, na_probs, qid_to_has_ans):\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\n cur_score = num_no_ans\n best_score = cur_score\n best_thresh = 0.0\n qid_list = sorted(na_probs, key=lambda k: na_probs[k]) # sorted by no-answer probability (diff) in ascending order\n for i, qid in enumerate(qid_list):\n if qid not in scores: continue\n if qid_to_has_ans[qid]:\n diff = scores[qid]\n else:\n if preds[qid]:\n diff = -1\n else:\n diff = 0\n cur_score += diff\n if cur_score > best_score:\n best_score = cur_score\n best_thresh = na_probs[qid]\n return 100.0 * best_score / len(scores), best_thresh\n\ndef find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\n best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)\n best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)\n main_eval['best_exact'] = best_exact\n main_eval['best_exact_thresh'] = exact_thresh\n main_eval['best_f1'] = best_f1\n main_eval['best_f1_thresh'] = f1_thresh\n\ndef main():\n with open(OPTS.data_file) as f:\n dataset_json = json.load(f)\n dataset = dataset_json['data']\n with open(OPTS.pred_file) as f:\n preds = json.load(f)\n if OPTS.na_prob_file:\n with open(OPTS.na_prob_file) as f:\n na_probs = json.load(f)\n else:\n na_probs = {k: 0.0 for k in preds}\n qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\n exact_raw, f1_raw = get_raw_scores(dataset, preds) # get exact-match and F1 scores for every question\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n OPTS.na_prob_thresh)\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n OPTS.na_prob_thresh)\n\n\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n if has_ans_qids:\n has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)\n merge_eval(out_eval, has_ans_eval, 'HasAns')\n if no_ans_qids:\n no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)\n merge_eval(out_eval, no_ans_eval, 'NoAns')\n if OPTS.na_prob_file:\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)\n if OPTS.na_prob_file and OPTS.out_image_dir:\n run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,\n qid_to_has_ans, OPTS.out_image_dir)\n histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')\n histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')\n if OPTS.out_file:\n with open(OPTS.out_file, 'w') as f:\n json.dump(out_eval, f)\n else:\n print(json.dumps(out_eval, indent=2))\n\n\ndef judgeOnline(data_file,pred_file,na_prob_file,output_dir,epoch,train_steps):\n\n if not os.path.exists(os.path.join(output_dir,\"eval_res\")):\n os.makedirs(os.path.join(output_dir,\"eval_res\"))\n output = os.path.join(output_dir,\"eval_res\")\n\n 
out_file = os.path.join(output,\"eval.json\")\n out_image_dir = None\n na_prob_thresh = 1.0\n\n with open(data_file) as f:\n\n dataset_json = json.load(f)\n dataset = dataset_json['data']\n with open(pred_file) as f:\n preds = json.load(f)\n with open(na_prob_file) as f:\n na_probs = json.load(f)\n\n\n exact_raw, f1_raw = get_raw_scores(dataset, preds)\n\n qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False, separating answerable and unanswerable dev questions\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] # answerable questions\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] # unanswerable questions\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)### no effect here since the default threshold is 1.0; see that function for details\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)### no effect here since the default threshold is 1.0; see that function for details\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n if has_ans_qids:\n has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)\n merge_eval(out_eval, has_ans_eval, 'HasAns')\n if no_ans_qids:\n no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)\n merge_eval(out_eval, no_ans_eval, 'NoAns')\n if na_prob_file: ## if a null_odds.json file is provided\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) ## find the best thresholds\n if na_prob_file and out_image_dir:\n run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,\n qid_to_has_ans, out_image_dir)\n histogram_na_prob(na_probs, has_ans_qids, out_image_dir, 'hasAns')\n histogram_na_prob(na_probs, no_ans_qids, out_image_dir, 'noAns')\n if out_file:\n with open(out_file, 'a') as fout:\n fout.write(\"epoch:{} steps:{} evaluation res:{}\\n\".format(epoch,train_steps,json.dumps(out_eval, sort_keys=True, indent=2)))\n import logging\n logger = logging.getLogger(__name__)\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO )\n logger.info(\"write evaluation result to \" + out_file + \"OK!\")\n else:\n print(json.dumps(out_eval, indent=2))\n return out_eval\n\nif __name__ == '__main__':\n OPTS = parse_args()\n if OPTS.out_image_dir:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n print(vars(OPTS))\n main()"
] | [
[
"matplotlib.pyplot.savefig",
"numpy.ones_like",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"matplotlib.pyplot.step",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.hist",
"matplotlib.use",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xlabel"
]
] |
yeralin/qiskit-terra | [
"251930a7b5d83af121ea0f3aafb33a54a1860e14"
] | [
"qiskit/circuit/library/standard_gates/s.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"The S and Sdg gate.\"\"\"\n\nimport numpy\nfrom qiskit.qasm import pi\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister\n\n\nclass SGate(Gate):\n r\"\"\"Single qubit S gate (Z**0.5).\n\n It induces a :math:`\\pi/2` phase, and is sometimes called the P gate (phase).\n\n This is a Clifford gate and a square-root of Pauli-Z.\n\n **Matrix Representation:**\n\n .. math::\n\n S = \\begin{pmatrix}\n 1 & 0 \\\\\n 0 & i\n \\end{pmatrix}\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌───┐\n q_0: ┤ S ├\n └───┘\n\n Equivalent to a :math:`\\pi/2` radian rotation about the Z axis.\n \"\"\"\n\n def __init__(self, label=None):\n \"\"\"Create new S gate.\"\"\"\n super().__init__('s', 1, [], label=label)\n\n def _define(self):\n \"\"\"\n gate s a { u1(pi/2) a; }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import U1Gate\n q = QuantumRegister(1, 'q')\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (U1Gate(pi / 2), [q[0]], [])\n ]\n qc._data = rules\n self.definition = qc\n\n def inverse(self):\n \"\"\"Return inverse of S (SdgGate).\"\"\"\n return SdgGate()\n\n def to_matrix(self):\n \"\"\"Return a numpy.array for the S gate.\"\"\"\n return numpy.array([[1, 0],\n [0, 1j]], dtype=complex)\n\n\nclass SdgGate(Gate):\n r\"\"\"Single qubit S-adjoint gate (~Z**0.5).\n\n It induces a :math:`-\\pi/2` phase.\n\n This is a Clifford gate and a square-root of Pauli-Z.\n\n **Matrix Representation:**\n\n .. math::\n\n Sdg = \\begin{pmatrix}\n 1 & 0 \\\\\n 0 & -i\n \\end{pmatrix}\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌─────┐\n q_0: ┤ Sdg ├\n └─────┘\n\n Equivalent to a :math:`\\pi/2` radian rotation about the Z axis.\n \"\"\"\n\n def __init__(self, label=None):\n \"\"\"Create new Sdg gate.\"\"\"\n super().__init__('sdg', 1, [], label=label)\n\n def _define(self):\n \"\"\"\n gate sdg a { u1(-pi/2) a; }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import U1Gate\n q = QuantumRegister(1, 'q')\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (U1Gate(-pi / 2), [q[0]], [])\n ]\n qc._data = rules\n self.definition = qc\n\n def inverse(self):\n \"\"\"Return inverse of Sdg (SGate).\"\"\"\n return SGate()\n\n def to_matrix(self):\n \"\"\"Return a numpy.array for the Sdg gate.\"\"\"\n return numpy.array([[1, 0],\n [0, -1j]], dtype=complex)\n"
] | [
[
"numpy.array"
]
] |
krumo/SPIN | [
"0e2f17e70f06de46e062683ea6d5b233eeaa73c1"
] | [
"spin/models/smpl.py"
] | [
"import torch\nimport numpy as np\nimport smplx\nfrom smplx import SMPL as _SMPL\nfrom smplx.body_models import ModelOutput\nfrom smplx.lbs import vertices2joints\n\nimport spin.config as config\nimport spin.constants as constants\n\nclass SMPL(_SMPL):\n \"\"\" Extension of the official SMPL implementation to support more joints \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SMPL, self).__init__(*args, **kwargs)\n joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]\n J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)\n self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))\n self.joint_map = torch.tensor(joints, dtype=torch.long)\n\n def forward(self, *args, **kwargs):\n kwargs['get_skin'] = True\n smpl_output = super(SMPL, self).forward(*args, **kwargs)\n extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)\n joints = torch.cat([smpl_output.joints, extra_joints], dim=1)\n joints = smpl_output.joints\n # print(smpl_output.joints.shape)\n # joints = joints[:, self.joint_map, :]\n output = ModelOutput(vertices=smpl_output.vertices,\n global_orient=smpl_output.global_orient,\n body_pose=smpl_output.body_pose,\n joints=joints,\n betas=smpl_output.betas,\n full_pose=smpl_output.full_pose)\n return output\n"
] | [
[
"numpy.load",
"torch.tensor",
"torch.cat"
]
] |
yketa/UBC---Spring-2018---code | [
"b065544639a483dda48cda89bcbb11c1772232aa"
] | [
"analysis/coarse_graining.py"
] | [
"\"\"\"\nModule coarse_graining implements a Gaussian coarse-graining adapted from\nIlling et al., Phys. Rev. Lett. 117, 208002 (2016) following Goldhirsch and\nGoldenberg, Eur. Phys. J. E 9, 245–251 (2002).\n\"\"\"\n\nimport numpy as np\n\nclass GaussianCG:\n \"\"\"\n Gaussian coarse-graining.\n \"\"\"\n\n def __init__(self, sigma, r_cut):\n \"\"\"\n Parameters\n ----------\n sigma : float\n Length scale of Gaussian function.\n r_cut : float\n Coarse-graining cut-off radius.\n \"\"\"\n\n self.sigma = sigma # length scale of Gaussian function\n self.r_cut = r_cut # coarse-graining cut-off radius\n\n def function(self, r):\n \"\"\"\n Parameters\n ----------\n r : float\n Radius.\n\n Returns\n -------\n phi : float\n Coarse-graining factor at radius r.\n \"\"\"\n\n if r > self.r_cut: return 0 # coarse-graining function is zero after cut-off\n\n Dg = 2*np.pi*(self.sigma**2)*(1 -\n np.exp(-0.5*((self.r_cut/self.sigma)**2))) # normalisation factor\n return np.exp(-0.5*((r/self.sigma)**2))/Dg # coarse-graining factor\n\n def factors(self, positions):\n \"\"\"\n Parameters\n ----------\n positions : float array\n Coordinates at which coarse-graining is desired.\n\n Returns\n -------\n CGfactors : Numpy float array\n Coarse-graining factors at positions.\n \"\"\"\n\n return np.array(list(map(\n lambda r: self.function(r),\n np.sqrt(np.sum(positions**2, axis=-1))\n ))) # coarse graining factors at positions\n\nclass SquareUniformCG:\n \"\"\"\n Square uniform coarse-graining.\n \"\"\"\n\n def __init__(self, dL):\n \"\"\"\n Parameters\n ----------\n dL : float\n Length of square box on which to average.\n \"\"\"\n\n self.dL = dL # averaging square length\n\n def function(self, position):\n \"\"\"\n Parameters\n ----------\n position : float array\n Coordinates.\n\n Returns\n -------\n phi : float\n Coarse-graining factor at position position.\n \"\"\"\n\n if (np.abs(np.array(position)) > self.dL/2).any(): return 0 # coarse-graining function is zero outside square\n return 1 # is one in\n\n def factors(self, positions):\n \"\"\"\n Parameters\n ----------\n positions : float array\n Coordinates at which coarse-graining is desired.\n\n Returns\n -------\n CGfactors : Numpy float array\n Coarse-graining factors at positions.\n \"\"\"\n\n CGfactors = np.array(list(map(\n lambda position:\n self.function(position),\n positions\n )))\n sumCGfactors = np.sum(CGfactors)\n if np.sum(CGfactors) == 0: return 0\n return CGfactors/sumCGfactors # coarse graining factors at positions\n\nclass CoarseGraining:\n \"\"\"\n Enables unique calculation of coarse-graining factors and then calculation\n of coarse-graining avergages.\n \"\"\"\n\n def __init__(self, factors_function, positions):\n \"\"\"\n Parameters\n ----------\n factors_function : function\n Function of array of coordinates which returns coarse-graining\n factors at these coordinates.\n positions : float array\n Coordinates at which coarse-graining is desired.\n \"\"\"\n\n self.CGfactors = np.array(factors_function(positions)) # coarse-graining factors at positions\n\n def average(self, var):\n \"\"\"\n Coarse-graining averaging.\n\n Parameters\n ----------\n var : float array\n Values of variable to coarse-grain at different positions from\n point at which coarse-graining is desired.\n\n Returns\n -------\n average : float\n Coarse-grained variable.\n \"\"\"\n\n return np.sum(\n np.transpose(np.array(self.CGfactors,\n ndmin=len(np.array(var).shape)))\n *np.array(var), axis=0) # coarse-grained variable\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.exp"
]
] |
Nexuscompute/Cirq | [
"640ef8f82d6a56ec95361388ce7976e096cca906"
] | [
"cirq-core/cirq/work/observable_measurement_data_test.py"
] | [
"# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport dataclasses\nimport datetime\nimport time\n\nimport numpy as np\nimport pytest\n\nimport cirq\nimport cirq.work as cw\nfrom cirq.work.observable_measurement_data import (\n _check_and_get_real_coef,\n _obs_vals_from_measurements,\n _stats_from_measurements,\n)\nfrom cirq.work.observable_settings import _MeasurementSpec\n\n\ndef test_get_real_coef():\n q0 = cirq.LineQubit(0)\n assert _check_and_get_real_coef(cirq.Z(q0) * 2, atol=1e-8) == 2\n assert _check_and_get_real_coef(cirq.Z(q0) * complex(2.0), atol=1e-8) == 2\n with pytest.raises(ValueError):\n _check_and_get_real_coef(cirq.Z(q0) * 2.0j, atol=1e-8)\n\n\ndef test_obs_vals_from_measurements():\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n obs = cirq.Z(a) * cirq.Z(b) * 10\n vals = _obs_vals_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)\n should_be = [10, -10, -10, 10]\n np.testing.assert_equal(vals, should_be)\n\n\ndef test_stats_from_measurements():\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n obs = cirq.Z(a) * cirq.Z(b) * 10\n mean, err = _stats_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)\n\n # The mean is zero since our bitstrings have balanced even- and odd-\n # parity cases.\n assert mean == 0\n\n # Since we multiplied our observable by 10, the standard deviation is\n # 10 [each obs val deviates by 10]. 
The variance is 10**2 and the\n # squared-standard-error-of-the-mean can be found by dividing by the\n # number of samples minus 1.\n assert err == 10**2 / (4 - 1)\n\n\ndef test_observable_measured_result():\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n omr = cw.ObservableMeasuredResult(\n setting=cw.InitObsSetting(\n init_state=cirq.Z(a) * cirq.Z(b), observable=cirq.Y(a) * cirq.Y(b)\n ),\n mean=0,\n variance=5**2,\n repetitions=4,\n circuit_params={'phi': 52},\n )\n assert omr.stddev == 5\n assert omr.observable == cirq.Y(a) * cirq.Y(b)\n assert omr.init_state == cirq.Z(a) * cirq.Z(b)\n\n cirq.testing.assert_equivalent_repr(omr)\n\n assert omr.as_dict() == {\n 'init_state': cirq.Z(a) * cirq.Z(b),\n 'observable': cirq.Y(a) * cirq.Y(b),\n 'mean': 0,\n 'variance': 25,\n 'repetitions': 4,\n 'param.phi': 52,\n }\n omr2 = dataclasses.replace(\n omr,\n circuit_params={\n 'phi': 52,\n 'observable': 3.14, # this would be a bad but legal parameter name\n 'param.phi': -1,\n },\n )\n assert omr2.as_dict() == {\n 'init_state': cirq.Z(a) * cirq.Z(b),\n 'observable': cirq.Y(a) * cirq.Y(b),\n 'mean': 0,\n 'variance': 25,\n 'repetitions': 4,\n 'param.phi': 52,\n 'param.observable': 3.14,\n 'param.param.phi': -1,\n }\n\n\n@pytest.fixture()\ndef example_bsa() -> 'cw.BitstringAccumulator':\n \"\"\"Test fixture to create an (empty) example BitstringAccumulator\"\"\"\n q0, q1 = cirq.LineQubit.range(2)\n setting = cw.InitObsSetting(\n init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)\n )\n meas_spec = _MeasurementSpec(\n max_setting=setting, circuit_params={'beta': 0.123, 'gamma': 0.456}\n )\n bsa = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[\n setting,\n cw.InitObsSetting(init_state=setting.init_state, observable=cirq.X(q0)),\n cw.InitObsSetting(init_state=setting.init_state, observable=cirq.Y(q1)),\n ],\n qubit_to_index={q0: 0, q1: 1},\n )\n return bsa\n\n\ndef test_bitstring_accumulator(example_bsa):\n # test initialization\n assert example_bsa.bitstrings.shape == (0, 2)\n assert example_bsa.chunksizes.shape == (0,)\n assert example_bsa.timestamps.shape == (0,)\n\n # test consume_results\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n example_bsa.consume_results(bitstrings)\n assert example_bsa.bitstrings.shape == (4, 2)\n assert example_bsa.chunksizes.shape == (1,)\n assert example_bsa.timestamps.shape == (1,)\n assert example_bsa.n_repetitions == 4\n\n with pytest.raises(ValueError):\n example_bsa.consume_results(bitstrings.astype(int))\n\n # test results\n results = list(example_bsa.results)\n assert len(results) == 3\n for r in results:\n assert r.repetitions == 4\n\n # test records\n for r in example_bsa.records:\n assert isinstance(r, dict)\n assert 'repetitions' in r\n assert r['repetitions'] == 4\n\n\ndef test_bitstring_accumulator_strings(example_bsa):\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n example_bsa.consume_results(bitstrings)\n\n q0, q1 = cirq.LineQubit.range(2)\n settings = cw.observables_to_settings(\n [cirq.X(q0), cirq.Y(q1), cirq.X(q0) * cirq.Y(q1)], qubits=[q0, q1]\n )\n\n strings_should_be = [\n '+Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577',\n '+Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577',\n '+Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577',\n ]\n for setting, ssb in zip(settings, strings_should_be):\n assert example_bsa.summary_string(setting) == ssb, ssb\n\n assert (\n str(example_bsa)\n == \"\"\"Accumulator +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)); 
4 repetitions\n +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577\n +Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577\n +Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577\"\"\"\n )\n\n\ndef test_bitstring_accumulator_equality():\n et = cirq.testing.EqualsTester()\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n chunksizes = np.asarray([4])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n obs = cirq.Z(a) * cirq.Z(b) * 10\n setting = cw.InitObsSetting(init_state=cirq.Z(a) * cirq.Z(b), observable=obs)\n meas_spec = _MeasurementSpec(setting, {})\n\n cirq.testing.assert_equivalent_repr(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings.copy(),\n chunksizes=chunksizes.copy(),\n timestamps=timestamps.copy(),\n )\n )\n\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings.copy(),\n chunksizes=chunksizes.copy(),\n timestamps=timestamps.copy(),\n ),\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings.copy(),\n chunksizes=chunksizes.copy(),\n timestamps=timestamps.copy(),\n ),\n )\n\n time.sleep(1)\n timestamps = np.asarray([datetime.datetime.now()])\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(setting, {'a': 2}),\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n\n bitstrings = bitstrings.copy()\n bitstrings[0] = [1, 1]\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n chunksizes = np.asarray([2, 2])\n timestamps = np.asarray(list(timestamps) * 2)\n et.add_equality_group(\n cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=[setting],\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n )\n\n\ndef _get_ZZ_Z_Z_bsa_constructor_args():\n bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)\n chunksizes = np.asarray([4])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n settings = list(\n cw.observables_to_settings(\n [cirq.Z(a) * cirq.Z(b) * 7, cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]\n )\n )\n meas_spec = _MeasurementSpec(settings[0], {})\n return {\n 'meas_spec': meas_spec,\n 'simul_settings': settings,\n 'qubit_to_index': qubit_to_index,\n 'bitstrings': bitstrings,\n 'chunksizes': chunksizes,\n 'timestamps': timestamps,\n }\n\n\ndef test_bitstring_accumulator_stats():\n kwargs = _get_ZZ_Z_Z_bsa_constructor_args()\n settings = kwargs['simul_settings']\n a, b = kwargs['qubit_to_index']\n\n bsa = cw.BitstringAccumulator(**kwargs)\n\n # There are three observables, each with mean 0 because\n # the four 2-bit strings have even numbers of a) ones in the\n # first position b) ones in the second position c) even parity\n # 
pairs.\n np.testing.assert_allclose([0, 0, 0], bsa.means())\n\n # Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)\n # where xbar and ybar are 0, per above. Each individual observed\n # value is +-1, so (x-xbar)(y-bar) is +-1 (neglecting observable coefficients)\n # For off-diagonal elements, there are two +1 and two -1 terms for each entry\n # so the total contribution is zero, and the matrix is diagonal\n should_be = np.array([[4 * 7**2, 0, 0], [0, 4 * 5**2, 0], [0, 0, 4 * 3**2]])\n should_be = should_be / (4 - 1) # covariance formula\n should_be = should_be / 4 # cov of the distribution of sample mean\n np.testing.assert_allclose(should_be, bsa.covariance())\n\n for setting, var in zip(settings, [4 * 7**2, 4 * 5**2, 4 * 3**2]):\n np.testing.assert_allclose(0, bsa.mean(setting))\n np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))\n np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))\n\n bad_obs = [cirq.X(a) * cirq.X(b)]\n bad_setting = list(cw.observables_to_settings(bad_obs, qubits=[a, b]))[0]\n with pytest.raises(ValueError):\n bsa.mean(bad_setting)\n\n\ndef test_bitstring_accumulator_stats_2():\n bitstrings = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], np.uint8)\n chunksizes = np.asarray([4])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n settings = list(cw.observables_to_settings([cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]))\n meas_spec = _MeasurementSpec(settings[0], {})\n\n bsa = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n\n # There are three observables, each with mean 0 because\n # the four 2-bit strings have even numbers of a) ones in the\n # first position b) ones in the second position.\n np.testing.assert_allclose([0, 0], bsa.means())\n\n # Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)\n # where xbar and ybar are 0, per above. 
Each individual observed\n # value is +-1, so (x-xbar)(y-bar) is +-1 (neglecting observable coefficients)\n # In this case, the measurements are perfectly correlated.\n should_be = 4 * np.array([[5 * 5, 5 * 3], [3 * 5, 3 * 3]])\n should_be = should_be / (4 - 1) # covariance formula\n should_be = should_be / 4 # cov of the distribution of sample mean\n np.testing.assert_allclose(should_be, bsa.covariance())\n\n for setting, var in zip(settings, [4 * 5**2, 4 * 3**2]):\n np.testing.assert_allclose(0, bsa.mean(setting))\n np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))\n np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))\n\n\ndef test_bitstring_accumulator_errors():\n q0, q1 = cirq.LineQubit.range(2)\n settings = cw.observables_to_settings(\n [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]\n )\n grouped_settings = cw.group_settings_greedy(settings)\n max_setting = list(grouped_settings.keys())[0]\n simul_settings = grouped_settings[max_setting]\n\n with pytest.raises(ValueError):\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings,\n qubit_to_index={q0: 0, q1: 1},\n bitstrings=np.array([[0, 1], [0, 1]]),\n chunksizes=np.array([2]),\n )\n\n with pytest.raises(ValueError):\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings,\n qubit_to_index={q0: 0, q1: 1},\n bitstrings=np.array([[0, 1], [0, 1]]),\n chunksizes=np.array([3]),\n timestamps=[datetime.datetime.now()],\n )\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings[:1],\n qubit_to_index={q0: 0, q1: 1},\n )\n with pytest.raises(ValueError):\n bsa.covariance()\n with pytest.raises(ValueError):\n bsa.variance(simul_settings[0])\n with pytest.raises(ValueError):\n bsa.mean(simul_settings[0])\n\n bsa.consume_results(np.array([[0, 0]], dtype=np.uint8))\n assert bsa.covariance().shape == (1, 1)\n\n\ndef test_flatten_grouped_results():\n q0, q1 = cirq.LineQubit.range(2)\n settings = cw.observables_to_settings(\n [cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]\n )\n grouped_settings = cw.group_settings_greedy(settings)\n bsas = []\n for max_setting, simul_settings in grouped_settings.items():\n bsa = cw.BitstringAccumulator(\n meas_spec=_MeasurementSpec(max_setting, {}),\n simul_settings=simul_settings,\n qubit_to_index={q0: 0, q1: 1},\n )\n bsa.consume_results(np.array([[0, 0], [0, 0], [0, 0]], dtype=np.uint8))\n bsas.append(bsa)\n\n results = cw.flatten_grouped_results(bsas)\n assert len(results) == 4\n for res in results:\n # We pass all 0's to each consume_results, so everything is 1 +- 0\n assert res.mean == 1\n assert res.variance == 0\n assert res.repetitions == 3\n\n\ndef _get_mock_readout_calibration(qa_0=90, qa_1=10, qb_0=91, qb_1=9):\n # Mock readout correction results by constructing a BitstringAccumulator\n # with two <Z> measurements\n q1_ro = np.array([0] * qa_0 + [1] * qa_1)\n q2_ro = np.array([0] * qb_0 + [1] * qb_1)\n rs = np.random.RandomState(52)\n rs.shuffle(q1_ro)\n rs.shuffle(q2_ro)\n ro_bitstrings = np.vstack((q1_ro, q2_ro)).T\n assert ro_bitstrings.shape == (100, 2)\n chunksizes = np.asarray([100])\n timestamps = np.asarray([datetime.datetime.now()])\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n qubit_to_index = {a: 0, b: 1}\n ro_settings = list(cw.observables_to_settings([cirq.Z(a), cirq.Z(b)], qubits=[a, b]))\n 
(ro_meas_spec_setting,) = list(\n cw.observables_to_settings([cirq.Z(a) * cirq.Z(b)], qubits=[a, b])\n )\n ro_meas_spec = _MeasurementSpec(ro_meas_spec_setting, {})\n\n ro_bsa = cw.BitstringAccumulator(\n meas_spec=ro_meas_spec,\n simul_settings=ro_settings,\n qubit_to_index=qubit_to_index,\n bitstrings=ro_bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n return ro_bsa, ro_settings, ro_meas_spec_setting\n\n\ndef test_readout_correction():\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n ro_bsa, ro_settings, ro_meas_spec_setting = _get_mock_readout_calibration()\n\n # observables range from 1 to -1 while bitstrings range from 0 to 1\n assert ro_bsa.mean(ro_settings[0]) == 0.8\n assert ro_bsa.mean(ro_settings[1]) == 0.82\n assert np.isclose(ro_bsa.mean(ro_meas_spec_setting), 0.8 * 0.82, atol=0.05)\n\n bitstrings = np.array(\n [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 1], [1, 1]], dtype=np.uint8\n )\n chunksizes = np.asarray([len(bitstrings)])\n timestamps = np.asarray([datetime.datetime.now()])\n qubit_to_index = {a: 0, b: 1}\n settings = list(\n cw.observables_to_settings([cirq.X(a) * cirq.Y(b), cirq.X(a), cirq.Y(b)], qubits=[a, b])\n )\n meas_spec = _MeasurementSpec(settings[0], {})\n\n # First, make one with no readout correction\n bsa1 = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n )\n\n # [XY: one excitation, X: one excitation, Y: two excitations]\n np.testing.assert_allclose([1 - 1 / 4, 1 - 1 / 4, 1 - 2 / 4], bsa1.means())\n np.testing.assert_allclose([0.75, 0.75, 0.5], bsa1.means())\n\n # Turn on readout correction\n bsa2 = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n readout_calibration=ro_bsa,\n )\n\n # Readout correction increases variance\n for setting in settings:\n assert bsa2.variance(setting) > bsa1.variance(setting)\n\n np.testing.assert_allclose(\n [0.75 / (0.8 * 0.82), 0.75 / 0.8, 0.5 / 0.82], bsa2.means(), atol=0.01\n )\n\n # Variance becomes singular when readout error is 50/50\n ro_bsa_50_50, _, _ = _get_mock_readout_calibration(qa_0=50, qa_1=50)\n bsa3 = cw.BitstringAccumulator(\n meas_spec=meas_spec,\n simul_settings=settings,\n qubit_to_index=qubit_to_index,\n bitstrings=bitstrings,\n chunksizes=chunksizes,\n timestamps=timestamps,\n readout_calibration=ro_bsa_50_50,\n )\n with pytest.raises(ZeroDivisionError):\n bsa3.means()\n\n assert bsa3.variance(settings[1]) == np.inf\n\n\ndef test_readout_correction_errors():\n kwargs = _get_ZZ_Z_Z_bsa_constructor_args()\n settings = kwargs['simul_settings']\n ro_bsa, _, _ = _get_mock_readout_calibration()\n kwargs['readout_calibration'] = ro_bsa\n bsa = cw.BitstringAccumulator(**kwargs)\n\n # Variance becomes singular as the estimated value approaches zero\n np.testing.assert_allclose(bsa.means(), [0, 0, 0])\n assert bsa.variance(settings[0]) == np.inf\n"
] | [
[
"numpy.vstack",
"numpy.sqrt",
"numpy.testing.assert_equal",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.array"
]
] |
alexcornier/INSEE | [
"a5dc6e1267834754ac1cd1331203b5e835828946"
] | [
"request.py"
] | [
"#================================================================\n# Ensemble de requêtes SQL sur une base de données SQL\n# hébergées sur un serveur local postgresql\n#\n# Modules pythons nécessaires\n# psycopg2 (SQL connection)\n# pandas (DataFrame et HTML)\n# matplotlib\n# jinja2 (styles HTML)\n#\n# Alexandre Cornier - 2020\n#================================================================\n\nimport psycopg2\nimport pandas as pd\nimport webbrowser\nimport pathlib\n\n# Interrupteur d'affichage console\nbconsole = False # pas d'affichage console par défaut\n\n#---------------------------- Connection à la Base de Données ------------------------------------\nconnection = psycopg2.connect(\"host=localhost port=5432 dbname=cremi user=postgres password=Audierne\")\ncur = connection.cursor()\n\n#-------------------------------------- Fonctions ------------------------------------------------\n\n# Affichage HTML des résultats dans le navigateur\ndef affiche_html(titre_question, question, fichier, resultat_html):\n # Préparation de l'entête du fichier HTML\n header = \"\"\"<!DOCTYPE html>\n <html>\n <head>\n <title>\"\"\" + titre_question + \"\"\"</title>\n </head>\n <body>\n \n <h1>\"\"\" + titre_question + \"\"\"</h1>\n <p>\"\"\" + question + \"\"\"</p>\n \"\"\"\n\n footer = \"\"\"\n </body>\n </html>\"\"\"\n\n # write html to file\n text_file = open(fichier, \"w\")\n text_file.write(header)\n text_file.write(resultat_html)\n text_file.write(footer)\n text_file.close()\n\n # open report.html in browser\n current_path = pathlib.Path(__file__).parent.absolute()\n fichier = \"file://\" + str(current_path) + \"/\" + fichier\n webbrowser.open(fichier)\n\n\n# Question 1\ndef listeRegions():\n cur.execute(\"\"\"SELECT reg, libelle FROM regions ORDER BY reg\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code région', 'Région'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Région\" else '' for i in x])\n .hide_index()\n .render())\n\n affiche_html(\"Question 1\", \"Régions présentes dans la base de données\",\\\n \"question_01.html\", html)\n\n if (bconsole):\n print(\"les régions présentes dans la base de données sont : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 2\ndef listeDepartement():\n cur.execute(\"\"\"SELECT dep, libelle FROM departements ORDER BY dep\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code département', 'Département'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .hide_index()\n .render())\n\n affiche_html(\"Question 2\", \"Départements présents dans la base de données\",\\\n \"question_02.html\", html)\n\n if (bconsole):\n print(\"les départements présents dans la 
base de données sont : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 3\ndef choixRegions():\n print(\"Donnez le nom de la région :\")\n choix = input().capitalize()\n cur.execute(\"\"\"SELECT * FROM regionsocial WHERE region = '%s' \"\"\" % choix)\n\n lst = []\n for info in cur.fetchall():\n lst=[[\"Numéro\", info[0]],\n [\"Taux de pauvreté (%)\", info[2]],\n [\"Part des jeunes non insérés (%) en 2014\", info[3]],\n [\"Part des jeunes non insérés (%) en 2009\", info[4]],\n [\"Poids de l'économie sociale dans les emplois salariés du territoire (%)\", info[5]]]\n\n df = pd.DataFrame(lst, columns=['Information', 'Valeur'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .set_properties(subset=[\"Valeur\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 3\", \"Informations concernant la régione \" + choix,\\\n \"question_03.html\", html)\n\n if (bconsole):\n print(\"-------------- Informations concernant\", choix, \"--------------\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 4\ndef choix_departement_theme(): \n print(\"Donnez le nom du département :\")\n choix1 = input().capitalize()\n print(\"Choisissez un thème : 1.Social ou 2.Environnement (par défaut)\")\n choix2 = input()\n\n lst = []\n if choix2 == \"1\" or choix2.lower() == \"social\":\n cur.execute(\"\"\"SELECT * FROM departementsocial WHERE departements = '%s' \"\"\" % choix1)\n\n for info in cur.fetchall():\n lst = [[\"Numéro\", info[0]],\n [\"Espérance de vie des hommes à la naissance en 2015 (années)\", info[2]],\n [\"Espérance de vie des hommes à la naissance en 2010 (années)\", info[3]],\n [\"Espérance de vie des femmes à la naissance en 2015 (années)\", info[4]],\n [\"Espérance de vie des femmes à la naissance en 2010 (années)\", info[5]],\n [\"Part de la population éloignée de plus de 7 mn des services de santé de proximité (%) en 2016\", info[6]],\n [\"Part de la population estimée en zone inondable (%)\", info[7]]]\n\n df = pd.DataFrame(lst, columns=['Information', 'Valeur'])\n\n df[\"Valeur\"] = pd.to_numeric(df[\"Valeur\"], errors='coerce')\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .format({\"Valeur\": \"{:.1f}\"})\n .set_properties(subset=[\"Valeur\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 4a\",\\\n \"Informations sociales concernant le département \" + choix1,\\\n \"question_04a.html\", html)\n\n if (bconsole):\n df[\"Valeur\"] = df[\"Valeur\"].map(\"{:.1f}\".format)\n print(\"-------------- Informations concernant\", choix1, \"--------------\")\n print(df)\n\n else :\n cur.execute(\"\"\"SELECT * FROM departementenvironnement WHERE departements = '%s' \"\"\" % choix1)\n\n for info in cur.fetchall():\n lst = [[\"Numéro\", info[0]],\n [\"Taux de valorisation matière et organique (%) en 2013\", 
info[2]],\n [\"Taux de valorisation matière et organique (%) en 2009\", info[3]],\n [\"Part de surfaces artificialisées (%) en 2012\", info[4]],\n [\"Part de surfaces artificialisées (%) en 2006\", info[5]],\n [\"Part de l'agriculture biologique dans la surface agricole totale (%) en 2016\", info[6]],\n [\"Part de l'agriculture biologique dans la surface agricole totale (%) en 2010\", info[7]],\n [\"Production de granulats (tonnes) en 2014\", info[8]],\n [\"Production de granulats (tonnes) en 2009\", info[9]],\n [\"Eolien (%) en 2015\", info[10]],\n [\"Eolien (%) en 2010\", info[11]],\n [\"Photovoltaïque (%) en 2015\", info[12]],\n [\"Photovoltaïque (%) en 2010\", info[13]],\n [\"Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2015\",info[14]],\n [\"Autre (biogaz, biomasse, géothermie, incinération de déchets, petite hydraulique) (%) en 2010\",info[15]]]\n\n df = pd.DataFrame(lst, columns=['Information', 'Valeur'])\n\n df[\"Valeur\"] = pd.to_numeric(df[\"Valeur\"], errors='coerce')\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .format({\"Valeur\": \"{:.1f}\"})\n .set_properties(subset=[\"Valeur\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 4b\",\\\n \"Informations environnementales concernant le département \" + choix1,\\\n \"question_04b.html\", html)\n\n if (bconsole):\n df[\"Valeur\"] = df[\"Valeur\"].map(\"{:.1f}\".format)\n print(\"-------------- Informations concernant\", choix1, \"--------------\")\n print(df)\n\n if (bconsole):\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 5\ndef typeEnergie():\n print(\"Choisissez un type d'energie : 1.Eolien, 2.Photovoltaique ou 3.Autre\")\n choix = input()\n\n if choix == \"1\" or choix.lower() == \"eolien\":\n cur.execute(\"\"\"SELECT nb, departements, eolien2015 - eolien2010 AS croissance FROM departementenvironnement\n WHERE eolien2015 > eolien2010\n ORDER BY eolien2015 - eolien2010 DESC\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Croissance\"])\n .format({\"Croissance\": \"{:.1f}pts\"})\n .set_properties(subset=[\"Croissance\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 5a\",\\\n \"Départements où la part de l'énergie éolienne a augmenté entre les deux années de référence\",\\\n \"question_05a.html\", html)\n\n if (bconsole):\n df[\"Croissance\"] = df[\"Croissance\"].map(\"{:.1f}pts\".format)\n print(\n \"Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : \")\n print(df)\n\n if choix == \"2\" or choix.lower() == \"photovoltaique\":\n 
cur.execute(\"\"\"SELECT nb, departements, photovoltaique2015 - photovoltaique2010 AS croissance FROM departementenvironnement\n WHERE photovoltaique2015 > photovoltaique2010\n ORDER BY photovoltaique2015 - photovoltaique2010 DESC\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Croissance\"])\n .format({\"Croissance\": \"{:.1f}pts\"})\n .set_properties(subset=[\"Croissance\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 5b\",\\\n \"Départements où la part de l'énergie photovoltaïque a augmenté entre les deux années de référence\",\\\n \"question_05b.html\", html)\n\n if (bconsole):\n df[\"Croissance\"] = df[\"Croissance\"].map(\"{:.1f}pts\".format)\n print(\"Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : \")\n print(df)\n\n if choix == \"3\" or choix.lower() == \"autre\":\n cur.execute(\"\"\"SELECT nb, departements, autre2015 - autre2010 AS croissance FROM departementenvironnement\n WHERE autre2015 > autre2010\n ORDER BY autre2015 - autre2010 DESC\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code', 'Département', 'Croissance'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Croissance\"])\n .format({\"Croissance\": \"{:.1f}pts\"})\n .set_properties(subset=[\"Croissance\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 5c\",\\\n \"Départements où la part des énergies renouvelables autres a augmenté entre les deux années de référence\",\\\n \"question_05c.html\", html)\n\n if (bconsole):\n df[\"Croissance\"] = df[\"Croissance\"].map(\"{:.1f}pts\".format)\n print(\"Voici la liste des départements où la part de cette énergie a augmenté entre les deux années de référence : \")\n print(df)\n\n if (bconsole):\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 6\ndef tonnes():\n cur.execute(\"\"\"SELECT departements.reg, regions.libelle AS region, departements.libelle AS departement\n FROM departements, regions \n WHERE departements.reg\n IN (SELECT departements.reg from departements\n INNER JOIN departementenvironnement\n ON departements.dep = departementenvironnement.nb\n INNER JOIN regions\n ON departements.reg = regions.reg\n GROUP BY departements.reg\n\t\t\tHAVING SUM(prodgranulat2014) > 25000000\n\t\t\tAND SUM(prodgranulat2014) <> 'NaN')\n\t\tORDER BY region, departement\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code 
région', 'Région', 'Département'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .hide_index()\n .render())\n\n affiche_html(\"Question 6\",\\\n \"Départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014\",\\\n \"question_06.html\", html)\n\n if (bconsole):\n print(\"les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014 sont :\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 7\ndef topFive():\n cur.execute(\"\"\"SELECT nb, departements, eolien2015 FROM departementenvironnement \n ORDER BY nullif(eolien2015, 'NaN')\n DESC nulls last LIMIT 5\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code département', 'Département', \"Part de l'énergie éolienne en 2015\"])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Département\" else '' for i in x])\n .background_gradient(cmap='Blues', subset=[\"Part de l'énergie éolienne en 2015\"])\n .format({\"Part de l'énergie éolienne en 2015\": \"{:.1f}%\"})\n .set_properties(subset=[\"Part de l'énergie éolienne en 2015\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 7\",\\\n \"Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015\",\\\n \"question_07.html\", html)\n\n if (bconsole):\n df[\"Part de l'énergie éolienne en 2015\"] = df[\"Part de l'énergie éolienne en 2015\"].map(\"{:.1f}%\".format)\n print(\"Les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015 sont :\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 8\ndef weak():\n cur.execute(\"\"\"SELECT regions.reg, regions.libelle AS region,\n departements.libelle AS departement, departementenvironnement.valorisationorga2013\n FROM departements\n INNER JOIN regions\n ON departements.reg = regions.reg\n INNER JOIN departementenvironnement\n ON departements.dep = departementenvironnement.nb\n ORDER BY nullif(valorisationorga2013, 'NaN') nulls last LIMIT 1\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code région', 'Région', 'Département', 'Valorisation en 2013'])\n\n # Formattage des valeurs\n df[\"Valorisation en 2013\"] = df[\"Valorisation en 2013\"].map(\"{:.1f}\".format)\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': 
[('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Région\" else '' for i in x])\n .set_properties(subset=[\"Valorisation en 2013\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 8\",\\\n \"Région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013\",\\\n \"question_08.html\", html)\n\n if (bconsole):\n print(\"La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013 est :\")\n print(\"Reg, Région, Département, Valorisation2013\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 9\ndef bestPopMin():\n cur.execute(\"\"\"SELECT departementenvironnement.departements, departementenvironnement.agriculturebio2016 \n FROM departementenvironnement\n INNER JOIN departementsocial\n ON departementenvironnement.departements = departementsocial.departements\n ORDER BY nullif(popeloignee7min, 'NaN') DESC nulls last LIMIT 1\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Département', \"Part de l'agriculture biologique\"])\n\n # Formattage des valeurs\n df[\"Part de l'agriculture biologique\"] = df[\"Part de l'agriculture biologique\"].map(\"{:.1f}%\".format)\n\n titre_html = \"Part en 2016 (en %) de l’agriculture biologique dans la surface agricole totale du département<br>\" +\\\n \"contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité\"\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Part de l'agriculture biologique\" else '' for i in x])\n .set_properties(subset=[\"Part de l'agriculture biologique\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 9\", titre_html, \"question_09.html\", html)\n\n if (bconsole):\n print(\"En 2016, la part (en %) de l’agriculture biologique dans la surface agricole totale du département\")\n print(\"contenant le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité est : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 10\ndef pauvrete():\n cur.execute(\"\"\"SELECT pauvrete,region \n FROM regionsocial \n WHERE jeunesnoninseres2014 > 30\n AND pauvrete <> 'NaN'\n ORDER BY nullif(pauvrete, 'NaN') DESC nulls last\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Pauvreté', 'Région'])\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .apply(lambda x: ['background: lightblue' if x.name == \"Pauvreté\" else '' for i in x])\n .format({\"Pauvreté\": \"{:.2f}%\"})\n .set_properties(subset=[\"Pauvreté\"], **{'text-align': 'right'})\n .hide_index()\n .render())\n\n affiche_html(\"Question 10\",\\\n \"Taux de pauvreté 
connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014\",\\\n \"question_10.html\", html)\n\n if (bconsole):\n df[\"Pauvreté\"] = df[\"Pauvreté\"].map(\"{:.2f}%\".format)\n print(\"Le taux de pauvreté connu en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 sont : \")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au menu\")\n input()\n\n\n# Question 11\ndef poids_eco():\n cur.execute(\"\"\"SELECT regions.reg, regions.libelle, poidseco,\n AVG(photovoltaique2015) AS photovoltaique2015,\n AVG(agriculturebio2016) AS agriculturebio2016\n FROM departements\n INNER JOIN departementenvironnement\n ON departements.dep = departementenvironnement.nb\n INNER JOIN regionsocial\n ON departements.reg = regionsocial.nb\n INNER JOIN regions\n ON departements.reg = regions.reg\n GROUP BY poidseco, regions.reg\n HAVING AVG(photovoltaique2015) >= 10\n AND AVG(photovoltaique2015) <> 'NaN'\n AND AVG(agriculturebio2016) >= 5\n AND AVG(agriculturebio2016) <> 'NaN'\n ORDER BY poidseco\"\"\")\n query_result = cur.fetchall()\n df = pd.DataFrame(query_result, columns=['Code région', 'Région', \"Poids de l'économie sociale\",\\\n \"Part moyenne du photovoltaïque\", \"Part moyenne de l'agriculture Bio\"])\n\n # Conversion string vers float pour le formattage\n df[\"Part moyenne du photovoltaïque\"] = pd.to_numeric(df[\"Part moyenne du photovoltaïque\"], errors='coerce').fillna(0)\n df[\"Part moyenne de l'agriculture Bio\"] = pd.to_numeric(df[\"Part moyenne de l'agriculture Bio\"], errors=\"coerce\").fillna(0)\n\n titre_html = \"Poids de l'économie sociale en 2015 dans les emplois salariés de la région<br>\" +\\\n \"dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque<br>\" +\\\n \"et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%\"\n\n html = (df.style\n .set_table_styles([\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [\n ('background', '#606060'),\n ('color', 'white'),\n ('font-family', 'verdana')]},\n {'selector': 'td', 'props': [('font-family', 'verdana')]}])\n .set_properties(subset=[\"Poids de l'économie sociale\", \"Part moyenne du photovoltaïque\",\n \"Part moyenne de l'agriculture Bio\"], **{'text-align': 'right'})\n .hide_index()\n .background_gradient(cmap='Blues', subset=[\"Poids de l'économie sociale\"])\n .format({\"Poids de l'économie sociale\": \"{:.1f}%\"})\n .format({\"Part moyenne du photovoltaïque\": \"{:.1f}%\"})\n .format({\"Part moyenne de l'agriculture Bio\": \"{:.1f}%\"})\n .render())\n\n affiche_html(\"Question 11\", titre_html, \"question_11.html\", html)\n\n if (bconsole):\n df[\"Poids de l'économie sociale\"] = df[\"Poids de l'économie sociale\"].map(\"{:.1f}%\".format)\n df[\"Part moyenne du photovoltaïque\"] = df[\"Part moyenne du photovoltaïque\"].map(\"{:.1f}%\".format)\n df[\"Part moyenne de l'agriculture Bio\"] = df[\"Part moyenne de l'agriculture Bio\"].map(\"{:.1f}%\".format)\n print(\"Poids de l'économie sociale en 2015 dans les emplois salariés de la région\")\n print(\"dont la source de la puissance électrique en énergies renouvelables provenait à au moins 10% de l'énergie photovoltaïque\")\n print(\"et dont la part de l'agriculture biologique dans la surface agricole totale était d'au moins 5%\")\n print(df)\n\n print(\"Appuyez sur entrée pour revenir au 
menu\")\n input()\n\n\ndef menu():\n print (\"\")\n print (\"------------------------------------ Projet INSEE -----------------------------------\")\n print (\"\")\n print (\"1...Afficher la liste des régions\")\n print (\"2...Afficher la liste des départements\")\n print (\"3...Demander à l’utilisateur de choisir une région et afficher les données de la region choisie\")\n print (\"4...Demander à l’utilisateur de choisir un département et un thème : social ou environnemental,\")\n print (\" | et afficher les données demandées pour le departement choisi\")\n print (\"5...demander à l’utilisateur de choisir un type d’énergie (éolien, photovoltaïque, autre)\")\n print (\" | et en fonction de ce choix retourner la liste des départements où la part de cette énergie a augmenté\")\n print (\" | entre les deux années de référence, classés de la plus forte augmentation à la plus faible.\")\n print (\"6...les départements dont la région a eu une production de granulats supérieure à 25 000 000 tonnes en 2014\")\n print (\"7...les 5 départements avec le plus grand taux d’énergie éolienne comme source de la puissance électrique en 2015\")\n print (\"8...La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013\")\n print (\"9...La part (en %) de l’agriculture biologique dans la surface agricole totale du département contenant\")\n print (\" | le plus grand pourcentage de population éloignée de plus de 7 minutes des services de santé de proximité en 2016\")\n print (\"10..Le taux de pauvreté en 2014 des régions dont la part des jeunes non insérés est supérieure à 30% en 2014 \")\n print (\"11..Le poids de l'économie sociale dans les emplois salariés de la région dont la source de la puissance électrique\")\n print (\" | en énergies renouvelables provenait à au moins 10% de l’énergie photovoltaïque et dont la part\")\n print (\" | de l’agriculture biologique dans la surface agricole totale était d’au moins 5% en 2015\")\n print (\"\")\n print (\"0...Quitter\")\n print (\"-------------------------------------------------------------------------------------\")\n\n\n#----------------------------------------- MAIN --------------------------------------------------\n\n# Demande d'affichae console ou non, HTML seul par défaut\n\nprint(\"Souhaitez-vous afficher les résultats dans la console,\")\nprint(\"en plus de la création des fichiers HTML ?\")\nprint(\" (O Oui / N Non)\")\nchoix = input()\n\nif (choix[0].lower() == \"o\"):\n bconsole = True\n\n# Menu principal\n\nwhile True:\n menu()\n print(\"Chosissez un numéro de question pour avoir la réponse :\")\n choix = input()\n\n if (choix == \"1\"):\n listeRegions()\n elif (choix == \"2\"):\n listeDepartement()\n elif (choix == \"3\"):\n choixRegions()\n elif (choix == \"4\"):\n choix_departement_theme()\n elif (choix == \"5\"):\n typeEnergie()\n elif (choix == \"6\"):\n tonnes()\n elif (choix == \"7\"):\n topFive()\n elif (choix == \"8\"):\n weak()\n elif (choix == \"9\"):\n bestPopMin()\n elif (choix == \"10\"):\n pauvrete()\n elif (choix == \"11\"):\n poids_eco()\n elif (choix == \"0\"):\n break\n else:\n print (\"Choix invalide\")\n\n# fermeture \"propre\" du curseur et de la connection\ncur.close()\nconnection.close()"
] | [
[
"pandas.to_numeric",
"pandas.DataFrame"
]
] |
myelintek/results | [
"11c38436a158c453e3011f8684570f7a55c03330"
] | [
"v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/utils/metrics_hook_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for metrics_hook.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport shutil\nfrom tensor2tensor.utils import metrics_hook\n\nimport tensorflow as tf\n\n\nclass DummyHook(metrics_hook.MetricsBasedHook):\n\n def _process_metrics(self, global_step, metrics):\n if metrics:\n assert \"\" in metrics\n assert isinstance(metrics[\"\"], dict)\n if metrics[\"\"]:\n assert \"global_step_1\" in metrics[\"\"]\n self.test_metrics = metrics\n if global_step >= 40:\n return True\n\n\nclass MetricsHookTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.base_checkpoint_dir = tf.test.get_temp_dir()\n shutil.rmtree(cls.base_checkpoint_dir, ignore_errors=True)\n\n def ckpt_dir(self, name):\n return os.path.join(self.base_checkpoint_dir, name)\n\n @contextlib.contextmanager\n def sess(self, hook, ckpt_dir):\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=ckpt_dir,\n save_checkpoint_secs=0,\n save_summaries_steps=10,\n hooks=[hook]) as sess:\n self._sess = sess\n yield sess\n\n def flush(self):\n self._sess._hooks[1]._summary_writer.flush()\n\n def testStop(self):\n global_step = tf.train.create_global_step()\n tf.summary.scalar(\"global_step\", global_step)\n incr_global_step = tf.assign_add(global_step, 1)\n\n ckpt_dir = self.ckpt_dir(\"stop\")\n dummy = DummyHook(ckpt_dir, every_n_steps=10)\n with self.sess(dummy, ckpt_dir) as sess:\n for _ in range(20):\n sess.run(incr_global_step)\n\n # Summary files should now have 2 global step values in them\n self.flush()\n\n # Run for 10 more so that the hook gets triggered again\n for _ in range(10):\n sess.run(incr_global_step)\n\n # Check that the metrics have actually been collected.\n self.assertTrue(\"\" in dummy.test_metrics)\n metrics = dummy.test_metrics[\"\"]\n self.assertTrue(\"global_step_1\" in metrics)\n steps, vals = metrics[\"global_step_1\"]\n self.assertTrue(len(steps) == len(vals))\n self.assertTrue(len(steps) >= 2)\n\n # Run for 10 more so that the hook triggers stoppage\n for _ in range(10):\n sess.run(incr_global_step)\n\n with self.assertRaisesRegexp(RuntimeError, \"after should_stop requested\"):\n sess.run(incr_global_step)\n\n def testEarlyStoppingHook(self):\n global_step = tf.train.create_global_step()\n counter = tf.get_variable(\"count\", initializer=0, dtype=tf.int32)\n tf.summary.scalar(\"count\", counter)\n incr_global_step = tf.assign_add(global_step, 1)\n incr_counter = tf.assign_add(counter, 1)\n\n # Stop if the global step has not gone up by more than 1 in 20 steps.\n\n ckpt_dir = self.ckpt_dir(\"early\")\n stop_hook = metrics_hook.EarlyStoppingHook(\n ckpt_dir,\n \"count_1\",\n num_plateau_steps=20,\n plateau_delta=1.,\n plateau_decrease=False,\n every_n_steps=10)\n with self.sess(stop_hook, ckpt_dir) as sess:\n for _ in range(20):\n sess.run((incr_global_step, 
incr_counter))\n\n # Summary files should now have 2 values in them\n self.flush()\n\n # Run for more steps so that the hook gets triggered and we verify that we\n # don't stop.\n for _ in range(30):\n sess.run((incr_global_step, incr_counter))\n\n self.flush()\n\n # Run without incrementing the counter\n for _ in range(40):\n sess.run(incr_global_step)\n\n # Metrics should be written such that now the counter has gone >20 steps\n # without being incremented.\n self.flush()\n\n # Check that we ask for stop\n with self.assertRaisesRegexp(RuntimeError, \"after should_stop requested\"):\n for _ in range(30):\n sess.run(incr_global_step)\n\n def testPlateauOpHook(self):\n global_step = tf.train.create_global_step()\n counter = tf.get_variable(\"count\", initializer=0, dtype=tf.int32)\n indicator = tf.get_variable(\"indicator\", initializer=0, dtype=tf.int32)\n tf.summary.scalar(\"count\", counter)\n incr_global_step = tf.assign_add(global_step, 1)\n incr_counter = tf.assign_add(counter, 1)\n incr_indicator = tf.assign_add(indicator, 1)\n\n # Stop if the global step has not gone up by more than 1 in 20 steps.\n\n ckpt_dir = self.ckpt_dir(\"plateauop\")\n stop_hook = metrics_hook.PlateauOpHook(\n ckpt_dir,\n \"count_1\",\n incr_indicator,\n num_plateau_steps=20,\n plateau_delta=1.,\n plateau_decrease=False,\n every_n_steps=10)\n with self.sess(stop_hook, ckpt_dir) as sess:\n for _ in range(20):\n sess.run((incr_global_step, incr_counter))\n\n # Summary files should now have 2 values in them\n self.flush()\n\n # Run for more steps so that the hook gets triggered and we verify that we\n # don't stop.\n for _ in range(30):\n sess.run((incr_global_step, incr_counter))\n\n self.flush()\n\n # Run without incrementing the counter\n for _ in range(30):\n sess.run(incr_global_step)\n self.flush()\n\n self.assertTrue(sess.run(indicator) < 1)\n\n # Metrics should be written such that now the counter has gone >20 steps\n # without being incremented.\n # Check that we run the incr_indicator op several times\n for _ in range(3):\n for _ in range(10):\n sess.run(incr_global_step)\n self.flush()\n\n self.assertTrue(sess.run(indicator) > 1)\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.assign_add",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.test.get_temp_dir",
"tensorflow.get_variable",
"tensorflow.train.create_global_step",
"tensorflow.test.main"
]
] |
stillmatic/PyTorch-BigGraph | [
"d7d6576281faa54ec5850e204ffc07b1268fdb04"
] | [
"torchbiggraph/train_cpu.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE.txt file in the root directory of this source tree.\n\nimport logging\nimport math\nimport time\nfrom collections import defaultdict\nfrom functools import partial\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple\n\nimport torch\nimport torch.distributed as td\nfrom torch.optim import Optimizer\nfrom torchbiggraph.async_adagrad import AsyncAdagrad\nfrom torchbiggraph.batching import AbstractBatchProcessor, call, process_in_batches\nfrom torchbiggraph.bucket_scheduling import (\n BucketStats,\n DistributedBucketScheduler,\n LockServer,\n SingleMachineBucketScheduler,\n)\nfrom torchbiggraph.checkpoint_manager import (\n CheckpointManager,\n ConfigMetadataProvider,\n MetadataProvider,\n PartitionClient,\n)\nfrom torchbiggraph.config import ConfigSchema\nfrom torchbiggraph.distributed import ProcessRanks, init_process_group, start_server\nfrom torchbiggraph.edgelist import EdgeList\nfrom torchbiggraph.eval import RankingEvaluator\nfrom torchbiggraph.graph_storages import EDGE_STORAGES, ENTITY_STORAGES\nfrom torchbiggraph.losses import LOSS_FUNCTIONS, AbstractLossFunction\nfrom torchbiggraph.model import MultiRelationEmbedder, make_model\nfrom torchbiggraph.parameter_sharing import ParameterServer, ParameterSharer\nfrom torchbiggraph.row_adagrad import RowAdagrad\nfrom torchbiggraph.stats import Stats, StatsHandler\nfrom torchbiggraph.types import (\n SINGLE_TRAINER,\n UNPARTITIONED,\n Bucket,\n EntityName,\n FloatTensorType,\n ModuleStateDict,\n Partition,\n Rank,\n)\nfrom torchbiggraph.util import (\n BucketLogger,\n DummyOptimizer,\n EmbeddingHolder,\n allocate_shared_tensor,\n create_pool,\n fast_approx_rand,\n get_async_result,\n get_num_workers,\n hide_distributed_logging,\n round_up_to_nearest_multiple,\n split_almost_equally,\n tag_logs_with_process_name,\n)\n\n\nlogger = logging.getLogger(\"torchbiggraph\")\ndist_logger = logging.LoggerAdapter(logger, {\"distributed\": True})\n\n\nclass Trainer(AbstractBatchProcessor):\n def __init__(\n self,\n model_optimizer: Optimizer,\n loss_fn: AbstractLossFunction,\n relation_weights: List[float],\n ) -> None:\n super().__init__(loss_fn, relation_weights)\n self.model_optimizer = model_optimizer\n self.unpartitioned_optimizers: Dict[EntityName, Optimizer] = {}\n self.partitioned_optimizers: Dict[Tuple[EntityName, Partition], Optimizer] = {}\n\n def _process_one_batch(\n self, model: MultiRelationEmbedder, batch_edges: EdgeList\n ) -> Stats:\n model.zero_grad()\n\n scores, reg = model(batch_edges)\n\n loss = self.calc_loss(scores, batch_edges)\n\n stats = Stats(\n loss=float(loss),\n reg=float(reg) if reg is not None else 0.0,\n violators_lhs=int((scores.lhs_neg > scores.lhs_pos.unsqueeze(1)).sum()),\n violators_rhs=int((scores.rhs_neg > scores.rhs_pos.unsqueeze(1)).sum()),\n count=len(batch_edges),\n )\n if reg is not None:\n (loss + reg).backward()\n else:\n loss.backward()\n self.model_optimizer.step(closure=None)\n for optimizer in self.unpartitioned_optimizers.values():\n optimizer.step(closure=None)\n for optimizer in self.partitioned_optimizers.values():\n optimizer.step(closure=None)\n\n return stats\n\n\nclass IterationManager(MetadataProvider):\n def __init__(\n self,\n num_epochs: int,\n edge_paths: List[str],\n num_edge_chunks: int,\n *,\n iteration_idx: int = 0,\n ) -> None:\n self.num_epochs = num_epochs\n 
self.edge_paths = edge_paths\n self.num_edge_chunks = num_edge_chunks\n self.iteration_idx = iteration_idx\n\n @property\n def epoch_idx(self) -> int:\n return self.iteration_idx // self.num_edge_chunks // self.num_edge_paths\n\n @property\n def num_edge_paths(self) -> int:\n return len(self.edge_paths)\n\n @property\n def edge_path_idx(self) -> int:\n return self.iteration_idx // self.num_edge_chunks % self.num_edge_paths\n\n @property\n def edge_path(self) -> str:\n return self.edge_paths[self.edge_path_idx]\n\n @property\n def edge_chunk_idx(self) -> int:\n return self.iteration_idx % self.num_edge_chunks\n\n def __iter__(self) -> Iterable[Tuple[int, int, int]]:\n while self.epoch_idx < self.num_epochs:\n yield self.epoch_idx, self.edge_path_idx, self.edge_chunk_idx\n self.iteration_idx += 1\n\n def get_checkpoint_metadata(self) -> Dict[str, Any]:\n return {\n \"iteration/num_epochs\": self.num_epochs,\n \"iteration/epoch_idx\": self.epoch_idx,\n \"iteration/num_edge_paths\": self.num_edge_paths,\n \"iteration/edge_path_idx\": self.edge_path_idx,\n \"iteration/edge_path\": self.edge_path,\n \"iteration/num_edge_chunks\": self.num_edge_chunks,\n \"iteration/edge_chunk_idx\": self.edge_chunk_idx,\n }\n\n def __add__(self, delta: int) -> \"IterationManager\":\n return IterationManager(\n self.num_epochs,\n self.edge_paths,\n self.num_edge_chunks,\n iteration_idx=self.iteration_idx + delta,\n )\n\n\ndef should_preserve_old_checkpoint(\n iteration_manager: IterationManager, interval: Optional[int]\n) -> bool:\n \"\"\"Whether the checkpoint consumed by the current iteration should be kept\n\n Given the period, in number of epochs, at which to snapshot checkpoints,\n determinen whether the checkpoint that is used as input by the current\n iteration (as determined by the given manager) should be preserved rather\n than getting cleaned up.\n \"\"\"\n if interval is None:\n return False\n is_checkpoint_epoch = iteration_manager.epoch_idx % interval == 0\n is_first_edge_path = iteration_manager.edge_path_idx == 0\n is_first_edge_chunk = iteration_manager.edge_chunk_idx == 0\n return is_checkpoint_epoch and is_first_edge_path and is_first_edge_chunk\n\n\ndef get_num_edge_chunks(config: ConfigSchema) -> int:\n if config.num_edge_chunks is not None:\n return config.num_edge_chunks\n\n max_edges_per_bucket = 0\n # We should check all edge paths, all lhs partitions and all rhs partitions,\n # but the combinatorial explosion could lead to thousands of checks. 
Let's\n # assume that edges are uniformly distributed among buckets (this is not\n # exactly the case, as it's the entities that are uniformly distributed\n # among the partitions, and edge assignments to buckets are a function of\n # that, thus, for example, very high degree entities could skew this), and\n # use the size of bucket (0, 0) as an estimate of the average bucket size.\n # We still do it for all edge paths as there could be semantic differences\n # between them which lead to different sizes.\n for edge_path in config.edge_paths:\n edge_storage = EDGE_STORAGES.make_instance(edge_path)\n max_edges_per_bucket = max(\n max_edges_per_bucket,\n edge_storage.get_number_of_edges(UNPARTITIONED, UNPARTITIONED),\n )\n return max(1, math.ceil(max_edges_per_bucket / config.max_edges_per_chunk))\n\n\ndef make_optimizer(\n config: ConfigSchema, params: Iterable[torch.nn.Parameter], is_emb: bool\n) -> Optimizer:\n params = list(params)\n if len(params) == 0:\n optimizer = DummyOptimizer()\n elif is_emb:\n optimizer = RowAdagrad(params, lr=config.lr)\n else:\n if config.relation_lr is not None:\n lr = config.relation_lr\n else:\n lr = config.lr\n optimizer = AsyncAdagrad(params, lr=lr)\n optimizer.share_memory()\n return optimizer\n\n\nNOOP_STATS_HANDLER = StatsHandler()\n\n\nclass TrainingCoordinator:\n def __init__( # noqa\n self,\n config: ConfigSchema,\n model: Optional[MultiRelationEmbedder] = None,\n trainer: Optional[AbstractBatchProcessor] = None,\n evaluator: Optional[AbstractBatchProcessor] = None,\n rank: Rank = SINGLE_TRAINER,\n subprocess_init: Optional[Callable[[], None]] = None,\n stats_handler: StatsHandler = NOOP_STATS_HANDLER,\n ):\n \"\"\"Each epoch/pass, for each partition pair, loads in embeddings and edgelist\n from disk, runs HOGWILD training on them, and writes partitions back to disk.\n \"\"\"\n tag_logs_with_process_name(f\"Trainer-{rank}\")\n self.config = config\n if config.verbose > 0:\n import pprint\n\n pprint.PrettyPrinter().pprint(config.to_dict())\n\n logger.info(\"Loading entity counts...\")\n entity_storage = ENTITY_STORAGES.make_instance(config.entity_path)\n entity_counts: Dict[str, List[int]] = {}\n for entity, econf in config.entities.items():\n entity_counts[entity] = []\n for part in range(econf.num_partitions):\n entity_counts[entity].append(entity_storage.load_count(entity, part))\n\n # Figure out how many lhs and rhs partitions we need\n holder = self.holder = EmbeddingHolder(config)\n\n logger.debug(\n f\"nparts {holder.nparts_lhs} {holder.nparts_rhs} \"\n f\"types {holder.lhs_partitioned_types} {holder.rhs_partitioned_types}\"\n )\n\n # We know ahead of time that we wil need 1-2 storages for each embedding type,\n # as well as the max size of this storage (num_entities x D).\n # We allocate these storages n advance in `embedding_storage_freelist`.\n # When we need storage for an entity type, we pop it from this free list,\n # and then add it back when we 'delete' the embedding table.\n embedding_storage_freelist: Dict[\n EntityName, Set[torch.FloatStorage]\n ] = defaultdict(set)\n for entity_type, counts in entity_counts.items():\n max_count = max(counts)\n num_sides = (\n (1 if entity_type in holder.lhs_partitioned_types else 0)\n + (1 if entity_type in holder.rhs_partitioned_types else 0)\n + (\n 1\n if entity_type\n in (holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types)\n else 0\n )\n )\n for _ in range(num_sides):\n embedding_storage_freelist[entity_type].add(\n allocate_shared_tensor(\n (max_count, 
config.entity_dimension(entity_type)),\n dtype=torch.float,\n ).storage()\n )\n\n # create the handlers, threads, etc. for distributed training\n if config.num_machines > 1 or config.num_partition_servers > 0:\n if not 0 <= rank < config.num_machines:\n raise RuntimeError(\"Invalid rank for trainer\")\n if not td.is_available():\n raise RuntimeError(\n \"The installed PyTorch version doesn't provide \"\n \"distributed training capabilities.\"\n )\n ranks = ProcessRanks.from_num_invocations(\n config.num_machines, config.num_partition_servers\n )\n\n num_ps_groups = config.num_groups_for_partition_server\n groups: List[List[int]] = [ranks.trainers] # barrier group\n groups += [\n ranks.trainers + ranks.partition_servers\n ] * num_ps_groups # ps groups\n group_idxs_for_partition_servers = range(1, len(groups))\n\n if rank == SINGLE_TRAINER:\n logger.info(\"Setup lock server...\")\n start_server(\n LockServer(\n num_clients=len(ranks.trainers),\n nparts_lhs=holder.nparts_lhs,\n nparts_rhs=holder.nparts_rhs,\n entities_lhs=holder.lhs_partitioned_types,\n entities_rhs=holder.rhs_partitioned_types,\n entity_counts=entity_counts,\n init_tree=config.distributed_tree_init_order,\n stats_handler=stats_handler,\n ),\n process_name=\"LockServer\",\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n server_rank=ranks.lock_server,\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n self.bucket_scheduler = DistributedBucketScheduler(\n server_rank=ranks.lock_server, client_rank=ranks.trainers[rank]\n )\n\n logger.info(\"Setup param server...\")\n start_server(\n ParameterServer(num_clients=len(ranks.trainers)),\n process_name=f\"ParamS-{rank}\",\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n server_rank=ranks.parameter_servers[rank],\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n parameter_sharer = ParameterSharer(\n process_name=f\"ParamC-{rank}\",\n client_rank=ranks.parameter_clients[rank],\n all_server_ranks=ranks.parameter_servers,\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n if config.num_partition_servers == -1:\n start_server(\n ParameterServer(\n num_clients=len(ranks.trainers),\n group_idxs=group_idxs_for_partition_servers,\n log_stats=True,\n ),\n process_name=f\"PartS-{rank}\",\n init_method=config.distributed_init_method,\n world_size=ranks.world_size,\n server_rank=ranks.partition_servers[rank],\n groups=groups,\n subprocess_init=subprocess_init,\n )\n\n groups = init_process_group(\n rank=ranks.trainers[rank],\n world_size=ranks.world_size,\n init_method=config.distributed_init_method,\n groups=groups,\n )\n trainer_group, *groups_for_partition_servers = groups\n self.barrier_group = trainer_group\n\n if len(ranks.partition_servers) > 0:\n partition_client = PartitionClient(\n ranks.partition_servers,\n groups=groups_for_partition_servers,\n log_stats=True,\n )\n else:\n partition_client = None\n else:\n self.barrier_group = None\n self.bucket_scheduler = SingleMachineBucketScheduler(\n holder.nparts_lhs, holder.nparts_rhs, config.bucket_order, stats_handler\n )\n parameter_sharer = None\n partition_client = None\n hide_distributed_logging()\n\n # fork early for HOGWILD threads\n logger.info(\"Creating workers...\")\n self.num_workers = get_num_workers(config.workers)\n self.pool = create_pool(\n self.num_workers,\n subprocess_name=f\"TWorker-{rank}\",\n subprocess_init=subprocess_init,\n )\n\n checkpoint_manager = 
CheckpointManager(\n config.checkpoint_path,\n rank=rank,\n num_machines=config.num_machines,\n partition_client=partition_client,\n subprocess_name=f\"BackgRW-{rank}\",\n subprocess_init=subprocess_init,\n )\n self.checkpoint_manager = checkpoint_manager\n checkpoint_manager.register_metadata_provider(ConfigMetadataProvider(config))\n if rank == 0:\n checkpoint_manager.write_config(config)\n\n num_edge_chunks = get_num_edge_chunks(config)\n\n self.iteration_manager = IterationManager(\n config.num_epochs,\n config.edge_paths,\n num_edge_chunks,\n iteration_idx=checkpoint_manager.checkpoint_version,\n )\n checkpoint_manager.register_metadata_provider(self.iteration_manager)\n\n logger.info(\"Initializing global model...\")\n if model is None:\n model = make_model(config)\n model.share_memory()\n loss_fn = LOSS_FUNCTIONS.get_class(config.loss_fn)(margin=config.margin)\n relation_weights = [relation.weight for relation in config.relations]\n if trainer is None:\n trainer = Trainer(\n model_optimizer=make_optimizer(config, model.parameters(), False),\n loss_fn=loss_fn,\n relation_weights=relation_weights,\n )\n if evaluator is None:\n eval_overrides = {}\n if config.eval_num_batch_negs is not None:\n eval_overrides[\"num_batch_negs\"] = config.eval_num_batch_negs\n if config.eval_num_uniform_negs is not None:\n eval_overrides[\"num_uniform_negs\"] = config.eval_num_uniform_negs\n\n evaluator = RankingEvaluator(\n loss_fn=loss_fn,\n relation_weights=relation_weights,\n overrides=eval_overrides,\n )\n\n if config.init_path is not None:\n self.loadpath_manager = CheckpointManager(config.init_path)\n else:\n self.loadpath_manager = None\n\n # load model from checkpoint or loadpath, if available\n state_dict, optim_state = checkpoint_manager.maybe_read_model()\n if state_dict is None and self.loadpath_manager is not None:\n state_dict, optim_state = self.loadpath_manager.maybe_read_model()\n if state_dict is not None:\n model.load_state_dict(state_dict, strict=False)\n if optim_state is not None:\n trainer.model_optimizer.load_state_dict(optim_state)\n\n logger.debug(\"Loading unpartitioned entities...\")\n for entity in holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types:\n count = entity_counts[entity][0]\n s = embedding_storage_freelist[entity].pop()\n dimension = config.entity_dimension(entity)\n embs = torch.FloatTensor(s).view(-1, dimension)[:count]\n embs, optimizer = self._load_embeddings(entity, UNPARTITIONED, out=embs)\n holder.unpartitioned_embeddings[entity] = embs\n trainer.unpartitioned_optimizers[entity] = optimizer\n\n # start communicating shared parameters with the parameter server\n if parameter_sharer is not None:\n shared_parameters: Set[int] = set()\n for name, param in model.named_parameters():\n if id(param) in shared_parameters:\n continue\n shared_parameters.add(id(param))\n key = f\"model.{name}\"\n logger.info(\n f\"Adding {key} ({param.numel()} params) to parameter server\"\n )\n parameter_sharer.set_param(key, param.data)\n for entity, embs in holder.unpartitioned_embeddings.items():\n key = f\"entity.{entity}\"\n logger.info(f\"Adding {key} ({embs.numel()} params) to parameter server\")\n parameter_sharer.set_param(key, embs.data)\n\n # store everything in self\n self.model = model\n self.trainer = trainer\n self.evaluator = evaluator\n self.rank = rank\n self.entity_counts = entity_counts\n self.embedding_storage_freelist = embedding_storage_freelist\n self.stats_handler = stats_handler\n\n self.strict = False\n\n def train(self) -> None:\n\n holder = 
self.holder\n config = self.config\n iteration_manager = self.iteration_manager\n\n total_buckets = holder.nparts_lhs * holder.nparts_rhs\n\n # yield stats from checkpoint, to reconstruct\n # saved part of the learning curve\n if self.rank == SINGLE_TRAINER:\n for stats_dict in self.checkpoint_manager.maybe_read_stats():\n index: int = stats_dict[\"index\"]\n stats: Optional[Stats] = None\n if \"stats\" in stats_dict:\n stats: Stats = Stats.from_dict(stats_dict[\"stats\"])\n eval_stats_before: Optional[Stats] = None\n if \"eval_stats_before\" in stats_dict:\n eval_stats_before = Stats.from_dict(stats_dict[\"eval_stats_before\"])\n eval_stats_after: Optional[Stats] = None\n if \"eval_stats_after\" in stats_dict:\n eval_stats_after = Stats.from_dict(stats_dict[\"eval_stats_after\"])\n eval_stats_chunk_avg: Optional[Stats] = None\n if \"eval_stats_chunk_avg\" in stats_dict:\n eval_stats_chunk_avg = Stats.from_dict(\n stats_dict[\"eval_stats_chunk_avg\"]\n )\n self.stats_handler.on_stats(\n index,\n eval_stats_before,\n stats,\n eval_stats_after,\n eval_stats_chunk_avg,\n )\n\n for epoch_idx, edge_path_idx, edge_chunk_idx in iteration_manager:\n logger.info(\n f\"Starting epoch {epoch_idx + 1} / {iteration_manager.num_epochs}, \"\n f\"edge path {edge_path_idx + 1} / {iteration_manager.num_edge_paths}, \"\n f\"edge chunk {edge_chunk_idx + 1} / {iteration_manager.num_edge_chunks}\"\n )\n edge_storage = EDGE_STORAGES.make_instance(iteration_manager.edge_path)\n logger.info(f\"Edge path: {iteration_manager.edge_path}\")\n\n self._barrier()\n dist_logger.info(\"Lock client new epoch...\")\n self.bucket_scheduler.new_pass(\n is_first=iteration_manager.iteration_idx == 0\n )\n self._barrier()\n\n remaining = total_buckets\n cur_b: Optional[Bucket] = None\n cur_stats: Optional[BucketStats] = None\n while remaining > 0:\n old_b: Optional[Bucket] = cur_b\n old_stats: Optional[BucketStats] = cur_stats\n cur_b, remaining = self.bucket_scheduler.acquire_bucket()\n logger.info(f\"still in queue: {remaining}\")\n if cur_b is None:\n cur_stats = None\n if old_b is not None:\n # if you couldn't get a new pair, release the lock\n # to prevent a deadlock!\n tic = time.perf_counter()\n release_bytes = self._swap_partitioned_embeddings(\n old_b, None, old_stats\n )\n release_time = time.perf_counter() - tic\n logger.info(\n f\"Swapping old embeddings to release lock. 
io: {release_time:.2f} s for {release_bytes:,} bytes \"\n f\"( {release_bytes / release_time / 1e6:.2f} MB/sec )\"\n )\n time.sleep(1) # don't hammer td\n continue\n\n tic = time.perf_counter()\n self.cur_b = cur_b\n bucket_logger = BucketLogger(logger, bucket=cur_b)\n self.bucket_logger = bucket_logger\n\n io_bytes = self._swap_partitioned_embeddings(old_b, cur_b, old_stats)\n self.model.set_all_embeddings(holder, cur_b)\n\n current_index = (\n iteration_manager.iteration_idx + 1\n ) * total_buckets - remaining\n\n bucket_logger.debug(\"Loading edges\")\n edges = edge_storage.load_chunk_of_edges(\n cur_b.lhs,\n cur_b.rhs,\n edge_chunk_idx,\n iteration_manager.num_edge_chunks,\n shared=True,\n )\n num_edges = len(edges)\n\n # this might be off in the case of tensorlist or extra edge fields\n io_bytes += edges.lhs.tensor.numel() * edges.lhs.tensor.element_size()\n io_bytes += edges.rhs.tensor.numel() * edges.rhs.tensor.element_size()\n io_bytes += edges.rel.numel() * edges.rel.element_size()\n io_time = time.perf_counter() - tic\n tic = time.perf_counter()\n bucket_logger.debug(\"Shuffling edges\")\n # Fix a seed to get the same permutation every time; have it\n # depend on all and only what affects the set of edges.\n\n # Note: for the sake of efficiency, we sample eval edge idxs\n # from the edge set *with replacement*, meaning that there may\n # be duplicates of the same edge in the eval set. When we swap\n # edges into the eval set, if there are duplicates then all\n # but one will be clobbered. These collisions are unlikely\n # if eval_fraction is small.\n #\n # Importantly, this eval sampling strategy is theoretically\n # sound:\n # * Training and eval sets are (exactly) disjoint\n # * Eval set may have (rare) duplicates, but they are\n # uniformly sampled so it's still an unbiased estimator\n # of the out-of-sample statistics\n num_eval_edges = int(num_edges * config.eval_fraction)\n num_train_edges = num_edges - num_eval_edges\n if num_eval_edges > 0:\n g = torch.Generator()\n g.manual_seed(\n hash((edge_path_idx, edge_chunk_idx, cur_b.lhs, cur_b.rhs))\n )\n eval_edge_idxs = torch.randint(\n num_edges, (num_eval_edges,), dtype=torch.long, generator=g\n )\n else:\n eval_edge_idxs = None\n\n # HOGWILD evaluation before training\n eval_stats_before = self._coordinate_eval(edges, eval_edge_idxs)\n if eval_stats_before is not None:\n bucket_logger.info(f\"Stats before training: {eval_stats_before}\")\n eval_time = time.perf_counter() - tic\n tic = time.perf_counter()\n\n # HOGWILD training\n bucket_logger.debug(\"Waiting for workers to perform training\")\n stats = self._coordinate_train(edges, eval_edge_idxs, epoch_idx)\n if stats is not None:\n bucket_logger.info(f\"Training stats: {stats}\")\n train_time = time.perf_counter() - tic\n tic = time.perf_counter()\n\n # HOGWILD evaluation after training\n eval_stats_after = self._coordinate_eval(edges, eval_edge_idxs)\n if eval_stats_after is not None:\n bucket_logger.info(f\"Stats after training: {eval_stats_after}\")\n\n eval_time += time.perf_counter() - tic\n\n bucket_logger.info(\n f\"bucket {total_buckets - remaining} / {total_buckets} : \"\n f\"Trained {num_train_edges} edges in {train_time:.2f} s \"\n f\"( {num_train_edges / train_time / 1e6:.2g} M/sec ); \"\n f\"Eval 2*{num_eval_edges} edges in {eval_time:.2f} s \"\n f\"( {2 * num_eval_edges / eval_time / 1e6:.2g} M/sec ); \"\n f\"io: {io_time:.2f} s for {io_bytes:,} bytes ( {io_bytes / io_time / 1e6:.2f} MB/sec )\"\n )\n\n self.model.clear_all_embeddings()\n\n cur_stats = 
BucketStats(\n lhs_partition=cur_b.lhs,\n rhs_partition=cur_b.rhs,\n index=current_index,\n train=stats,\n eval_before=eval_stats_before,\n eval_after=eval_stats_after,\n )\n\n # release the final bucket\n self._swap_partitioned_embeddings(cur_b, None, cur_stats)\n\n # Distributed Processing: all machines can leave the barrier now.\n self._barrier()\n\n current_index = (iteration_manager.iteration_idx + 1) * total_buckets - 1\n\n self._maybe_write_checkpoint(\n epoch_idx, edge_path_idx, edge_chunk_idx, current_index\n )\n\n # now we're sure that all partition files exist,\n # so be strict about loading them\n self.strict = True\n\n def close(self):\n # cleanup\n self.pool.close()\n self.pool.join()\n\n self._barrier()\n\n self.checkpoint_manager.close()\n if self.loadpath_manager is not None:\n self.loadpath_manager.close()\n\n # FIXME join distributed workers (not really necessary)\n\n logger.info(\"Exiting\")\n\n ###########################################################################\n # private functions\n ###########################################################################\n\n def _barrier(self) -> None:\n if self.barrier_group is not None:\n td.barrier(group=self.barrier_group)\n\n def _load_embeddings(\n self,\n entity: EntityName,\n part: Partition,\n out: FloatTensorType,\n strict: bool = False,\n force_dirty: bool = False,\n ) -> Tuple[torch.nn.Parameter, Optimizer]:\n if strict:\n embs, optim_state = self.checkpoint_manager.read(\n entity, part, out=out, force_dirty=force_dirty\n )\n else:\n # Strict is only false during the first iteration, because in that\n # case the checkpoint may not contain any data (unless a previous\n # run was resumed) so we fall back on initial values.\n embs, optim_state = self.checkpoint_manager.maybe_read(\n entity, part, out=out, force_dirty=force_dirty\n )\n if embs is None and self.loadpath_manager is not None:\n embs, optim_state = self.loadpath_manager.maybe_read(\n entity, part, out=out\n )\n if embs is None:\n embs = out\n fast_approx_rand(embs)\n embs.mul_(self.config.init_scale)\n optim_state = None\n embs = torch.nn.Parameter(embs)\n optimizer = make_optimizer(self.config, [embs], True)\n if optim_state is not None:\n optimizer.load_state_dict(optim_state)\n return embs, optimizer\n\n def _swap_partitioned_embeddings(\n self,\n old_b: Optional[Bucket],\n new_b: Optional[Bucket],\n old_stats: Optional[BucketStats],\n ) -> int:\n io_bytes = 0\n logger.info(f\"Swapping partitioned embeddings {old_b} {new_b}\")\n\n holder = self.holder\n old_parts: Set[Tuple[EntityName, Partition]] = set()\n if old_b is not None:\n old_parts.update((e, old_b.lhs) for e in holder.lhs_partitioned_types)\n old_parts.update((e, old_b.rhs) for e in holder.rhs_partitioned_types)\n new_parts: Set[Tuple[EntityName, Partition]] = set()\n if new_b is not None:\n new_parts.update((e, new_b.lhs) for e in holder.lhs_partitioned_types)\n new_parts.update((e, new_b.rhs) for e in holder.rhs_partitioned_types)\n\n assert old_parts == holder.partitioned_embeddings.keys()\n\n if old_b is not None:\n if old_stats is None:\n raise TypeError(\"Got old bucket but not its stats\")\n logger.info(\"Saving partitioned embeddings to checkpoint\")\n for entity, part in old_parts - new_parts:\n logger.debug(f\"Saving ({entity} {part})\")\n embs = holder.partitioned_embeddings.pop((entity, part))\n optimizer = self.trainer.partitioned_optimizers.pop((entity, part))\n self.checkpoint_manager.write(\n entity, part, embs.detach(), optimizer.state_dict()\n )\n 
self.embedding_storage_freelist[entity].add(embs.storage())\n io_bytes += embs.numel() * embs.element_size() # ignore optim state\n # these variables are holding large objects; let them be freed\n del embs\n del optimizer\n\n self.bucket_scheduler.release_bucket(old_b, old_stats)\n\n if new_b is not None:\n logger.info(\"Loading partitioned embeddings from checkpoint\")\n for entity, part in new_parts - old_parts:\n logger.debug(f\"Loading ({entity} {part})\")\n force_dirty = self.bucket_scheduler.check_and_set_dirty(entity, part)\n count = self.entity_counts[entity][part]\n s = self.embedding_storage_freelist[entity].pop()\n dimension = self.config.entity_dimension(entity)\n embs = torch.FloatTensor(s).view(-1, dimension)[:count]\n embs, optimizer = self._load_embeddings(\n entity, part, out=embs, strict=self.strict, force_dirty=force_dirty\n )\n holder.partitioned_embeddings[entity, part] = embs\n self.trainer.partitioned_optimizers[entity, part] = optimizer\n io_bytes += embs.numel() * embs.element_size() # ignore optim state\n\n assert new_parts == holder.partitioned_embeddings.keys()\n\n return io_bytes\n\n def _coordinate_train(self, edges, eval_edge_idxs, epoch_idx) -> Stats:\n assert self.config.num_gpus == 0, \"GPU training not supported\"\n\n if eval_edge_idxs is not None:\n num_train_edges = len(edges) - len(eval_edge_idxs)\n train_edge_idxs = torch.arange(len(edges))\n train_edge_idxs[eval_edge_idxs] = torch.arange(num_train_edges, len(edges))\n train_edge_idxs = train_edge_idxs[:num_train_edges]\n edge_perm = train_edge_idxs[torch.randperm(num_train_edges)]\n else:\n edge_perm = torch.randperm(len(edges))\n\n future_all_stats = self.pool.map_async(\n call,\n [\n partial(\n process_in_batches,\n batch_size=self.config.batch_size,\n model=self.model,\n batch_processor=self.trainer,\n edges=edges,\n indices=edge_perm[s],\n # FIXME should we only delay if iteration_idx == 0?\n delay=self.config.hogwild_delay\n if epoch_idx == 0 and self.rank > 0\n else 0,\n )\n for rank, s in enumerate(\n split_almost_equally(edge_perm.size(0), num_parts=self.num_workers)\n )\n ],\n )\n all_stats = get_async_result(future_all_stats, self.pool)\n return Stats.sum(all_stats).average()\n\n def _coordinate_eval(self, edges, eval_edge_idxs) -> Optional[Stats]:\n eval_batch_size = round_up_to_nearest_multiple(\n self.config.batch_size, self.config.eval_num_batch_negs\n )\n if eval_edge_idxs is not None:\n self.bucket_logger.debug(\"Waiting for workers to perform evaluation\")\n future_all_eval_stats = self.pool.map_async(\n call,\n [\n partial(\n process_in_batches,\n batch_size=eval_batch_size,\n model=self.model,\n batch_processor=self.evaluator,\n edges=edges,\n indices=eval_edge_idxs[s],\n )\n for s in split_almost_equally(\n eval_edge_idxs.size(0), num_parts=self.num_workers\n )\n ],\n )\n all_eval_stats = get_async_result(future_all_eval_stats, self.pool)\n return Stats.sum(all_eval_stats).average()\n else:\n return None\n\n def _maybe_write_checkpoint(\n self,\n epoch_idx: int,\n edge_path_idx: int,\n edge_chunk_idx: int,\n current_index: int,\n ) -> None:\n\n config = self.config\n\n # Preserving a checkpoint requires two steps:\n # - create a snapshot (w/ symlinks) after it's first written;\n # - don't delete it once the following one is written.\n # These two happen in two successive iterations of the main loop: the\n # one just before and the one just after the epoch boundary.\n preserve_old_checkpoint = should_preserve_old_checkpoint(\n self.iteration_manager, 
config.checkpoint_preservation_interval\n )\n preserve_new_checkpoint = should_preserve_old_checkpoint(\n self.iteration_manager + 1, config.checkpoint_preservation_interval\n )\n\n # Write metadata: for multiple machines, write from rank-0\n logger.info(\n f\"Finished epoch {epoch_idx + 1} / {self.iteration_manager.num_epochs}, \"\n f\"edge path {edge_path_idx + 1} / {self.iteration_manager.num_edge_paths}, \"\n f\"edge chunk {edge_chunk_idx + 1} / \"\n f\"{self.iteration_manager.num_edge_chunks}\"\n )\n if self.rank == 0:\n for entity, embs in self.holder.unpartitioned_embeddings.items():\n logger.info(f\"Writing {entity} embeddings\")\n optimizer = self.trainer.unpartitioned_optimizers[entity]\n self.checkpoint_manager.write(\n entity,\n UNPARTITIONED,\n embs.detach(),\n optimizer.state_dict(),\n unpartitioned=True,\n )\n\n logger.info(\"Writing the metadata\")\n state_dict: ModuleStateDict = self.model.state_dict()\n self.checkpoint_manager.write_model(\n state_dict, self.trainer.model_optimizer.state_dict()\n )\n\n logger.info(\"Writing the training stats\")\n all_stats_dicts: List[Dict[str, Any]] = []\n bucket_eval_stats_list = []\n chunk_stats_dict = {\n \"epoch_idx\": epoch_idx,\n \"edge_path_idx\": edge_path_idx,\n \"edge_chunk_idx\": edge_chunk_idx,\n }\n for stats in self.bucket_scheduler.get_stats_for_pass():\n stats_dict = {\n \"lhs_partition\": stats.lhs_partition,\n \"rhs_partition\": stats.rhs_partition,\n \"index\": stats.index,\n \"stats\": stats.train.to_dict(),\n }\n if stats.eval_before is not None:\n stats_dict[\"eval_stats_before\"] = stats.eval_before.to_dict()\n bucket_eval_stats_list.append(stats.eval_before)\n\n if stats.eval_after is not None:\n stats_dict[\"eval_stats_after\"] = stats.eval_after.to_dict()\n\n stats_dict.update(chunk_stats_dict)\n all_stats_dicts.append(stats_dict)\n\n if len(bucket_eval_stats_list) != 0:\n eval_stats_chunk_avg = Stats.average_list(bucket_eval_stats_list)\n self.stats_handler.on_stats(\n index=current_index, eval_stats_chunk_avg=eval_stats_chunk_avg\n )\n chunk_stats_dict[\"index\"] = current_index\n chunk_stats_dict[\n \"eval_stats_chunk_avg\"\n ] = eval_stats_chunk_avg.to_dict()\n all_stats_dicts.append(chunk_stats_dict)\n\n self.checkpoint_manager.append_stats(all_stats_dicts)\n\n logger.info(\"Writing the checkpoint\")\n self.checkpoint_manager.write_new_version(\n config, self.entity_counts, self.embedding_storage_freelist\n )\n\n dist_logger.info(\n \"Waiting for other workers to write their parts of the checkpoint\"\n )\n self._barrier()\n dist_logger.info(\"All parts of the checkpoint have been written\")\n\n logger.info(\"Switching to the new checkpoint version\")\n self.checkpoint_manager.switch_to_new_version()\n\n dist_logger.info(\n \"Waiting for other workers to switch to the new checkpoint version\"\n )\n self._barrier()\n dist_logger.info(\"All workers have switched to the new checkpoint version\")\n\n # After all the machines have finished committing\n # checkpoints, we either remove the old checkpoints\n # or we preserve it\n if preserve_new_checkpoint:\n # Add 1 so the index is a multiple of the interval, it looks nicer.\n self.checkpoint_manager.preserve_current_version(config, epoch_idx + 1)\n if not preserve_old_checkpoint:\n self.checkpoint_manager.remove_old_version(config)\n"
] | [
[
"torch.FloatTensor",
"torch.Generator",
"torch.randint",
"torch.distributed.is_available",
"torch.nn.Parameter",
"torch.distributed.barrier",
"torch.randperm"
]
] |
EricRemmerswaal/tensorflow | [
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca",
"141ff27877579c81a213fa113bd1b474c1749aca"
] | [
"tensorflow/python/ops/ragged/ragged_getitem_test.py",
"tensorflow/python/framework/sparse_tensor_test.py",
"tensorflow/python/training/tracking/resource.py",
"tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver.py",
"tensorflow/python/tpu/tpu_embedding_v2.py",
"tensorflow/python/framework/op_def_library_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for third_party.tensorflow.python.ops.ragged_tensor.\"\"\"\n\nimport re\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor\n\nfrom tensorflow.python.platform import googletest\n\n\nclass _SliceBuilder:\n \"\"\"Helper to construct arguments for __getitem__.\n\n Usage: _SliceBuilder()[<expr>] slice_spec Python generates for <expr>.\n \"\"\"\n\n def __getitem__(self, slice_spec):\n return slice_spec\n\n\nSLICE_BUILDER = _SliceBuilder()\n\n\ndef _make_tensor_slice_spec(slice_spec, use_constant=True):\n \"\"\"Wraps all integers in an extended slice spec w/ a tensor.\n\n This function is used to help test slicing when the slice spec contains\n tensors, rather than integers.\n\n Args:\n slice_spec: The extended slice spec.\n use_constant: If true, then wrap each integer with a tf.constant. 
If false,\n then wrap each integer with a tf.placeholder.\n\n Returns:\n A copy of slice_spec, but with each integer i replaced with tf.constant(i).\n \"\"\"\n\n def make_piece_scalar(piece):\n if isinstance(piece, int):\n scalar = constant_op.constant(piece)\n if use_constant:\n return scalar\n else:\n return array_ops.placeholder_with_default(scalar, [])\n elif isinstance(piece, slice):\n return slice(\n make_piece_scalar(piece.start), make_piece_scalar(piece.stop),\n make_piece_scalar(piece.step))\n else:\n return piece\n\n if isinstance(slice_spec, tuple):\n return tuple(make_piece_scalar(piece) for piece in slice_spec)\n else:\n return make_piece_scalar(slice_spec)\n\n\n# Example 2D ragged tensor value with one ragged dimension and with scalar\n# values, expressed as nested python lists and as splits+values.\nEXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [],\n [b'g']]\nEXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7]\nEXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n\n# Example 4D ragged tensor value, with two ragged dimensions and with values\n# whose shape is [2], expressed as nested python lists and as splits+values.\nEXAMPLE_RAGGED_TENSOR_4D = [\n [ # rt[0]\n [[1, 2], [3, 4], [5, 6]], # rt[0][0]\n [[7, 8], [9, 10], [11, 12]]], # rt[0][1]\n [], # rt[1]\n [ # rt[2]\n [[13, 14], [15, 16], [17, 18]]], # rt[2][0]\n [ # rt[3]\n [[19, 20]]] # rt[3][0]\n] # pyformat: disable\nEXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4]\nEXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10]\nEXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],\n [11, 12], [13, 14], [15, 16], [17, 18],\n [19, 20]]\n\n# Example 3D ragged tensor with uniform_row_lengths.\nEXAMPLE_RAGGED_TENSOR_3D = [[[1, 2, 3], [4], [5, 6]], [[], [7, 8, 9], []]]\nEXAMPLE_RAGGED_TENSOR_3D_ROWLEN = 3\nEXAMPLE_RAGGED_TENSOR_3D_SPLITS = [0, 3, 4, 6, 6, 9, 9]\nEXAMPLE_RAGGED_TENSOR_3D_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedGetItemTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n longMessage = True # Property in unittest.Testcase. 
pylint: disable=invalid-name\n\n #=============================================================================\n # RaggedTensor.__getitem__\n #=============================================================================\n\n def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):\n \"\"\"Helper function for testing RaggedTensor.__getitem__.\n\n Checks that calling `rt.__getitem__(slice_spec) returns the expected value.\n Checks three different configurations for each slice spec:\n\n * Call __getitem__ with the slice spec as-is (with int values)\n * Call __getitem__ with int values in the slice spec wrapped in\n `tf.constant()`.\n * Call __getitem__ with int values in the slice spec wrapped in\n `tf.compat.v1.placeholder()` (so value is not known at graph\n construction time).\n\n Args:\n rt: The RaggedTensor to test.\n slice_spec: The slice spec.\n expected: The expected value of rt.__getitem__(slice_spec), as a python\n list; or an exception class.\n expected_shape: The expected shape for `rt.__getitem__(slice_spec)`.\n \"\"\"\n tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)\n tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)\n value1 = rt.__getitem__(slice_spec)\n value2 = rt.__getitem__(tensor_slice_spec1)\n value3 = rt.__getitem__(tensor_slice_spec2)\n self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))\n self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))\n self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))\n if expected_shape is not None:\n value1.shape.assert_is_compatible_with(expected_shape)\n value2.shape.assert_is_compatible_with(expected_shape)\n value3.shape.assert_is_compatible_with(expected_shape)\n\n def _TestGetItemException(self, rt, slice_spec, expected, message):\n \"\"\"Helper function for testing RaggedTensor.__getitem__ exceptions.\"\"\"\n tensor_slice_spec = _make_tensor_slice_spec(slice_spec, True)\n with self.assertRaisesRegex(expected, message):\n self.evaluate(rt.__getitem__(slice_spec))\n with self.assertRaisesRegex(expected, message):\n self.evaluate(rt.__getitem__(tensor_slice_spec))\n\n @parameterized.parameters(\n # Tests for rt[i]\n (SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]),\n (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),\n (SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]),\n (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),\n (SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]),\n (SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]),\n\n # Tests for rt[i:]\n (SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]),\n (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),\n (SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]),\n (SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]),\n (SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]),\n (SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]),\n\n # Tests for rt[:j]\n (SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]),\n (SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]),\n (SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]),\n (SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]),\n (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),\n (SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]),\n\n # Tests for rt[i:j]\n (SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]),\n (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),\n (SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]),\n (SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]),\n (SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]),\n (SLICE_BUILDER[1:-1], 
EXAMPLE_RAGGED_TENSOR_2D[1:-1]),\n\n # Tests for rt[i, j]\n (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),\n (SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]),\n (SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]),\n (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),\n (SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D),\n (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D),\n\n # Empty slice spec.\n ([], EXAMPLE_RAGGED_TENSOR_2D),\n\n # Test for ellipsis\n (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D),\n (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]),\n (SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D),\n (SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),\n (SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),\n (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]),\n\n # Test for array_ops.newaxis\n (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, array_ops.newaxis],\n [[row] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n\n # Slicing inner ragged dimensions.\n (SLICE_BUILDER[-1:,\n 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]),\n (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n\n # Strided slices\n (SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_2D[::2]),\n (SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_2D[::-1]),\n (SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_2D[::-2]),\n (SLICE_BUILDER[::-3], EXAMPLE_RAGGED_TENSOR_2D[::-3]),\n (SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, ::-1], [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, ::-2], [row[::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, ::-3], [row[::-3] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, 2::-1],\n [row[2::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, -1::-1],\n [row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[..., -1::-1],\n [row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[:, 2::-2],\n [row[2::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),\n (SLICE_BUILDER[::-1, ::-1],\n [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D[::-1]]),\n ) # pyformat: disable\n def testWithRaggedRank1(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n # Ragged tensor\n rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,\n EXAMPLE_RAGGED_TENSOR_2D_SPLITS)\n\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)\n self._TestGetItem(rt, slice_spec, expected)\n\n # pylint: disable=g-complex-comprehension\n @parameterized.parameters([(start, stop)\n for start in [-2, -1, None, 0, 1, 2]\n for stop in [-2, -1, None, 0, 1, 2]])\n def testWithStridedSlices(self, start, stop):\n test_value = [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10], [], [9],\n [1, 2, 3, 4, 5, 6, 7, 8]]\n rt = ragged_factory_ops.constant(test_value)\n for step in [-3, -2, -1, 1, 2, 3]:\n # Slice outer dimension\n self.assertAllEqual(rt[start:stop:step], test_value[start:stop:step],\n 'slice=%s:%s:%s' % (start, stop, step))\n # Slice inner dimension\n self.assertAllEqual(rt[:, start:stop:step],\n [row[start:stop:step] for row in test_value],\n 'slice=%s:%s:%s' % (start, stop, step))\n\n # pylint: disable=invalid-slice-index\n @parameterized.parameters(\n # Tests for out-of-bound errors\n (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[-6], (IndexError, 
ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[0, 2], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n (SLICE_BUILDER[3, 0], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n\n # Indexing into an inner ragged dimension\n (SLICE_BUILDER[:, 3], ValueError,\n 'Cannot index into an inner ragged dimension'),\n (SLICE_BUILDER[:1, 3], ValueError,\n 'Cannot index into an inner ragged dimension'),\n (SLICE_BUILDER[..., 3], ValueError,\n 'Cannot index into an inner ragged dimension'),\n\n # Tests for type errors\n (SLICE_BUILDER[0.5], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),\n (SLICE_BUILDER[1:3:0.5], TypeError, re.escape(\n array_ops._SLICE_TYPE_ERROR)),\n (SLICE_BUILDER[:, 1:3:0.5], TypeError,\n 'slice strides must be integers or None'),\n (SLICE_BUILDER[:, 0.5:1.5], TypeError,\n 'slice offsets must be integers or None'),\n (SLICE_BUILDER['foo'], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),\n (SLICE_BUILDER[:, 'foo':'foo'], TypeError,\n 'slice offsets must be integers or None'),\n\n # Tests for other errors\n (SLICE_BUILDER[..., 0, 0,\n 0], IndexError, 'Too many indices for RaggedTensor'),\n )\n def testErrorsWithRaggedRank1(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n # Ragged tensor\n rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,\n EXAMPLE_RAGGED_TENSOR_2D_SPLITS)\n\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n @parameterized.parameters(\n # Tests for rt[index, index, ...]\n (SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]),\n (SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),\n (SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]),\n (SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]),\n (SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]),\n (SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]),\n (SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),\n\n # Tests for rt[index, slice, ...]\n (SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]),\n (SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]),\n (SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]),\n (SLICE_BUILDER[1, :, :, 1], []),\n (SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]),\n (SLICE_BUILDER[3, :, :, 1], [[20]]),\n\n # Tests for rt[slice, slice, ...]\n (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D),\n (SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]],\n [[20]]]),\n (SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),\n (SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),\n\n # Test for ellipsis\n (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D),\n (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]),\n (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]),\n (SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]],\n [[19]]]),\n (SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]),\n (SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]),\n\n # Test for array_ops.newaxis\n (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, array_ops.newaxis],\n [[row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n\n # Empty slice spec.\n ([], EXAMPLE_RAGGED_TENSOR_4D),\n\n # Slicing inner ragged dimensions.\n (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, :-1],\n [[v[:-1] for v in 
row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, 1:2],\n [[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[1:, 1:3, 1:2],\n [[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]),\n\n # Strided slices\n (SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]),\n (SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_4D[::-1]),\n (SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_4D[::-2]),\n (SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]),\n (SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, ::2],\n [[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, 1::2],\n [[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, ::-1],\n [[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[:, :, ::-2],\n [[v[::-2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[..., ::-1, :],\n [[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),\n (SLICE_BUILDER[..., ::-1], [[[v[::-1] for v in col] for col in row]\n for row in EXAMPLE_RAGGED_TENSOR_4D]),\n ) # pyformat: disable\n def testWithRaggedRank2(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_nested_row_splits(\n EXAMPLE_RAGGED_TENSOR_4D_VALUES,\n [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)\n self._TestGetItem(rt, slice_spec, expected)\n\n @parameterized.parameters(\n # Test for errors in unsupported cases\n (SLICE_BUILDER[:, 0], ValueError,\n 'Cannot index into an inner ragged dimension.'),\n (SLICE_BUILDER[:, :, 0], ValueError,\n 'Cannot index into an inner ragged dimension.'),\n\n # Test for out-of-bounds errors.\n (SLICE_BUILDER[1, 0], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n (SLICE_BUILDER[0, 0, 3],\n (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[0, 5], (IndexError, ValueError,\n errors.InvalidArgumentError), '.*out of bounds.*'),\n )\n def testErrorsWithRaggedRank2(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_nested_row_splits(\n EXAMPLE_RAGGED_TENSOR_4D_VALUES,\n [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n @parameterized.parameters(\n (SLICE_BUILDER[:], []),\n (SLICE_BUILDER[2:], []),\n (SLICE_BUILDER[:-3], []),\n )\n def testWithEmptyTensor(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_row_splits([], [0])\n self._TestGetItem(rt, slice_spec, expected)\n\n @parameterized.parameters(\n (SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n (SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError),\n '.*out of bounds.*'),\n )\n def testErrorsWithEmptyTensor(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_row_splits([], [0])\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n 
@parameterized.parameters(\n (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),\n (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),\n (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),\n (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),\n (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),\n (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),\n (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),\n )\n def testWithPlaceholderShapes(self, slice_spec, expected):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n # Intentionally use an unknown shape for `splits`, to force the code path\n # that deals with having nrows unknown at graph construction time.\n splits = constant_op.constant(\n EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)\n splits = array_ops.placeholder_with_default(splits, None)\n rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)\n self._TestGetItem(rt, slice_spec, expected)\n\n @parameterized.parameters(\n (SLICE_BUILDER[..., 2], ValueError,\n 'Ellipsis not supported for unknown shape RaggedTensors'),)\n def testErrorsWithPlaceholderShapes(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n if not context.executing_eagerly():\n # Intentionally use an unknown shape for `values`.\n values = array_ops.placeholder_with_default([0], None)\n rt = RaggedTensor.from_row_splits(values, [0, 1])\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n def testNewAxis(self):\n # rt: [[[['a', 'b'], ['c', 'd']], [], [['e', 'f']]], []]\n splits1 = [0, 3, 3]\n splits2 = [0, 2, 2, 3]\n values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])\n rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2])\n rt_newaxis0 = rt[array_ops.newaxis]\n rt_newaxis1 = rt[:, array_ops.newaxis]\n rt_newaxis2 = rt[:, :, array_ops.newaxis]\n rt_newaxis3 = rt[:, :, :, array_ops.newaxis]\n rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]\n\n self.assertAllEqual(\n rt, [[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])\n self.assertAllEqual(\n rt_newaxis0, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])\n self.assertAllEqual(\n rt_newaxis1,\n [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])\n self.assertAllEqual(\n rt_newaxis2,\n [[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])\n self.assertAllEqual(\n rt_newaxis3,\n [[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])\n self.assertAllEqual(\n rt_newaxis4,\n [[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])\n\n self.assertEqual(rt.ragged_rank, 2)\n self.assertEqual(rt_newaxis0.ragged_rank, 3)\n self.assertEqual(rt_newaxis1.ragged_rank, 3)\n self.assertEqual(rt_newaxis2.ragged_rank, 3)\n self.assertEqual(rt_newaxis3.ragged_rank, 2)\n self.assertEqual(rt_newaxis4.ragged_rank, 2)\n\n self.assertEqual(rt_newaxis0.shape.as_list(), [1, 2, None, None, 2])\n self.assertEqual(rt_newaxis1.shape.as_list(), [2, 1, None, None, 2])\n self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, 1, None, 2])\n self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])\n self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])\n\n @parameterized.parameters(\n # EXAMPLE_RAGGED_TENSOR_3D.shape = [2, 3, None]\n\n # Indexing into uniform_row_splits dimension:\n (SLICE_BUILDER[:, 1], [r[1] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D],\n 
[2, None]),\n (SLICE_BUILDER[:, -2], [r[-2] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[:, -3], [r[-3] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[1:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],\n [1, None]),\n (SLICE_BUILDER[:, 1, 1:], [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, None]),\n (SLICE_BUILDER[1:, 1, 1:],\n [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],\n [1, None]),\n\n # Slicing uniform_row_splits dimension:\n (SLICE_BUILDER[:, 2:], [r[2:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 1, None]),\n (SLICE_BUILDER[:, -2:], [r[-2:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 2, None]),\n (SLICE_BUILDER[:, :, 1:],\n [[c[1:] for c in r] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 3, None]),\n (SLICE_BUILDER[:, 5:], [r[5:] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 0, None]),\n\n # Slicing uniform_row_splits dimension with a non-default step size:\n (SLICE_BUILDER[:, ::2], [r[::2] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 2, None]),\n (SLICE_BUILDER[:, ::-1], [r[::-1] for r in EXAMPLE_RAGGED_TENSOR_3D],\n [2, 3, None]),\n ) # pyformat: disable\n def testWithUniformRowLength(self, slice_spec, expected, expected_shape):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_uniform_row_length(\n RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,\n EXAMPLE_RAGGED_TENSOR_3D_SPLITS),\n EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)\n self.assertIsNot(rt.uniform_row_length, None)\n self._TestGetItem(rt, slice_spec, expected, expected_shape)\n\n # If the result is 3D, then check that it still has a uniform row length:\n actual = rt.__getitem__(slice_spec) # pylint: disable=assignment-from-no-return\n if actual.shape.rank == 3:\n self.assertIsNot(actual.uniform_row_length, None)\n self.assertAllEqual(actual.uniform_row_length, expected_shape[1])\n\n @parameterized.parameters(\n (SLICE_BUILDER[:, 3], errors.InvalidArgumentError, 'out of bounds'),\n (SLICE_BUILDER[:, -4], errors.InvalidArgumentError, 'out of bounds'),\n (SLICE_BUILDER[:, 10], errors.InvalidArgumentError, 'out of bounds'),\n (SLICE_BUILDER[:, -10], errors.InvalidArgumentError, 'out of bounds'),\n )\n def testErrorsWithUniformRowLength(self, slice_spec, expected, message):\n \"\"\"Test that rt.__getitem__(slice_spec) == expected.\"\"\"\n rt = RaggedTensor.from_uniform_row_length(\n RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,\n EXAMPLE_RAGGED_TENSOR_3D_SPLITS),\n EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)\n self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)\n self._TestGetItemException(rt, slice_spec, expected, message)\n\n\nif __name__ == '__main__':\n googletest.main()\n",
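The test class above exercises `RaggedTensor.__getitem__` across plain indexing, slicing, ellipsis, `newaxis`, and strided cases. As a quick orientation, here is a minimal runnable sketch of the slicing behavior those tests verify, using the public TF 2.x API; the tensor value mirrors `EXAMPLE_RAGGED_TENSOR_2D` and the variable names are illustrative only.

```python
import tensorflow as tf

# Same value as EXAMPLE_RAGGED_TENSOR_2D, built from row splits.
rt = tf.RaggedTensor.from_row_splits(
    values=[b'a', b'b', b'c', b'd', b'e', b'f', b'g'],
    row_splits=[0, 2, 5, 6, 6, 7])

print(rt[0])       # rt[i] returns one row: [b'a' b'b']
print(rt[1, 2])    # rt[i, j] indexes within a row: b'e'
print(rt[1:3])     # outer slice: <tf.RaggedTensor [[b'c', b'd', b'e'], [b'f']]>
print(rt[:, 1:])   # inner slice: drops the first element of every row
print(rt[::-1])    # strided slice over the outer (ragged) dimension

# Indexing (rather than slicing) into an inner ragged dimension is rejected,
# which is what the error tests above assert.
try:
  rt[:, 1]
except ValueError as e:
  print(e)
```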
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow.python.framework.sparse_tensor.\"\"\"\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import googletest\n\n\nclass SparseTensorTest(test_util.TensorFlowTestCase):\n\n def testPythonConstruction(self):\n indices = [[1, 2], [2, 0], [3, 4]]\n values = [b\"a\", b\"b\", b\"c\"]\n shape = [4, 5]\n sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)\n for sp in [\n sparse_tensor.SparseTensor(indices, values, shape),\n sparse_tensor.SparseTensor.from_value(sp_value),\n sparse_tensor.SparseTensor.from_value(\n sparse_tensor.SparseTensor(indices, values, shape))]:\n self.assertEqual(sp.indices.dtype, dtypes.int64)\n self.assertEqual(sp.values.dtype, dtypes.string)\n self.assertEqual(sp.dense_shape.dtype, dtypes.int64)\n self.assertEqual(sp.get_shape(), (4, 5))\n\n value = self.evaluate(sp)\n self.assertAllEqual(indices, value.indices)\n self.assertAllEqual(values, value.values)\n self.assertAllEqual(shape, value.dense_shape)\n sp_value = self.evaluate(sp)\n self.assertAllEqual(sp_value.indices, value.indices)\n self.assertAllEqual(sp_value.values, value.values)\n self.assertAllEqual(sp_value.dense_shape, value.dense_shape)\n\n def testShape(self):\n\n @def_function.function\n def test_fn(tensor):\n tensor = sparse_ops.sparse_transpose(tensor)\n self.assertEqual(tensor.shape.rank, 2)\n return tensor\n\n tensor = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])\n test_fn(tensor)\n\n def testIsSparse(self):\n self.assertFalse(sparse_tensor.is_sparse(3))\n self.assertFalse(sparse_tensor.is_sparse(\"foo\"))\n self.assertFalse(sparse_tensor.is_sparse(np.array(3)))\n self.assertTrue(\n sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))\n self.assertTrue(\n sparse_tensor.is_sparse(\n sparse_tensor.SparseTensorValue([[0]], [0], [1])))\n\n def testConsumers(self):\n with context.graph_mode():\n sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])\n w = ops.convert_to_tensor(np.ones([4, 1], np.float32))\n out = sparse_ops.sparse_tensor_dense_matmul(sp, w)\n self.assertEqual(len(sp.consumers()), 1)\n self.assertEqual(sp.consumers()[0], out.op)\n\n dense = 
sparse_ops.sparse_tensor_to_dense(sp)\n self.assertEqual(len(sp.consumers()), 2)\n self.assertIn(dense.op, sp.consumers())\n self.assertIn(out.op, sp.consumers())\n\n def testWithValues(self):\n source = sparse_tensor.SparseTensor(\n indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])\n new_tensor = source.with_values([5.0, 1.0])\n self.assertAllEqual(new_tensor.indices, source.indices)\n self.assertAllEqual(new_tensor.values, [5.0, 1.0])\n self.assertAllEqual(new_tensor.dense_shape, source.dense_shape)\n\n # ensure new value's shape is checked\n with self.assertRaises((errors.InvalidArgumentError, ValueError)):\n source.with_values([[5.0, 1.0]])\n\n\nclass ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):\n\n def test_convert_dense(self):\n value = [42, 43]\n from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(\n value)\n self.assertAllEqual(value, self.evaluate(from_value))\n\n def test_convert_sparse(self):\n indices = [[0, 1], [1, 0]]\n values = [42, 43]\n shape = [2, 2]\n sparse_tensor_value = sparse_tensor.SparseTensorValue(\n indices, values, shape)\n st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)\n from_value = self.evaluate(\n sparse_tensor.convert_to_tensor_or_sparse_tensor(sparse_tensor_value))\n from_tensor = self.evaluate(\n sparse_tensor.convert_to_tensor_or_sparse_tensor(st))\n for convertee in [from_value, from_tensor]:\n self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)\n self.assertAllEqual(sparse_tensor_value.values, convertee.values)\n self.assertAllEqual(\n sparse_tensor_value.dense_shape, convertee.dense_shape)\n\n\nclass SparseTensorShapeTest(test_util.TensorFlowTestCase):\n\n def test_simple(self):\n indices = [[0, 2]]\n values = [1]\n dense_shape = [5, 5]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n\n self.assertIsInstance(sp.shape, tensor_shape.TensorShape)\n self.assertIsInstance(sp.dense_shape, ops.Tensor)\n self.assertEqual(sp.shape.as_list(), [5, 5])\n\n def test_unknown_shape(self):\n\n @def_function.function\n def my_func(dense_shape):\n indices = [[0, 2]]\n values = [1]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, None])\n return sp\n\n my_func.get_concrete_function(\n dense_shape=tensor_spec.TensorSpec(\n dtype=dtypes.int64, shape=[2,]))\n\n def test_partial_shape(self):\n\n @def_function.function\n def my_func(x):\n indices = [[0, 2]]\n values = [1]\n y = ops.convert_to_tensor(3, dtype=dtypes.int64)\n dense_shape = [x, y]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, 3])\n return sp\n\n my_func.get_concrete_function(\n x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[]))\n\n def test_neg_shape(self):\n indices = [[0, 2]]\n values = [1]\n dense_shape = [-1, 5]\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, 5])\n\n def test_unknown_tensor_shape(self):\n\n @def_function.function\n def my_func(x):\n indices = [[0, 0]]\n values = [1]\n dense_shape = array_ops.shape(x)\n dense_shape = math_ops.cast(dense_shape, dtypes.int64)\n\n sp = sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.as_list(), [None, None])\n return sp\n\n my_func.get_concrete_function(\n x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None, None]))\n\n def test_unknown_rank(self):\n\n @def_function.function\n def my_func(dense_shape):\n indices = [[0, 0]]\n values = [1]\n sp 
= sparse_tensor.SparseTensor(indices, values, dense_shape)\n self.assertEqual(sp.shape.rank, None)\n return sp\n\n my_func.get_concrete_function(\n dense_shape=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None]))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SparseTensorSpecTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def assertAllTensorsEqual(self, list1, list2):\n self.assertLen(list1, len(list2))\n for (t1, t2) in zip(list1, list2):\n self.assertAllEqual(t1, t2)\n\n def testConstruction(self):\n spec1 = sparse_tensor.SparseTensorSpec()\n self.assertEqual(spec1.shape.rank, None)\n self.assertEqual(spec1.dtype, dtypes.float32)\n\n spec2 = sparse_tensor.SparseTensorSpec([None, None], dtypes.string)\n self.assertEqual(spec2.shape.as_list(), [None, None])\n self.assertEqual(spec2.dtype, dtypes.string)\n\n def testValueType(self):\n spec1 = sparse_tensor.SparseTensorSpec()\n self.assertEqual(spec1.value_type, sparse_tensor.SparseTensor)\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec(),\n (tensor_shape.TensorShape(None), dtypes.float32)),\n (sparse_tensor.SparseTensorSpec(shape=[5, None, None]),\n (tensor_shape.TensorShape([5, None, None]), dtypes.float32)),\n (sparse_tensor.SparseTensorSpec(dtype=dtypes.int32),\n (tensor_shape.TensorShape(None), dtypes.int32)),\n ]) # pyformat: disable\n def testSerialize(self, st_spec, expected):\n serialization = st_spec._serialize()\n # TensorShape has an unconventional definition of equality, so we can't use\n # assertEqual directly here. But repr() is deterministic and lossless for\n # the expected values, so we can use that instead.\n self.assertEqual(repr(serialization), repr(expected))\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec(dtype=dtypes.string), [\n tensor_spec.TensorSpec([None, None], dtypes.int64),\n tensor_spec.TensorSpec([None], dtypes.string),\n tensor_spec.TensorSpec([None], dtypes.int64)\n ]),\n (sparse_tensor.SparseTensorSpec(shape=[5, None, None]), [\n tensor_spec.TensorSpec([None, 3], dtypes.int64),\n tensor_spec.TensorSpec([None], dtypes.float32),\n tensor_spec.TensorSpec([3], dtypes.int64)\n ]),\n ])\n def testComponentSpecs(self, st_spec, expected):\n self.assertEqual(st_spec._component_specs, expected)\n\n @parameterized.parameters([\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec(),\n \"indices\": [[0, 1], [10, 8]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 100]\n },\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec([100, None, None]),\n \"indices\": [[0, 1, 3], [10, 8, 2]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 20, 20]\n },\n ])\n def testToFromComponents(self, st_spec, indices, values, dense_shape):\n st = sparse_tensor.SparseTensor(indices, values, dense_shape)\n actual_components = st_spec._to_components(st)\n self.assertAllTensorsEqual(actual_components,\n [indices, values, dense_shape])\n st_reconstructed = st_spec._from_components(actual_components)\n self.assertAllEqual(st.indices, st_reconstructed.indices)\n self.assertAllEqual(st.values, st_reconstructed.values)\n self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)\n\n @test_util.run_v1_only(\"SparseTensorValue is deprecated in v2\")\n def testFromNumpyComponents(self):\n indices = np.array([[0], [8]])\n values = np.array([1.0, 9.0])\n dense_shape = np.array([100])\n spec = sparse_tensor.SparseTensorSpec()\n st = spec._from_components([indices, values, dense_shape])\n self.assertIsInstance(st, sparse_tensor.SparseTensorValue)\n self.assertAllEqual(st.indices, 
indices)\n self.assertAllEqual(st.values, values)\n self.assertAllEqual(st.dense_shape, dense_shape)\n\n @parameterized.parameters([\n sparse_tensor.SparseTensorSpec(dtype=dtypes.string),\n sparse_tensor.SparseTensorSpec(shape=[5, None, None]),\n ])\n def testFlatTensorSpecs(self, st_spec):\n self.assertEqual(st_spec._flat_tensor_specs,\n [tensor_spec.TensorSpec(None, dtypes.variant)])\n\n @parameterized.parameters([\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec(),\n \"indices\": [[0, 1], [10, 8]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 100]\n },\n {\n \"st_spec\": sparse_tensor.SparseTensorSpec([100, None, None]),\n \"indices\": [[0, 1, 3], [10, 8, 2]],\n \"values\": [3.0, 5.0],\n \"dense_shape\": [100, 20, 20]\n },\n ])\n def testToFromTensorList(self, st_spec, indices, values, dense_shape):\n st = sparse_tensor.SparseTensor(indices, values, dense_shape)\n tensor_list = st_spec._to_tensor_list(st)\n st_reconstructed = st_spec._from_tensor_list(tensor_list)\n self.assertAllEqual(st.indices, st_reconstructed.indices)\n self.assertAllEqual(st.values, st_reconstructed.values)\n self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec([2, None], dtypes.float32), 32,\n sparse_tensor.SparseTensorSpec([32, 2, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([4, None], dtypes.float32), None,\n sparse_tensor.SparseTensorSpec([None, 4, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([2], dtypes.float32), 32,\n sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32)),\n ])\n def testBatch(self, spec, batch_size, expected):\n self.assertEqual(spec._batch(batch_size), expected)\n\n @parameterized.parameters([\n (sparse_tensor.SparseTensorSpec([32, None, None], dtypes.float32),\n sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([None, None, None], dtypes.float32),\n sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),\n (sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32),\n sparse_tensor.SparseTensorSpec([2], dtypes.float32)),\n ])\n def testUnbatch(self, spec, expected):\n self.assertEqual(spec._unbatch(), expected)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
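The sparse-tensor tests above cover construction, `with_values`, and the `SparseTensorSpec` type spec. A small runnable sketch of the corresponding public API, with values matching `testPythonConstruction`; the save-free snippet below is illustrative rather than a reproduction of the test harness.

```python
import tensorflow as tf

# Construction from indices, values and a dense shape.
sp = tf.sparse.SparseTensor(
    indices=[[1, 2], [2, 0], [3, 4]],
    values=[b'a', b'b', b'c'],
    dense_shape=[4, 5])
print(sp.get_shape())                       # (4, 5)
print(tf.sparse.to_dense(sp, default_value=b''))

# with_values keeps indices/shape and swaps the values tensor.
sp2 = sp.with_values([b'x', b'y', b'z'])
print(sp2.values)

# Public counterpart of the SparseTensorSpec used in the spec tests.
spec = tf.SparseTensorSpec(shape=[None, 5], dtype=tf.string)
print(spec.value_type)                      # SparseTensor
```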
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Definitions for resource-type trackable object classes.\"\"\"\n\nimport contextlib\nimport copy\nimport weakref\n\nimport six\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n# global _RESOURCE_TRACKER_STACK\n_RESOURCE_TRACKER_STACK = []\n\n\nclass ResourceTracker(object):\n \"\"\"An object that tracks a list of resources.\"\"\"\n\n __slots__ = [\"_resources\"]\n\n def __init__(self):\n self._resources = []\n\n @property\n def resources(self):\n return self._resources\n\n def add_resource(self, resource):\n self._resources.append(resource)\n\n\n@tf_contextlib.contextmanager\ndef resource_tracker_scope(resource_tracker):\n \"\"\"A context to manage resource trackers.\n\n Use this in order to collect up all resources created within a block of code.\n Example usage:\n\n ```python\n resource_tracker = ResourceTracker()\n with resource_tracker_scope(resource_tracker):\n resource = TrackableResource()\n\n assert resource_tracker.resources == [resource]\n\n Args:\n resource_tracker: The passed in ResourceTracker object\n\n Yields:\n A scope in which the resource_tracker is active.\n \"\"\"\n global _RESOURCE_TRACKER_STACK\n old = list(_RESOURCE_TRACKER_STACK)\n _RESOURCE_TRACKER_STACK.append(resource_tracker)\n try:\n yield\n finally:\n _RESOURCE_TRACKER_STACK = old\n\n\ndef _make_getter(captured_getter, captured_previous):\n \"\"\"To avoid capturing loop variables.\"\"\"\n\n def getter(*args, **kwargs):\n return captured_getter(captured_previous, *args, **kwargs)\n\n return getter\n\n\nclass _ResourceMetaclass(type):\n \"\"\"Metaclass for CapturableResource.\"\"\"\n\n def __call__(cls, *args, **kwargs):\n\n def default_resource_creator(next_creator, *a, **kw):\n assert next_creator is None\n obj = cls.__new__(cls, *a, **kw)\n obj.__init__(*a, **kw)\n return obj\n\n previous_getter = lambda *a, **kw: default_resource_creator(None, *a, **kw)\n resource_creator_stack = ops.get_default_graph()._resource_creator_stack\n for getter in resource_creator_stack[cls._resource_type()]:\n previous_getter = _make_getter(getter, previous_getter)\n\n return previous_getter(*args, **kwargs)\n\n\nclass CapturableResource(six.with_metaclass(_ResourceMetaclass,\n base.Trackable)):\n \"\"\"Holds a Tensor which a tf.function can capture.\n\n `CapturableResource`s are discovered by traversing the graph of object\n attributes, e.g. during `tf.saved_model.save`. 
They are excluded from the\n scope-based tracking of `TrackableResource`; generally things that require\n initialization should inherit from `TrackableResource` instead of\n `CapturableResource` directly.\n \"\"\"\n\n def __init__(self, device=\"\"):\n \"\"\"Initialize the `CapturableResource`.\n\n Args:\n device: A string indicating a required placement for this resource,\n e.g. \"CPU\" if this resource must be created on a CPU device. A blank\n device allows the user to place resource creation, so generally this\n should be blank unless the resource only makes sense on one device.\n \"\"\"\n self._resource_handle_value = None\n self._resource_device = device\n self._self_destruction_context = (\n context.eager_mode if context.executing_eagerly()\n else ops.get_default_graph().as_default)\n\n @classmethod\n def _resource_type(cls):\n return cls.__name__\n\n @property\n def _destruction_context(self):\n return getattr(self, \"_self_destruction_context\",\n # no-op context\n contextlib.suppress)\n\n @_destruction_context.setter\n def _destruction_context(self, destruction_context):\n self._self_destruction_context = destruction_context\n\n def _create_resource(self):\n \"\"\"A function that creates a resource handle.\"\"\"\n raise NotImplementedError(\"TrackableResource._create_resource not \"\n \"implemented.\")\n\n @property\n def _resource_handle(self):\n return self._resource_handle_value\n\n @_resource_handle.setter\n def _resource_handle(self, value):\n if isinstance(value, (ops.Tensor, ops.EagerTensor)):\n value._parent_trackable = weakref.ref(self) # pylint: disable=protected-access\n self._resource_handle_value = value\n\n def _initialize(self):\n \"\"\"A function that initializes the resource. Optional.\"\"\"\n pass\n\n def _destroy_resource(self):\n \"\"\"A function that destroys the resource. Optional.\"\"\"\n pass\n\n @property\n def resource_handle(self):\n \"\"\"Returns the resource handle associated with this Resource.\"\"\"\n if self._resource_handle is None:\n with ops.device(self._resource_device):\n self._resource_handle = self._create_resource()\n return self._resource_handle\n\n def _map_resources(self, _):\n \"\"\"For implementing `Trackable`.\"\"\"\n new_obj = copy.copy(self)\n # pylint: disable=protected-access\n with ops.device(self._resource_device):\n new_resource = new_obj._create_resource()\n new_obj._resource_handle = new_resource\n # pylint: enable=protected-access\n obj_map = {self: new_obj}\n resource_map = {self.resource_handle: new_resource}\n return obj_map, resource_map\n\n def _trackable_children(self, save_type, **kwargs):\n children = super()._trackable_children(save_type, **kwargs)\n if save_type == \"savedmodel\":\n @def_function.function(input_signature=[], autograph=False)\n def _creator():\n resource = self._create_resource()\n return resource\n\n @def_function.function(input_signature=[], autograph=False)\n def _initializer():\n self._initialize()\n return 1 # Dummy return\n\n @def_function.function(input_signature=[], autograph=False)\n def _destroyer():\n self._destroy_resource()\n return 1 # Dummy return\n\n children.update({\n \"_create_resource\": _creator,\n \"_initialize\": _initializer,\n \"_destroy_resource\": _destroyer,\n })\n return children\n\n def __del__(self):\n try:\n # Outer race condition: on program exit, the destruction context may be\n # deleted before this __del__ is called. 
At this point we can safely\n # exit without calling _destroy_resource() and let Python handle things.\n with self._destruction_context():\n # Inner race condition: possible between this and `ScopedTFFunction`\n # whereby if an entire garbage collection chain containing both\n # objects is moved to unreachable during the same garbage collection\n # cycle, the __del__ for `ScopedTFFunction` can be collected before\n # this method is called. In that case, we can't do much but\n # continue.\n self._destroy_resource()\n except Exception: # pylint: disable=broad-except\n # Silence all error logs that occur when attempting to destroy this\n # resource.\n pass\n\n\n@tf_export(\"saved_model.experimental.TrackableResource\")\nclass TrackableResource(CapturableResource):\n \"\"\"Holds a Tensor which a tf.function can capture.\n\n A TrackableResource is most useful for stateful Tensors that require\n initialization, such as `tf.lookup.StaticHashTable`. `TrackableResource`s\n are discovered by traversing the graph of object attributes, e.g. during\n `tf.saved_model.save`.\n\n A TrackableResource has three methods to override:\n\n * `_create_resource` should create the resource tensor handle.\n * `_initialize` should initialize the resource held at `self.resource_handle`.\n * `_destroy_resource` is called upon a `TrackableResource`'s destruction\n and should decrement the resource's ref count. For most resources, this\n should be done with a call to `tf.raw_ops.DestroyResourceOp`.\n\n Example usage:\n\n >>> class DemoResource(tf.saved_model.experimental.TrackableResource):\n ... def __init__(self):\n ... super().__init__()\n ... self._initialize()\n ... def _create_resource(self):\n ... return tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=[2])\n ... def _initialize(self):\n ... tf.raw_ops.AssignVariableOp(\n ... resource=self.resource_handle, value=tf.ones([2]))\n ... def _destroy_resource(self):\n ... tf.raw_ops.DestroyResourceOp(resource=self.resource_handle)\n >>> class DemoModule(tf.Module):\n ... def __init__(self):\n ... self.resource = DemoResource()\n ... def increment(self, tensor):\n ... return tensor + tf.raw_ops.ReadVariableOp(\n ... resource=self.resource.resource_handle, dtype=tf.float32)\n >>> demo = DemoModule()\n >>> demo.increment([5, 1])\n <tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 2.], dtype=float32)>\n \"\"\"\n\n def __init__(self, device=\"\"):\n \"\"\"Initialize the `TrackableResource`.\n\n Args:\n device: A string indicating a required placement for this resource,\n e.g. \"CPU\" if this resource must be created on a CPU device. 
A blank\n device allows the user to place resource creation, so generally this\n should be blank unless the resource only makes sense on one device.\n \"\"\"\n global _RESOURCE_TRACKER_STACK\n for resource_tracker in _RESOURCE_TRACKER_STACK:\n resource_tracker.add_resource(self)\n super(TrackableResource, self).__init__(device=device)\n\n\n# TODO(b/124205571,b/124092991): Solve destruction of resources.\nclass RestoredResource(TrackableResource):\n \"\"\"Restored SavedResource.\"\"\"\n\n def __init__(self, device=\"\"):\n super(RestoredResource, self).__init__(device=device)\n\n @classmethod\n def _deserialize_from_proto(cls, object_proto, dependencies, **unused_kwargs):\n obj = cls(device=object_proto.resource.device)\n resource_creator = dependencies.get(\"_create_resource\")\n if resource_creator is not None:\n obj._create_resource = resource_creator # pylint: disable=protected-access\n return obj\n\n def _add_trackable_child(self, name, value):\n setattr(self, name, value)\n if (isinstance(value, base.Trackable) and\n not isinstance(value, def_function.Function)):\n self._track_trackable(value, name)\n",
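To ground the trackable-resource machinery above in an end-to-end use, here is a hedged sketch of the pattern it enables: a resource-backed object (`tf.lookup.StaticHashTable`, which is built on `TrackableResource`) held on a `tf.Module`, captured by a `tf.function`, and exported with `tf.saved_model.save`. The module name, keys/values, and the `/tmp/demo_table_model` path are illustrative assumptions, not part of the source above.

```python
import tensorflow as tf

class Lookup(tf.Module):
  """Module holding a resource-backed table (a TrackableResource)."""

  def __init__(self):
    super().__init__()
    # The table's resource handle is created lazily via the machinery above
    # and re-created/initialized when the SavedModel is loaded.
    self.table = tf.lookup.StaticHashTable(
        tf.lookup.KeyValueTensorInitializer(
            keys=tf.constant(['a', 'b']), values=tf.constant([1, 2])),
        default_value=-1)

  @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
  def __call__(self, keys):
    # The tf.function captures the table's resource handle tensor.
    return self.table.lookup(keys)

m = Lookup()
print(m(tf.constant(['a', 'c'])))               # [ 1 -1]
tf.saved_model.save(m, '/tmp/demo_table_model')  # path is illustrative
restored = tf.saved_model.load('/tmp/demo_table_model')
print(restored(tf.constant(['b'])))             # [2]
```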
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of Cluster Resolvers for Kubernetes.\"\"\"\n\nfrom tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver.cluster_resolver import format_master_url\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('distribute.cluster_resolver.KubernetesClusterResolver')\nclass KubernetesClusterResolver(ClusterResolver):\n \"\"\"ClusterResolver for Kubernetes.\n\n This is an implementation of cluster resolvers for Kubernetes. When given the\n the Kubernetes namespace and label selector for pods, we will retrieve the\n pod IP addresses of all running pods matching the selector, and return a\n ClusterSpec based on that information.\n\n Note: it cannot retrieve `task_type`, `task_id` or `rpc_layer`. To use it\n with some distribution strategies like\n `tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to\n specify `task_type` and `task_id` by setting these attributes.\n\n Usage example with tf.distribute.Strategy:\n\n ```Python\n # On worker 0\n cluster_resolver = KubernetesClusterResolver(\n {\"worker\": [\"job-name=worker-cluster-a\", \"job-name=worker-cluster-b\"]})\n cluster_resolver.task_type = \"worker\"\n cluster_resolver.task_id = 0\n strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n cluster_resolver=cluster_resolver)\n\n # On worker 1\n cluster_resolver = KubernetesClusterResolver(\n {\"worker\": [\"job-name=worker-cluster-a\", \"job-name=worker-cluster-b\"]})\n cluster_resolver.task_type = \"worker\"\n cluster_resolver.task_id = 1\n strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n cluster_resolver=cluster_resolver)\n ```\n \"\"\"\n\n def __init__(self,\n job_to_label_mapping=None,\n tf_server_port=8470,\n rpc_layer='grpc',\n override_client=None):\n \"\"\"Initializes a new KubernetesClusterResolver.\n\n This initializes a new Kubernetes ClusterResolver. The ClusterResolver\n will attempt to talk to the Kubernetes master to retrieve all the instances\n of pods matching a label selector.\n\n Args:\n job_to_label_mapping: A mapping of TensorFlow jobs to label selectors.\n This allows users to specify many TensorFlow jobs in one Cluster\n Resolver, and each job can have pods belong with different label\n selectors. For example, a sample mapping might be\n ```\n {'worker': ['job-name=worker-cluster-a', 'job-name=worker-cluster-b'],\n 'ps': ['job-name=ps-1', 'job-name=ps-2']}\n ```\n tf_server_port: The port the TensorFlow server is listening on.\n rpc_layer: (Optional) The RPC layer TensorFlow should use to communicate\n between tasks in Kubernetes. 
Defaults to 'grpc'.\n override_client: The Kubernetes client (usually automatically retrieved\n using `from kubernetes import client as k8sclient`). If you pass this\n in, you are responsible for setting Kubernetes credentials manually.\n\n Raises:\n ImportError: If the Kubernetes Python client is not installed and no\n `override_client` is passed in.\n RuntimeError: If autoresolve_task is not a boolean or a callable.\n \"\"\"\n try:\n from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top\n\n k8sconfig.load_kube_config()\n except ImportError:\n if not override_client:\n raise ImportError('The Kubernetes Python client must be installed '\n 'before using the Kubernetes Cluster Resolver. '\n 'To install the Kubernetes Python client, run '\n '`pip install kubernetes` on your command line.')\n\n if not job_to_label_mapping:\n job_to_label_mapping = {'worker': ['job-name=tensorflow']}\n\n self._job_to_label_mapping = job_to_label_mapping\n self._tf_server_port = tf_server_port\n self._override_client = override_client\n\n self.task_type = None\n self.task_id = None\n self.rpc_layer = rpc_layer\n\n def master(self, task_type=None, task_id=None, rpc_layer=None):\n \"\"\"Returns the master address to use when creating a session.\n\n You must have set the task_type and task_id object properties before\n calling this function, or pass in the `task_type` and `task_id`\n parameters when using this function. If you do both, the function parameters\n will override the object properties.\n\n Note: this is only useful for TensorFlow 1.x.\n\n Args:\n task_type: (Optional) The type of the TensorFlow task of the master.\n task_id: (Optional) The index of the TensorFlow task of the master.\n rpc_layer: (Optional) The RPC protocol for the given cluster.\n\n Returns:\n The name or URL of the session master.\n \"\"\"\n task_type = task_type if task_type is not None else self.task_type\n task_id = task_id if task_id is not None else self.task_id\n\n if task_type is not None and task_id is not None:\n return format_master_url(\n self.cluster_spec().task_address(task_type, task_id),\n rpc_layer or self.rpc_layer)\n\n return ''\n\n def cluster_spec(self):\n \"\"\"Returns a ClusterSpec object based on the latest info from Kubernetes.\n\n We retrieve the information from the Kubernetes master every time this\n method is called.\n\n Returns:\n A ClusterSpec containing host information returned from Kubernetes.\n\n Raises:\n RuntimeError: If any of the pods returned by the master is not in the\n `Running` phase.\n \"\"\"\n if self._override_client:\n client = self._override_client\n else:\n from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top\n from kubernetes import client as k8sclient # pylint: disable=g-import-not-at-top\n\n k8sconfig.load_kube_config()\n client = k8sclient.CoreV1Api()\n\n cluster_map = {}\n\n for tf_job in self._job_to_label_mapping:\n all_pods = []\n for selector in self._job_to_label_mapping[tf_job]:\n ret = client.list_pod_for_all_namespaces(label_selector=selector)\n selected_pods = []\n\n # Sort the list by the name to make sure it doesn't change call to call.\n for pod in sorted(ret.items, key=lambda x: x.metadata.name):\n if pod.status.phase == 'Running':\n selected_pods.append(\n '%s:%s' % (pod.status.host_ip, self._tf_server_port))\n else:\n raise RuntimeError('Pod \"%s\" is not running; phase: \"%s\"' %\n (pod.metadata.name, pod.status.phase))\n all_pods.extend(selected_pods)\n cluster_map[tf_job] = all_pods\n\n return 
server_lib.ClusterSpec(cluster_map)\n",
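Since `cluster_spec()` above only needs `list_pod_for_all_namespaces` from the Kubernetes client, its label-selector-to-`ClusterSpec` mapping can be sketched with a stand-in client passed via `override_client`. The fake client and pod objects below are illustrative, and the sketch assumes the `kubernetes` Python package is either absent or already configured, since constructing the resolver otherwise attempts `load_kube_config()` first.

```python
import types
import tensorflow as tf

def _pod(name, host_ip, phase='Running'):
  # Mimics only the fields cluster_spec() reads from a V1Pod.
  return types.SimpleNamespace(
      metadata=types.SimpleNamespace(name=name),
      status=types.SimpleNamespace(phase=phase, host_ip=host_ip))

class FakeCoreV1Api(object):
  # Stand-in for kubernetes.client.CoreV1Api with the single call used above.
  def list_pod_for_all_namespaces(self, label_selector):
    return types.SimpleNamespace(items=[
        _pod('worker-0', '10.0.0.1'),
        _pod('worker-1', '10.0.0.2'),
    ])

resolver = tf.distribute.cluster_resolver.KubernetesClusterResolver(
    job_to_label_mapping={'worker': ['job-name=tensorflow']},
    tf_server_port=8470,
    override_client=FakeCoreV1Api())

print(resolver.cluster_spec().as_dict())
# {'worker': ['10.0.0.1:8470', '10.0.0.2:8470']}
```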
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Mid level API for TPU Embeddings.\"\"\"\n\nimport functools\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Text, Tuple, Union\n\nfrom absl import logging\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as tf_device\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework.tensor_shape import TensorShape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.tpu import tpu\nfrom tensorflow.python.tpu import tpu_embedding_v2_utils\nfrom tensorflow.python.tpu.ops import tpu_ops\nfrom tensorflow.python.training.saving import saveable_hook\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.types import core\nfrom tensorflow.python.types import internal as internal_types\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_HOOK_KEY = \"TPUEmbedding_saveable\"\n_NAME_KEY = \"_tpu_embedding_layer\"\n\n\nclass TPUShardedVariable(sharded_variable.ShardedVariableMixin):\n \"\"\"A ShardedVariable class for TPU.\"\"\"\n\n @property\n def _in_graph_mode(self):\n return self.variables[0]._in_graph_mode # pylint: disable=protected-access\n\n\ndef _add_key_attr(op, name):\n op._set_attr(_NAME_KEY, attr_value_pb2.AttrValue(s=compat.as_bytes(name))) # pylint: disable=protected-access\n\n\n@tf_export(\"tpu.experimental.embedding.TPUEmbedding\")\nclass TPUEmbedding(tracking.AutoTrackable):\n \"\"\"The TPUEmbedding mid level API.\n\n NOTE: When instantiated under a TPUStrategy, this class can only be created\n once per call to `tf.tpu.experimental.initialize_tpu_system`. 
If you wish to\n re-initialize the embedding engine you must re-initialize the tpu as well.\n Doing this will clear any variables from TPU, so ensure you have checkpointed\n before you do this. If a further instances of the class are needed,\n set the `initialize_tpu_embedding` argument to `False`.\n\n This class can be used to support training large embeddings on TPU. When\n creating an instance of this class, you must specify the complete set of\n tables and features you expect to lookup in those tables. See the\n documentation of `tf.tpu.experimental.embedding.TableConfig` and\n `tf.tpu.experimental.embedding.FeatureConfig` for more details on the complete\n set of options. We will cover the basic usage here.\n\n NOTE: multiple `FeatureConfig` objects can use the same `TableConfig` object,\n allowing different features to share the same table:\n\n ```python\n table_config_one = tf.tpu.experimental.embedding.TableConfig(\n vocabulary_size=...,\n dim=...)\n table_config_two = tf.tpu.experimental.embedding.TableConfig(\n vocabulary_size=...,\n dim=...)\n feature_config = {\n 'feature_one': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_one),\n 'feature_two': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_one),\n 'feature_three': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_two)}\n ```\n\n There are two modes under which the `TPUEmbedding` class can used. This\n depends on if the class was created under a `TPUStrategy` scope or not.\n\n Under `TPUStrategy`, we allow access to the method `enqueue`, `dequeue` and\n `apply_gradients`. We will show examples below of how to use these to train\n and evaluate your model. Under CPU, we only access to the `embedding_tables`\n property which allow access to the embedding tables so that you can use them\n to run model evaluation/prediction on CPU.\n\n First lets look at the `TPUStrategy` mode. Initial setup looks like:\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n ```\n\n When creating a distributed dataset that is to be passed to the enqueue\n operation a special input option must be specified:\n\n ```python\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n ```\n\n Different feature inputs can have different shapes. For dense and sparse\n tensor, rank 2 and above is supported. For ragged tensor, although only rank 2\n is supported, you can specify the output shape to be rank 2 and above. The\n output shape specified in the FeatureConfig has the first priority. The input\n shape passed in build method has second priority and the input shapes\n auto detected from input feature has the lowest priority. The latter two will\n be converted to output shapes by omitting the last dimension. If the lower\n priority one has output shapes which don't match the former one. A ValueError\n will be raised. Only when the former one has undefined output shapes, the\n latter one can override.\n\n NOTE: All batches passed to the layer can have different input shapes. But\n these input shapes need to match with the output shapes set by either\n `FeatureConfig` or build method except for ragged tensor. 
Only 2D\n ragged tensor with output shape set to higher dimensions is allowed as\n long as the total number of elements matches. All subsequent calls must have\n the same input shapes. In the event that the input shapes cannot be\n automatically determined by the enqueue method, you must call\n the build method with the input shapes or provide output shapes in the\n `FeatureConfig` to initialize the layer.\n\n To use this API on TPU you should use a custom training loop. Below is an\n example of a training and evaluation step:\n\n ```python\n @tf.function\n def training_step(dataset_iterator, num_steps):\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n model_output = model(activations)\n loss = ... # some function of labels and model_output\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n # Insert your model gradient and optimizer application here\n\n for _ in tf.range(num_steps):\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features, ))\n\n @tf.function\n def evalution_step(dataset_iterator, num_steps):\n def tpu_step(tpu_features):\n activations = embedding.dequeue()\n model_output = model(activations)\n # Insert your evaluation code here.\n\n for _ in tf.range(num_steps):\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=False)\n strategy.run(tpu_step, args=(tpu_features, ))\n ```\n\n NOTE: The calls to `enqueue` have `training` set to `True` when\n `embedding.apply_gradients` is used and set to `False` when\n `embedding.apply_gradients` is not present in the function. If you don't\n follow this pattern you may cause an error to be raised or the tpu may\n deadlock.\n\n In the above examples, we assume that the user has a dataset which returns\n a tuple where the first element of the tuple matches the structure of what\n was passed as the `feature_config` argument to the object initializer. Also we\n utilize `tf.range` to get a `tf.while_loop` in order to increase performance.\n\n When checkpointing your model, you should include your\n `tf.tpu.experimental.embedding.TPUEmbedding` object in the checkpoint. It is a\n trackable object and saving it will save the embedding tables and their\n optimizer slot variables:\n\n ```python\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.save(...)\n ```\n\n On CPU, only the `embedding_table` property is usable. 
This will allow you to\n restore a checkpoint to the object and have access to the table variables:\n\n ```python\n model = model_fn(...)\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.restore(...)\n\n tables = embedding.embedding_tables\n ```\n\n You can now use table in functions like `tf.nn.embedding_lookup` to perform\n your embedding lookup and pass to your model.\n\n \"\"\"\n\n def __init__(\n self,\n feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], # pylint:disable=g-bare-generic\n optimizer: Optional[tpu_embedding_v2_utils._Optimizer], # pylint:disable=protected-access\n pipeline_execution_with_tensor_core: bool = False):\n \"\"\"Creates the TPUEmbedding mid level API object.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=tf.tpu.experimental.embedding.FeatureConfig(\n table=tf.tpu.experimental.embedding.TableConfig(\n dim=...,\n vocabulary_size=...)))\n ```\n\n Args:\n feature_config: A nested structure of\n `tf.tpu.experimental.embedding.FeatureConfig` configs.\n optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`,\n `tf.tpu.experimental.embedding.Adagrad` or\n `tf.tpu.experimental.embedding.Adam`. When not created under\n TPUStrategy may be set to None to avoid the creation of the optimizer\n slot variables, useful for optimizing memory consumption when exporting\n the model for serving where slot variables aren't needed.\n pipeline_execution_with_tensor_core: If True, the TPU embedding\n computations will overlap with the TensorCore computations (and hence\n will be one step old). Set to True for improved performance.\n\n Raises:\n ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD,\n Adam or Adagrad) or None when created under a TPUStrategy.\n \"\"\"\n self._strategy = distribution_strategy_context.get_strategy()\n self._using_tpu = isinstance(self._strategy, (tpu_strategy.TPUStrategy,\n tpu_strategy.TPUStrategyV2))\n self._pipeline_execution_with_tensor_core = (\n pipeline_execution_with_tensor_core)\n\n self._feature_config = feature_config\n self._output_shapes = []\n for feature in nest.flatten(feature_config):\n self._output_shapes.append(feature.output_shape)\n\n # The TPU embedding ops are slightly inconsistent with how they refer to\n # tables:\n # * The enqueue op takes a parallel list of tensors for input, one of those\n # is the table id for the feature which matches the integer index of the\n # table in the proto created by _create_config_proto().\n # * The recv_tpu_embedding_activations op emits lookups per table in the\n # order from the config proto.\n # * The send_tpu_embedding_gradients expects input tensors to be per table\n # in the same order as the config proto.\n # * Per optimizer load and retrieve ops are specified per table and take the\n # table name rather than the table id.\n # Thus we must fix a common order to tables and ensure they have unique\n # names.\n\n # Set table order here to the order of the first occurence of the table in a\n # feature provided by the user. 
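    # For example (illustrative, names are placeholders): with
    #   feature_config = {'a': FeatureConfig(table=t_two),
    #                     'b': FeatureConfig(table=t_one),
    #                     'c': FeatureConfig(table=t_two)}
    # nest.flatten visits 'a', 'b', 'c', so t_two is assigned table id 0 and
    # t_one table id 1 on every instantiation.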
The order of this struct must be fixed\n # to provide the user with deterministic behavior over multiple\n # instantiations.\n self._table_config = []\n for feature in nest.flatten(feature_config):\n if feature.table not in self._table_config:\n self._table_config.append(feature.table)\n\n # Ensure tables have unique names. Also error check the optimizer as we\n # specifically don't do that in the TableConfig class to allow high level\n # APIs that are built on this to use strings/other classes to represent\n # optimizers (before they are passed to this class).\n table_names = []\n for i, table in enumerate(self._table_config):\n if table.optimizer is None:\n # TODO(bfontain) Should we allow some sort of optimizer merging here?\n table.optimizer = optimizer\n if ((table.optimizer is not None or self._using_tpu) and\n not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)): # pylint: disable=protected-access\n raise ValueError(\"{} is an unsupported optimizer class. Please pass an \"\n \"instance of one of the optimizer classes under \"\n \"tf.tpu.experimental.embedding.\".format(\n type(table.optimizer)))\n if table.name is None:\n table.name = \"table_{}\".format(i)\n if table.name in table_names:\n raise ValueError(\"Tables must have a unique name. \"\n f\"Multiple tables with name {table.name} found.\")\n table_names.append(table.name)\n\n if self._using_tpu:\n # Extract a list of callable learning rates also in fixed order. Each\n # table in the confix proto will get a index into this list and we will\n # pass this list in the same order after evaluation to the\n # send_tpu_embedding_gradients op.\n self._dynamic_learning_rates = list({\n table.optimizer.learning_rate for table in self._table_config if\n callable(table.optimizer.learning_rate)})\n\n # We need to list of host devices for the load/retrieve operations.\n self._hosts = get_list_of_hosts(self._strategy)\n\n self._built = False\n self._verify_output_shapes_on_enqueue = True\n\n def build(self, per_replica_input_shapes=None, per_replica_batch_size=None): # pylint:disable=g-bare-generic\n \"\"\"Create the underlying variables and initializes the TPU for embeddings.\n\n This method creates the underlying variables (including slot variables). If\n created under a TPUStrategy, this will also initialize the TPU for\n embeddings.\n\n This function will automatically get called by enqueue, which will try to\n determine your output shapes. If this fails, you must manually\n call this method before you call enqueue.\n\n Args:\n per_replica_input_shapes: A nested structure of The per replica input\n shapes that matches the structure of the feature config. The input\n shapes should be the same as the input shape of the feature (except for\n ragged tensor) Note that it is fixed and the same per replica input\n shapes must be used for both training and evaluation. If you want to\n calculate this from the global input shapes, you can use\n `num_replicas_in_sync` property of your strategy object. May be set to\n None if not created under a TPUStrategy.\n per_replica_batch_size: (Deprecated) The per replica batch size that you\n intend to use. Note that is fixed and the same batch size must be used\n for both training and evaluation. If you want to calculate this from the\n global batch size, you can use `num_replicas_in_sync` property of your\n strategy object. 
May be set to None if not created under a TPUStrategy.\n\n Raises:\n ValueError: If per_replica_input_shapes is inconsistent with the output\n shapes stored in the feature config or the output shapes get from the\n input shapes are not fully defined.\n RuntimeError: If tpu embedding is already initialized on TPU.\n \"\"\"\n if self._built:\n return\n\n if self._using_tpu:\n # If the tpu embedding is already initialized on TPU, raise runtime error.\n # Below logic is not added in `initialize_system_for_tpu_embedding`\n # because doing exception control flow in graph mode is difficult.\n if tpu_ops.is_tpu_embedding_initialized():\n raise RuntimeError(\n \"TPU is already initialized for embeddings. This may be caused by \"\n \"using multiple TPUEmbedding instances in a TPU scope which is \"\n \"unsupported\")\n self._get_and_update_output_shapes_from_input(per_replica_input_shapes,\n per_replica_batch_size)\n\n self._config_proto = self._create_config_proto()\n\n logging.info(\"Initializing TPU Embedding engine.\")\n tpu_embedding_v2_utils.log_tpu_embedding_configuration(self._config_proto)\n\n @def_function.function\n def load_config():\n tpu.initialize_system_for_tpu_embedding(self._config_proto)\n\n load_config()\n logging.info(\"Done initializing TPU Embedding engine.\")\n\n # Create and load variables and slot variables into the TPU.\n # Note that this is a dict of dicts. Keys to the first dict are table names.\n # We would prefer to use TableConfigs, but then these variables won't be\n # properly tracked by the tracking API.\n self._variables = self._create_variables_and_slots()\n\n self._built = True\n\n # This is internally conditioned self._built and self._using_tpu\n self._load_variables()\n\n def _maybe_build(self,\n output_shapes: Optional[Union[List[int], Iterable]] = None): # pylint:disable=g-bare-generic\n if not self._built:\n # This can be called while tracing a function, so we wrap the\n # initialization code with init_scope so it runs eagerly, this means that\n # it will not be included the function graph generated by tracing so that\n # we can be sure that we only initialize the TPU for embeddings exactly\n # once.\n with ops.init_scope():\n self.build(output_shapes)\n\n def _get_and_update_output_shapes_from_input(\n self,\n per_replica_input_shapes: Optional[List[TensorShape]] = None,\n per_replica_batch_size: Optional[int] = None):\n \"\"\"Get and update the per replica output shapes from the input.\"\"\"\n per_replica_output_shapes = None\n if per_replica_batch_size and per_replica_input_shapes is None:\n logging.warning(\n \"per_replica_batch_size argument will be deprecated, please specify \"\n \"all the input shapes using per_replica_input_shapes argument.\")\n per_replica_output_shapes = self._get_output_shapes_from_batch_size(\n per_replica_batch_size)\n\n # Update the input shapes if provided.\n if per_replica_input_shapes is not None:\n if isinstance(per_replica_input_shapes, int):\n logging.warning(\n \"Passing batch size to per_replica_input_shapes argument will be\"\n \" deprecated, please specify all the input shapes using\"\n \" per_replica_input_shapes argument.\")\n per_replica_output_shapes = self._get_output_shapes_from_batch_size(\n per_replica_input_shapes)\n else:\n nest.assert_same_structure(\n nest.flatten(per_replica_input_shapes),\n nest.flatten(self._feature_config))\n\n # Convert the nested structure to list.\n per_replica_input_shapes = nest.flatten(per_replica_input_shapes)\n\n per_replica_output_shapes = 
self._get_output_shapes_from_input_shapes(\n per_replica_input_shapes)\n\n if per_replica_output_shapes is not None:\n\n # Check the output shapes with existing output shapes setting.\n self._check_output_shapes(per_replica_output_shapes)\n\n # Update the output shapes with existing output shapes setting.\n # This is necessary Because the output shapes might be missing from\n # the feature config, the usr can set it:\n # 1. calling the build method\n # 2. output shapes auto detected when calling the dequeue method for\n # for the first time. The dequeue method will call build method\n # with the output shapes.\n # Either these two situations will lead to an update to the existing\n # output shapes.\n self._update_output_shapes(per_replica_output_shapes)\n\n # Check if the output shapes are fully defined. This is required in order\n # to set them in the feature descriptor field of the tpu embedding config\n # proto.\n self._check_output_shapes_fully_defined()\n\n def _get_output_shapes_from_input_shapes(\n self, input_shapes: List[TensorShape]) -> List[TensorShape]:\n \"\"\"Get output shapes from the flattened input shapes list.\"\"\"\n output_shapes = []\n for input_shape, feature in zip(input_shapes,\n nest.flatten(self._feature_config)):\n if input_shape.rank is None or input_shape.rank < 1:\n raise ValueError(\n \"Received input tensor of shape {}. Rank must be 1 and above\"\n .format(input_shape))\n # Update the input shape with the max sequence length. Only update when\n # 1. Input feature is 2D ragged or sparse tensor.\n # 2. Output shape is not set in the feature config and the max sequence\n # length is set.\n if (len(input_shape) == 2 and input_shape[-1] != 1 and\n not feature.output_shape and feature.max_sequence_length > 0):\n input_shape_list = input_shape.as_list()\n input_shape_list.insert(\n len(input_shape_list) - 1, feature.max_sequence_length)\n input_shape = TensorShape(input_shape_list)\n if input_shape.rank == 1:\n output_shapes.append(input_shape)\n else:\n output_shapes.append(input_shape[:-1])\n return output_shapes\n\n @property\n def embedding_tables(\n self\n ) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n \"\"\"Returns a dict of embedding tables, keyed by `TableConfig`.\n\n This property only works when the `TPUEmbedding` object is created under a\n non-TPU strategy. This is intended to be used to for CPU based lookup when\n creating a serving checkpoint.\n\n Returns:\n A dict of embedding tables, keyed by `TableConfig`.\n\n Raises:\n RuntimeError: If object was created under a `TPUStrategy`.\n \"\"\"\n # We don't support returning tables on TPU due to their sharded nature and\n # the fact that when using a TPUStrategy:\n # 1. Variables are stale and are only updated when a checkpoint is made.\n # 2. Updating the variables won't affect the actual tables on the TPU.\n if self._using_tpu:\n if save_context.in_save_context():\n return {table: self._variables[table.name][\"parameters\"].variables[0]\n for table in self._table_config}\n raise RuntimeError(\"Unable to retrieve embedding tables when using a TPU \"\n \"strategy. If you need access, save your model, \"\n \"create this object under a CPU strategy and restore.\")\n\n self._maybe_build(None)\n\n # Only return the tables and not the slot variables. 
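    # For example (illustrative usage, mirroring the class docstring): the
    # returned dict can feed standard CPU lookup ops, e.g.
    #   tables = embedding.embedding_tables
    #   activations = tf.nn.embedding_lookup(tables[table_config_one], ids)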
On CPU this are honest\n # tf.Variables.\n return {table: self._variables[table.name][\"parameters\"]\n for table in self._table_config}\n\n def _create_config_proto(\n self\n ) -> tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration:\n \"\"\"Creates the TPUEmbeddingConfiguration proto.\n\n This proto is used to initialize the TPU embedding engine.\n\n Returns:\n A TPUEmbeddingConfiguration proto.\n \"\"\"\n\n config_proto = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()\n\n # Map each callable dynamic learning rate to its in index in the list.\n # The learning rate index is the index of the dynamic learning rate for this\n # table (if it exists) in the list we created at initialization. We don't\n # simply create one learning rate index per table as this has extremely bad\n # performance characteristics. The more separate optimization configurations\n # we have, the worse the performance will be.\n learning_rate_index = {r: i for i, r in enumerate(\n self._dynamic_learning_rates)}\n\n for table in self._table_config:\n table_descriptor = config_proto.table_descriptor.add()\n table_descriptor.name = table.name\n\n # For small tables, we pad to the number of hosts so that at least one\n # id will be assigned to each host.\n table_descriptor.vocabulary_size = max(table.vocabulary_size,\n self._strategy.extended.num_hosts)\n table_descriptor.dimension = table.dim\n\n parameters = table_descriptor.optimization_parameters\n\n # We handle the learning rate separately here and don't allow the\n # optimization class to handle this, as it doesn't know about dynamic\n # rates.\n if callable(table.optimizer.learning_rate):\n parameters.learning_rate.dynamic.tag = (\n learning_rate_index[table.optimizer.learning_rate])\n else:\n parameters.learning_rate.constant = table.optimizer.learning_rate\n\n # Use optimizer to handle the rest of the parameters.\n table.optimizer._set_optimization_parameters(parameters) # pylint: disable=protected-access\n\n table_to_id = {table: i for i, table in enumerate(self._table_config)}\n\n # Set feature descriptor field in the config proto.\n for feature, output_shape in zip(\n nest.flatten(self._feature_config), self._output_shapes):\n feature_descriptor = config_proto.feature_descriptor.add()\n\n if feature.name:\n feature_descriptor.name = feature.name\n\n feature_descriptor.table_id = table_to_id[feature.table]\n # The input shape of the feature is the actual shape of the input tensor\n # except the last dimension because the last dimension will always be\n # reduced.\n feature_descriptor.input_shape.extend(output_shape.as_list())\n\n # Always set mode to training, we override the mode during enqueue.\n config_proto.mode = (\n tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TRAINING)\n\n config_proto.num_hosts = self._strategy.extended.num_hosts\n config_proto.num_tensor_cores = self._strategy.num_replicas_in_sync\n\n # TODO(bfontain): Allow users to pick MOD for the host sharding.\n config_proto.sharding_strategy = (\n tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.DIV_DEFAULT)\n config_proto.pipeline_execution_with_tensor_core = (\n self._pipeline_execution_with_tensor_core)\n\n return config_proto\n\n def apply_gradients(self, gradients, name: Optional[Text] = None):\n \"\"\"Applies the gradient update to the embedding tables.\n\n If a gradient of `None` is passed in any position of the nested structure,\n then an gradient update with a zero gradient is applied for that feature.\n For optimizers like SGD or Adagrad, this is 
the same as applying no update\n at all. For lazy Adam and other sparsely applied optimizers with decay,\n ensure you understand the effect of applying a zero gradient.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features, ))\n\n training_step()\n ```\n\n Args:\n gradients: A nested structure of gradients, with structure matching the\n `feature_config` passed to this object.\n name: A name for the underlying op.\n\n Raises:\n RuntimeError: If called when object wasn't created under a `TPUStrategy`\n or if not built (either by manually calling build or calling enqueue).\n ValueError: If a non-`tf.Tensor` non-`None` gradient is passed in, or a\n `tf.Tensor` of the incorrect shape is passed in. Also if\n the size of any sequence in `gradients` does not match corresponding\n sequence in `feature_config`.\n TypeError: If the type of any sequence in `gradients` does not match\n corresponding sequence in `feature_config`.\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"apply_gradients is not valid when TPUEmbedding \"\n \"object is not created under a TPUStrategy.\")\n\n if not self._built:\n raise RuntimeError(\"apply_gradients called on unbuilt TPUEmbedding \"\n \"object. Please either call enqueue first or manually \"\n \"call the build method.\")\n\n nest.assert_same_structure(self._feature_config, gradients)\n updated_gradients = []\n for (path, gradient), feature, output_shape in zip(\n nest.flatten_with_joined_string_paths(gradients),\n nest.flatten(self._feature_config), self._output_shapes):\n full_output_shape = list(output_shape) + [feature.table.dim]\n if gradient is not None and not isinstance(gradient, ops.Tensor):\n raise ValueError(\n f\"found non-tensor type: {type(gradient)} at path {path}.\")\n if gradient is not None:\n if gradient.shape != full_output_shape:\n raise ValueError(\"Found gradient of shape {} at path {}. Expected \"\n \"shape {}.\".format(gradient.shape, path,\n full_output_shape))\n else:\n # No gradient for this feature, since we must give a gradient for all\n # features, pass in a zero tensor here. Note that this is not correct\n # for all optimizers.\n logging.warning(\n \"No gradient passed for feature %s, sending zero \"\n \"gradient. 
This may not be correct behavior for certain \"\n \"optimizers like Adam.\", path)\n gradient = array_ops.zeros(full_output_shape, dtype=dtypes.float32)\n # Some gradients can be passed with op which shape is not correctly set.\n # This ensures that the shape of the gradient is correctly set.\n updated_gradients.append(\n array_ops.reshape(gradient, shape=gradient.shape))\n op = tpu_ops.send_tpu_embedding_gradients(\n inputs=updated_gradients,\n learning_rates=[\n math_ops.cast(fn(), dtype=dtypes.float32)\n for fn in self._dynamic_learning_rates\n ],\n config=self._config_proto.SerializeToString())\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(op, name)\n\n def dequeue(self, name: Optional[Text] = None):\n \"\"\"Get the embedding results.\n\n Returns a nested structure of `tf.Tensor` objects, matching the structure of\n the `feature_config` argument to the `TPUEmbedding` class. The output shape\n of the tensors is `(*output_shape, dim)`, `dim` is the dimension of the\n corresponding `TableConfig`. For output_shape, there are three places where\n it can be set.\n 1. FeatureConfig provided in the __init__ function.\n 2. Per_replica_output_shapes by directly calling the build method\n after initializing the tpu embedding class.\n 3. Auto detected from the shapes of the input feature.\n The priority of these places is the exact same order.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features, ))\n\n training_step()\n ```\n\n Args:\n name: A name for the underlying op.\n\n Returns:\n A nested structure of tensors, with the same structure as `feature_config`\n passed to this instance of the `TPUEmbedding` object.\n\n Raises:\n RuntimeError: If called when object wasn't created under a `TPUStrategy`\n or if not built (either by manually calling build or calling enqueue).\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"dequeue is not valid when TPUEmbedding object is not \"\n \"created under a TPUStrategy.\")\n\n if not self._built:\n raise RuntimeError(\"dequeue called on unbuilt TPUEmbedding object. 
\"\n \"Please either call enqueue first or manually call \"\n \"the build method.\")\n\n # The activations returned by this op are per feature.\n activations = tpu_ops.recv_tpu_embedding_activations(\n num_outputs=len(self._config_proto.feature_descriptor),\n config=self._config_proto.SerializeToString())\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(activations[0].op, name)\n\n # Pack the list back into the same nested structure as the features.\n return nest.pack_sequence_as(self._feature_config, activations)\n\n def _create_variables_and_slots(\n self\n ) -> Dict[Text, Dict[Text, tf_variables.Variable]]:\n \"\"\"Create variables for TPU embeddings.\n\n Note under TPUStrategy this will ensure that all creations happen within a\n variable creation scope of the sharded variable creator.\n\n Returns:\n A dict of dicts. The outer dict is keyed by the table names and the inner\n dicts are keyed by 'parameters' and the slot variable names.\n \"\"\"\n\n def create_variables(table):\n \"\"\"Create all variables.\"\"\"\n variable_shape = (table.vocabulary_size, table.dim)\n\n def getter(name, shape, dtype, initializer, trainable):\n del shape\n # _add_variable_with_custom_getter clears the shape sometimes, so we\n # take the global shape from outside the getter.\n initial_value = functools.partial(initializer, variable_shape,\n dtype=dtype)\n return tf_variables.Variable(\n name=name,\n initial_value=initial_value,\n shape=variable_shape,\n dtype=dtype,\n trainable=trainable)\n\n def variable_creator(name, initializer, trainable=True):\n # use add_variable_with_custom_getter here so that we take advantage of\n # the checkpoint loading to allow restore before the variables get\n # created which avoids double initialization.\n return self._add_variable_with_custom_getter(\n name=name,\n initializer=initializer,\n shape=variable_shape,\n dtype=dtypes.float32,\n getter=getter,\n trainable=trainable)\n\n parameters = variable_creator(table.name, table.initializer,\n trainable=not self._using_tpu)\n\n def slot_creator(name, initializer):\n return variable_creator(table.name + \"/\" + name,\n initializer,\n False)\n\n if table.optimizer is not None:\n slot_vars = table.optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access\n else:\n slot_vars = {}\n slot_vars[\"parameters\"] = parameters\n return slot_vars\n\n # Store tables based on name rather than TableConfig as we can't track\n # through dicts with non-string keys, i.e. 
we won't be able to save.\n variables = {}\n for table in self._table_config:\n if not self._using_tpu:\n variables[table.name] = create_variables(table)\n else:\n with variable_scope.variable_creator_scope(\n make_sharded_variable_creator(self._hosts)):\n variables[table.name] = create_variables(table)\n\n return variables\n\n def _load_variables(self):\n # Only load the variables if we are:\n # 1) Using TPU\n # 2) Variables are created\n # 3) Not in save context (except if running eagerly)\n if self._using_tpu and self._built and not (\n not context.executing_eagerly() and save_context.in_save_context()):\n _load_variables_impl(self._config_proto.SerializeToString(),\n self._hosts,\n self._variables,\n self._table_config)\n\n def _retrieve_variables(self):\n # Only retrieve the variables if we are:\n # 1) Using TPU\n # 2) Variables are created\n # 3) Not in save context (except if running eagerly)\n if self._using_tpu and self._built and not (\n not context.executing_eagerly() and save_context.in_save_context()):\n _retrieve_variables_impl(self._config_proto.SerializeToString(),\n self._hosts,\n self._variables,\n self._table_config)\n\n def _gather_saveables_for_checkpoint(\n self\n ) -> Dict[Text, Callable[[Text], \"TPUEmbeddingSaveable\"]]:\n \"\"\"Overrides default Trackable implementation to add load/retrieve hook.\"\"\"\n # This saveable should be here in both TPU and CPU checkpoints, so when on\n # CPU, we add the hook with no functions.\n # TODO(bfontain): Update restore logic in saver so that these hooks are\n # always executed. Once that is done, we can output an empty list when on\n # CPU.\n\n def factory(name=_HOOK_KEY):\n return TPUEmbeddingSaveable(name, self._load_variables,\n self._retrieve_variables)\n return {_HOOK_KEY: factory}\n\n # Some helper functions for the below enqueue function.\n def _add_data_for_tensor(self, tensor, weight, indices, values, weights,\n int_zeros, float_zeros, path):\n if weight is not None:\n raise ValueError(\n \"Weight specified for dense input {}, which is not allowed. 
\"\n \"Weight will always be 1 in this case.\".format(path))\n # For tensors, there are no indices and no weights.\n indices.append(int_zeros)\n values.append(math_ops.cast(array_ops.reshape(tensor, [-1]), dtypes.int64))\n weights.append(float_zeros)\n\n def _add_data_for_sparse_tensor(self, tensor, weight, indices, values,\n weights, int_zeros, float_zeros, path,\n feature):\n sample_indices = math_ops.cast(tensor.indices, dtypes.int32)\n if tensor.shape.rank == 2:\n if not feature.output_shape and feature.max_sequence_length > 0:\n # Add one dimension to the last axis.\n sample_indices = array_ops.pad(\n sample_indices, paddings=[[0, 0], [0, 1]])\n indices.append(sample_indices)\n values.append(math_ops.cast(tensor.values, dtypes.int64))\n # If we have weights they must be a SparseTensor.\n if weight is not None:\n if not isinstance(weight, sparse_tensor.SparseTensor):\n raise ValueError(\"Weight for {} is type {} which does not match \"\n \"type input which is SparseTensor.\".format(\n path, type(weight)))\n weights.append(math_ops.cast(weight.values, dtypes.float32))\n else:\n weights.append(float_zeros)\n\n def _add_data_for_ragged_tensor(self, tensor, weight, row_splits, values,\n weights, int_zeros, float_zeros, path,\n feature):\n row_splits.append(math_ops.cast(tensor.row_splits, dtypes.int32))\n values.append(math_ops.cast(tensor.values, dtypes.int64))\n # If we have weights they must be a RaggedTensor.\n if weight is not None:\n if not isinstance(weight, ragged_tensor.RaggedTensor):\n raise ValueError(\"Weight for {} is type {} which does not match \"\n \"type input which is RaggedTensor.\".format(\n path, type(weight)))\n weights.append(math_ops.cast(weight.values, dtypes.float32))\n else:\n weights.append(float_zeros)\n\n def _generate_enqueue_op(\n self,\n flat_inputs: List[internal_types.NativeObject],\n flat_weights: List[Optional[internal_types.NativeObject]],\n flat_features: List[tpu_embedding_v2_utils.FeatureConfig],\n device_ordinal: int,\n mode_override: Text\n ) -> ops.Operation:\n \"\"\"Outputs a the enqueue op given the inputs and weights.\n\n Args:\n flat_inputs: A list of input tensors.\n flat_weights: A list of input weights (or None) of the same length as\n flat_inputs.\n flat_features: A list of FeatureConfigs of the same length as flat_inputs.\n device_ordinal: The device to create the enqueue op for.\n mode_override: A tensor containing the string \"train\" or \"inference\".\n\n Returns:\n The enqueue op.\n \"\"\"\n # Combiners are per table, list in the same order as the table order.\n combiners = [table.combiner for table in self._table_config]\n\n # These parallel arrays will be the inputs to the enqueue op.\n # sample_indices for sparse, row_splits for ragged.\n indices_or_row_splits = []\n values = []\n weights = []\n\n # We have to supply a empty/zero tensor in a list position where we don't\n # have data (e.g. indices for standard Tensor input, weight when no weight\n # is specified). We create one op here per call, so that we reduce the\n # graph size.\n int_zeros = array_ops.zeros((0,), dtype=dtypes.int32)\n float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)\n\n # In the following loop we insert casts so that everything is either int32\n # or float32. This is because op inputs which are lists of tensors must be\n # of the same type within the list. 
Moreover the CPU implementations of\n # these ops cast to these types anyway, so we don't lose any data by casting\n # early.\n for inp, weight, (path, feature) in zip(\n flat_inputs, flat_weights, flat_features):\n if isinstance(inp, ops.Tensor):\n self._add_data_for_tensor(inp, weight, indices_or_row_splits, values,\n weights, int_zeros, float_zeros, path)\n elif isinstance(inp, sparse_tensor.SparseTensor):\n self._add_data_for_sparse_tensor(inp, weight, indices_or_row_splits,\n values, weights, int_zeros,\n float_zeros, path, feature)\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n self._add_data_for_ragged_tensor(inp, weight, indices_or_row_splits,\n values, weights, int_zeros,\n float_zeros, path, feature)\n else:\n raise ValueError(\"Input {} is of unknown type {}. Please only pass \"\n \"Tensor, SparseTensor or RaggedTensor as input to \"\n \"enqueue.\".format(path, type(inp)))\n\n return tpu_ops.enqueue_tpu_embedding_arbitrary_tensor_batch(\n sample_indices_or_row_splits=indices_or_row_splits,\n embedding_indices=values,\n aggregation_weights=weights,\n mode_override=mode_override,\n device_ordinal=device_ordinal,\n combiners=combiners)\n\n def _raise_error_for_incorrect_control_flow_context(self):\n \"\"\"Raises an error if we are not in the TPUReplicateContext.\"\"\"\n # Do not allow any XLA control flow (i.e. control flow in between a\n # TPUStrategy's run call and the call to this function), as we can't\n # extract the enqueue from the head when in XLA control flow.\n graph = ops.get_default_graph()\n in_tpu_ctx = False\n while graph is not None:\n ctx = graph._get_control_flow_context() # pylint: disable=protected-access\n while ctx is not None:\n if isinstance(ctx, tpu.TPUReplicateContext):\n in_tpu_ctx = True\n break\n ctx = ctx.outer_context\n if in_tpu_ctx:\n break\n graph = getattr(graph, \"outer_graph\", None)\n if graph != ops.get_default_graph() and in_tpu_ctx:\n raise RuntimeError(\n \"Current graph {} does not match graph which contains \"\n \"TPUReplicateContext {}. This is most likely due to the fact that \"\n \"enqueueing embedding data is called inside control flow or a \"\n \"nested function inside `strategy.run`. This is not supported \"\n \"because outside compilation fails to extract the enqueue ops as \"\n \"head of computation.\".format(ops.get_default_graph(), graph))\n return in_tpu_ctx\n\n def _raise_error_for_non_direct_inputs(self, features):\n \"\"\"Checks all tensors in features to see if they are a direct input.\"\"\"\n\n # expand_composites here is important: as composite tensors pass through\n # tpu.replicate, they get 'flattened' into their component tensors and then\n # repacked before being passed to the tpu function. In means that it is the\n # component tensors which are produced by an op with the\n # \"_tpu_input_identity\" attribute.\n for path, input_tensor in nest.flatten_with_joined_string_paths(\n features, expand_composites=True):\n if input_tensor.op.type == \"Placeholder\":\n continue\n try:\n is_input = input_tensor.op.get_attr(\"_tpu_input_identity\")\n except ValueError:\n is_input = False\n if not is_input:\n raise ValueError(\n \"Received input tensor {} which is the output of op {} (type {}) \"\n \"which does not have the `_tpu_input_identity` attr. Please \"\n \"ensure that the inputs to this layer are taken directly from \"\n \"the arguments of the function called by \"\n \"strategy.run. 
Two possible causes are: dynamic batch size \"\n \"support or you are using a keras layer and are not passing \"\n \"tensors which match the dtype of the `tf.keras.Input`s.\"\n \"If you are triggering dynamic batch size support, you can \"\n \"disable it by passing tf.distribute.RunOptions(\"\n \"experimental_enable_dynamic_batch_size=False) to the options \"\n \"argument of strategy.run().\".format(path,\n input_tensor.op.name,\n input_tensor.op.type))\n\n def _raise_error_for_inputs_not_on_cpu(self, flat_inputs, flat_paths):\n \"\"\"Checks all tensors in features to see are placed on the CPU.\"\"\"\n\n def check_device(path, device_string):\n spec = tf_device.DeviceSpec.from_string(device_string)\n if spec.device_type == \"TPU\":\n raise ValueError(\n \"Received input tensor {} which is on a TPU input device {}. Input \"\n \"tensors for TPU embeddings must be placed on the CPU. Please \"\n \"ensure that your dataset is prefetching tensors to the host by \"\n \"setting the 'experimental_fetch_to_device' option of the \"\n \"dataset distribution function. See the documentation of the \"\n \"enqueue method for an example.\".format(path, device_string))\n\n # expand_composites here is important, we need to check the device of each\n # underlying tensor.\n for input_tensor, input_path in zip(flat_inputs, flat_paths):\n if nest.is_nested_or_composite(input_tensor):\n input_tensors = nest.flatten(input_tensor, expand_composites=True)\n else:\n input_tensors = [input_tensor]\n for t in input_tensors:\n if (t.op.type == \"Identity\" and\n t.op.inputs[0].op.type == \"TPUReplicatedInput\"):\n for tensor in t.op.inputs[0].op.inputs:\n check_device(input_path, tensor.device)\n else:\n check_device(input_path, t.device)\n\n def enqueue(\n self,\n features,\n weights=None,\n training: bool = True,\n name: Optional[Text] = None,\n device: Optional[Text] = None):\n \"\"\"Enqueues id tensors for embedding lookup.\n\n This function enqueues a structure of features to be looked up in the\n embedding tables. We expect that the input shapes of each of the tensors in\n features matches the output shapes set via FeatureConfig or build method\n (if any). the output shapes will be auto detected based on the input shapes\n with the max_sequence_length or output shape setting in the FeatureConfig.\n Note that the output shapes is based on per replica batch size.\n If your input dataset is batched to the global batch size and you use\n `tf.distribute.TPUStrategy`'s `experimental_distribute_dataset`\n or if you use `distribute_datasets_from_function` and batch\n to the per core batch size computed by the context passed to your input\n function, the output shapes should match automatically.\n\n The auto detected the output shapes:\n 1. For dense tensor, if rank 2 or above, make sure the tensor has last\n dimension as 1. The output shape will be the input shape excluding\n the last dimension.\n 2. For sparse tensor, make sure the tensor has rank 2 and above.\n a. If feature config has max_sequence_length equals 0 or output shape\n set (the max_sequence_length setting will be ignored), the\n output shape will be the input shape excluding the last dimension.\n b. Otherwize if the tensor is rank 2, the output shape will be input\n shape with last dimension set as max_sequence_length. If the\n tensor is above rank 2, the output shape will be the input shape\n excluding the last dimension and the last dimension of the output\n shape will be set to max_sequence_length.\n 3. 
For ragged tensor, make sure the tensor has rank 2.\n a. If feature config has max_sequence_length equals 0 or output shape\n set (the max_sequence_length setting will be ignored), the\n output shape will be the input shape excluding the last dimension.\n b. Otherwise, the output shape will be the input shape excluding the\n last dimension and the last dimension of the output shape will be\n set to max_sequence_length.\n\n ```python\n strategy = tf.distribute.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features,))\n\n training_step()\n ```\n\n NOTE: You should specify `training=True` when using\n `embedding.apply_gradients` as above and `training=False` when not using\n `embedding.apply_gradients` (e.g. for frozen embeddings or when doing\n evaluation).\n\n For finer grained control, in the above example the line\n\n ```\n embedding.enqueue(embedding_features, training=True)\n ```\n\n may be replaced with\n\n ```\n per_core_embedding_features = self.strategy.experimental_local_results(\n embedding_features)\n\n def per_core_enqueue(ctx):\n core_id = ctx.replica_id_in_sync_group\n device = strategy.extended.worker_devices[core_id]\n embedding.enqueue(per_core_embedding_features[core_id],\n device=device)\n\n strategy.experimental_distribute_values_from_function(\n per_core_queue_inputs)\n ```\n\n Args:\n features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or\n `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs\n will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`\n or `tf.RaggedTensor` is supported per call.\n weights: If not `None`, a nested structure of `tf.Tensor`s,\n `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except\n that the tensors should be of float type (and they will be downcast to\n `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the\n same for the parallel entries from `features` and similarly for\n `tf.RaggedTensor`s we assume the row_splits are the same.\n training: Defaults to `True`. If `False`, enqueue the batch as inference\n batch (forward pass only). Do not call `apply_gradients` when this is\n `False` as this may lead to a deadlock.\n name: A name for the underlying op.\n device: The device name (e.g. '/task:0/device:TPU:2') where this batch\n should be enqueued. This should be set if and only if features is not a\n `tf.distribute.DistributedValues` and enqueue is not being called\n inside a TPU context (e.g. inside `TPUStrategy.run`).\n\n Raises:\n ValueError: When called inside a strategy.run call and input is not\n directly taken from the args of the `strategy.run` call. Also if\n the size of any sequence in `features` does not match corresponding\n sequence in `feature_config`. 
Similarly for `weights`, if not `None`.\n If input shapes of features is unequal or different from a previous\n call.\n RuntimeError: When called inside a strategy.run call and inside XLA\n control flow. If batch_size is not able to be determined and build was\n not called.\n TypeError: If the type of any sequence in `features` does not match\n corresponding sequence in `feature_config`. Similarly for `weights`, if\n not `None`.\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"enqueue is not valid when TPUEmbedding object is not \"\n \"created under a TPUStrategy.\")\n\n in_tpu_context = self._raise_error_for_incorrect_control_flow_context()\n\n nest.assert_same_structure(self._feature_config, features)\n\n if not self._verify_output_shapes_on_enqueue:\n if not self._output_shapes or not self._built:\n raise ValueError(\n \"Configured not to check output shapes on each enqueue() call; please \"\n \"ensure build() was called with output shapes to initialize \"\n \"the TPU for embeddings.\")\n else:\n input_shapes = self._get_input_shapes(features, in_tpu_context)\n\n self._maybe_build(input_shapes)\n # If is already built, we still need to check if the output shapes matches\n # with the previous ones.\n self._check_output_shapes(\n self._get_output_shapes_from_input_shapes(input_shapes))\n\n flat_inputs = nest.flatten(features)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(self._feature_config, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(self._feature_config)\n flat_paths, _ = zip(*flat_features)\n\n self._raise_error_for_inputs_not_on_cpu(flat_inputs, flat_paths)\n # If we are in a tpu_context, automatically apply outside compilation.\n if in_tpu_context:\n self._raise_error_for_non_direct_inputs(features)\n\n def generate_enqueue_ops():\n \"\"\"Generate enqueue ops for outside compilation.\"\"\"\n # Note that we put array_ops.where_v2 rather than a python if so that\n # the op is explicitly create and the constant ops are both in the graph\n # even though we don't expect training to be a tensor (and thus generate\n # control flow automatically). 
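      # For example (illustrative): even with a literal training=True, both
      # constant_op.constant("train") and constant_op.constant("inference")
      # remain in the graph feeding the select produced by where_v2, rather
      # than the choice being folded away at trace time.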
This need to make it easier to re-write\n # the graph later if we need to fix which mode needs to be used.\n mode_override = array_ops.where_v2(training,\n constant_op.constant(\"train\"),\n constant_op.constant(\"inference\"))\n # Device ordinal is -1 here, a later rewrite will fix this once the op\n # is expanded by outside compilation.\n enqueue_op = self._generate_enqueue_op(\n flat_inputs, flat_weights, flat_features, device_ordinal=-1,\n mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n\n # Ensure that this op has outbound control flow, otherwise it won't be\n # executed.\n ops.get_default_graph().control_outputs.append(enqueue_op)\n\n tpu.outside_compilation(generate_enqueue_ops)\n\n elif device is None:\n mode_override = \"train\" if training else \"inference\"\n # We generate enqueue ops per device, so we need to gather the all\n # features for a single device in to a dict.\n # We rely here on the fact that the devices in the PerReplica value occur\n # in the same (standard) order as self._strategy.extended.worker_devices.\n enqueue_ops = []\n for replica_id in range(self._strategy.num_replicas_in_sync):\n replica_inputs = distribute_utils.select_replica(replica_id,\n flat_inputs)\n replica_weights = distribute_utils.select_replica(replica_id,\n flat_weights)\n tpu_device = self._strategy.extended.worker_devices[replica_id]\n # TPU devices string are like /job:worker/replica:0/task:0/device:TPU:0\n # the device ordinal is the last number\n device_ordinal = (\n tf_device.DeviceSpec.from_string(tpu_device).device_index)\n\n with ops.device(device_util.get_host_for_device(tpu_device)):\n enqueue_op = self._generate_enqueue_op(\n replica_inputs, replica_weights, flat_features,\n device_ordinal=device_ordinal, mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n enqueue_ops.append(enqueue_op)\n ops.get_default_graph().control_outputs.extend(enqueue_ops)\n else:\n mode_override = \"train\" if training else \"inference\"\n device_spec = tf_device.DeviceSpec.from_string(device)\n if device_spec.device_type != \"TPU\":\n raise ValueError(\n \"Non-TPU device {} passed to enqueue.\".format(device))\n\n with ops.device(device_util.get_host_for_device(device)):\n enqueue_op = self._generate_enqueue_op(\n flat_inputs, flat_weights, flat_features,\n device_ordinal=device_spec.device_index,\n mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n ops.get_default_graph().control_outputs.append(enqueue_op)\n\n def _get_input_shapes(self, tensors,\n in_tpu_context: bool) -> List[TensorShape]:\n \"\"\"Get the input shapes from the input tensor.\"\"\"\n input_shapes = []\n for (path, maybe_tensor), feature in zip(\n nest.flatten_with_joined_string_paths(tensors),\n nest.flatten(self._feature_config)):\n if not in_tpu_context:\n tensor = distribute_utils.select_replica(0, maybe_tensor)\n else:\n tensor = maybe_tensor\n\n if isinstance(tensor, ops.Tensor):\n input_shapes.append(\n self._get_input_shape_for_tensor(tensor, feature, path))\n elif isinstance(tensor, sparse_tensor.SparseTensor):\n input_shapes.append(\n self._get_input_shape_for_sparse_tensor(tensor, feature, path))\n elif isinstance(tensor, ragged_tensor.RaggedTensor):\n input_shapes.append(\n self._get_input_shape_for_ragged_tensor(tensor, feature, path))\n return input_shapes\n\n def _get_input_shape_for_tensor(self, tensor, 
feature, path) -> TensorShape:\n \"\"\"Get the input shape for the dense tensor.\"\"\"\n shape = tensor.shape.as_list()\n if len(shape) < 1:\n raise ValueError(\"Only rank 1 and above dense tensor is supported,\"\n \" find rank {} sparse tensor for input {}\".format(\n len(shape), path))\n if len(shape) > 1 and shape[-1] != 1:\n raise ValueError(\n \"Rank 2 or above dense tensor should have last dimension as 1 \"\n \"as the last dimension will always be reduced. \"\n \"Instead got dense tensor as shape {}\".format(shape))\n return TensorShape(shape)\n\n def _get_input_shape_for_sparse_tensor(self, tensor, feature,\n path) -> TensorShape:\n \"\"\"Get the input shape for the sparse tensor.\"\"\"\n shape = tensor.shape.as_list()\n # Only 2 and above rank sparse tensor is supported.\n if len(shape) < 2:\n raise ValueError(\"Only rank 2 and above sparse tensor is supported,\"\n \" find rank {} sparse tensor for input {}\".format(\n len(shape), path))\n if not feature.output_shape and feature.max_sequence_length > 0:\n # If the max_sequence_length is set and the output shape for FeatureConfig\n # is not set, we modify the shape of the input feature. Only rank 2\n # feature output shape is modified\n if len(shape) == 2:\n # If the sparse tensor is 2D and max_sequence_length is set,\n # we need to add one dimension to the input feature.\n shape.insert(len(shape) - 1, feature.max_sequence_length)\n\n return TensorShape(shape)\n\n def _get_input_shape_for_ragged_tensor(self, tensor, feature,\n path) -> TensorShape:\n \"\"\"Get the input shape for the ragged tensor.\"\"\"\n shape = tensor.shape.as_list()\n # Only rank 2 ragged tensor is supported.\n if len(shape) != 2:\n raise ValueError(\"Only rank 2 ragged tensor is supported,\"\n \" find rank {} ragged tensor for input {}\".format(\n len(shape), path))\n if not feature.output_shape and feature.max_sequence_length > 0:\n # If the max_sequence_length is set and the output shape for FeatureConfig\n # is not set, add the sequence length as second last dimension of\n # the ragged tensor.\n shape.insert(len(shape) - 1, feature.max_sequence_length)\n\n return TensorShape(shape)\n\n def _update_output_shapes(self, incoming_output_shapes: List[TensorShape]):\n \"\"\"Update the existing output shapes based on the new output shapes.\n\n The existing output shapes always have higher piority than the new incoming\n output shapes.\n Args:\n incoming_output_shapes: nested structure of TensorShape to override the\n existing output shapes.\n \"\"\"\n nest.assert_same_structure(self._output_shapes, incoming_output_shapes)\n updated_output_shapes = []\n for old_output_shape, incoming_output_shape in zip(self._output_shapes,\n incoming_output_shapes):\n if old_output_shape:\n updated_output_shapes.append(old_output_shape)\n else:\n updated_output_shapes.append(incoming_output_shape)\n self._output_shapes = updated_output_shapes\n\n def _check_output_shapes(self, incoming_output_shapes: List[TensorShape]):\n \"\"\"Check the incoming output shapes against the output shapes stored.\"\"\"\n # The incoming output shape should have the same structure with the existing\n # output shapes.\n nest.assert_same_structure(self._output_shapes, incoming_output_shapes)\n\n for (path, _), old_output_shape, incoming_output_shape in zip(\n nest.flatten_with_joined_string_paths(self._feature_config),\n self._output_shapes, incoming_output_shapes):\n # First check if both shapes are not None.\n if old_output_shape and incoming_output_shape:\n # We skip the check when the incoming output 
shape is rank 1 or 2 and\n # rank of the old output shape is larger. This can happen for\n # (sequence) ragged tensor, we push the check down to the enqueue op.\n if (len(incoming_output_shape) == 1 or len(incoming_output_shape)\n == 2) and len(old_output_shape) > len(incoming_output_shape):\n continue\n if len(old_output_shape) != len(\n incoming_output_shape) or not self._is_tensor_shape_match(\n old_output_shape, incoming_output_shape):\n raise ValueError(\n f\"Inconsistent shape founded for input feature {path}, \"\n f\"Output shape is set to be {old_output_shape}, \"\n f\"But got incoming output shape {incoming_output_shape}\")\n\n def _check_output_shapes_fully_defined(self):\n \"\"\"Check if the output shape is fully defined.\"\"\"\n for (path, _), output_shape in zip(\n nest.flatten_with_joined_string_paths(self._feature_config),\n self._output_shapes):\n if not output_shape.is_fully_defined():\n raise ValueError(\n f\"Input Feature {path} has output shape set as \"\n f\"{output_shape} which is not fully defined. \"\n \"Please specify the fully defined shape in either FeatureConfig \"\n \"or for the build method.\")\n\n def _is_tensor_shape_match(self, shape_a: TensorShape,\n shape_b: TensorShape) -> bool:\n \"\"\"Check if shape b matches with shape a.\"\"\"\n for s_a, s_b in zip(shape_a.as_list(), shape_b.as_list()):\n if s_a and s_b and s_a != s_b:\n return False\n return True\n\n def _get_output_shapes_from_batch_size(self, per_replica_batch_size):\n \"\"\"Get the output shapes from the batch size.\"\"\"\n output_shapes = []\n for feature in nest.flatten(self._feature_config):\n if not feature.output_shape and feature.max_sequence_length > 0:\n output_shapes.append(\n TensorShape([per_replica_batch_size, feature.max_sequence_length]))\n else:\n output_shapes.append(TensorShape(per_replica_batch_size))\n return output_shapes\n\n\n@def_function.function\ndef _load_variables_impl(\n config: Text,\n hosts: List[Tuple[int, Text]],\n variables: Dict[Text, Dict[Text, tf_variables.Variable]],\n table_config: tpu_embedding_v2_utils.TableConfig):\n \"\"\"Load embedding tables to onto TPU for each table and host.\n\n Args:\n config: A serialized TPUEmbeddingConfiguration proto.\n hosts: A list of CPU devices, on per host.\n variables: A dictionary of dictionaries of TPUShardedVariables. First key is\n the table name, second key is 'parameters' or the optimizer slot name.\n table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.\n \"\"\"\n def select_fn(host_id):\n\n def select_or_zeros(x):\n if host_id >= len(x.variables):\n # In the edge case where we have more hosts than variables, due to using\n # a small number of rows, we load zeros for the later hosts. 
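        # For example (illustrative): with 4 hosts but a table sharded across
        # only 2 variables, hosts 2 and 3 load all-zero shards shaped like
        # shard 0.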
We copy\n # the shape of the first host's variables, which we assume is defined\n # because TableConfig guarantees at least one row.\n return array_ops.zeros_like(x.variables[0])\n return x.variables[host_id]\n\n return select_or_zeros\n\n for host_id, host in enumerate(hosts):\n with ops.device(host):\n host_variables = nest.map_structure(select_fn(host_id), variables)\n for table in table_config:\n table.optimizer._load()( # pylint: disable=protected-access\n table_name=table.name,\n num_shards=len(hosts),\n shard_id=host_id,\n config=config,\n **host_variables[table.name])\n # Ensure that only the first table/first host gets a config so that we\n # don't bloat graph by attaching this large string to each op.\n # We have num tables * num hosts of these so for models with a large\n # number of tables training on a large slice, this can be an issue.\n config = None\n\n\n@def_function.function\ndef _retrieve_variables_impl(\n config: Text,\n hosts: List[Tuple[int, Text]],\n variables: Dict[Text, Dict[Text, tf_variables.Variable]],\n table_config: tpu_embedding_v2_utils.TableConfig):\n \"\"\"Retrieve embedding tables from TPU to host memory.\n\n Args:\n config: A serialized TPUEmbeddingConfiguration proto.\n hosts: A list of all the host CPU devices.\n variables: A dictionary of dictionaries of TPUShardedVariables. First key is\n the table name, second key is 'parameters' or the optimizer slot name.\n table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.\n \"\"\"\n for host_id, host in enumerate(hosts):\n with ops.device(host):\n for table in table_config:\n retrieved = table.optimizer._retrieve()( # pylint: disable=protected-access\n table_name=table.name,\n num_shards=len(hosts),\n shard_id=host_id,\n config=config)\n # When there are no slot variables (e.g with SGD) this returns a\n # single tensor rather than a tuple. 
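        # For example (illustrative): SGD has no slot variables, so retrieve
        # returns just the parameters tensor, while an optimizer with slots
        # returns a tuple of (parameters, slot_1, slot_2, ...).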
In this case we put the tensor in\n # a list to make the following code easier to write.\n if not isinstance(retrieved, tuple):\n retrieved = (retrieved,)\n\n for i, slot in enumerate([\"parameters\"] +\n table.optimizer._slot_names()): # pylint: disable=protected-access\n # We must assign the CPU variables the values of tensors that were\n # returned from the TPU.\n sharded_var = variables[table.name][slot]\n if host_id < len(sharded_var.variables):\n # In the edge case where we have more hosts than variables, due to\n # using a small number of rows, we skip the later hosts.\n sharded_var.variables[host_id].assign(retrieved[i])\n # Ensure that only the first table/first host gets a config so that we\n # don't bloat graph by attaching this large string to each op.\n # We have num tables * num hosts of these so for models with a large\n # number of tables training on a large slice, this can be an issue.\n config = None\n\n\nclass TPUEmbeddingSaveable(saveable_hook.SaveableHook):\n \"\"\"Save/Restore hook to Retrieve/Load TPUEmbedding variables.\"\"\"\n\n def __init__(\n self,\n name: Text,\n load: Callable[[], Any],\n retrieve: Callable[[], Any]):\n self._load = load\n self._retrieve = retrieve\n super(TPUEmbeddingSaveable, self).__init__(name=name)\n\n def before_save(self):\n if self._retrieve is not None:\n self._retrieve()\n\n def after_restore(self):\n if self._load is not None:\n self._load()\n\n\ndef _ragged_embedding_lookup_with_reduce(\n table: tf_variables.Variable,\n ragged: ragged_tensor.RaggedTensor,\n weights: ragged_tensor.RaggedTensor,\n combiner: Text) -> core.Tensor:\n \"\"\"Compute a ragged lookup followed by a reduce on axis 1.\n\n Args:\n table: The embedding table.\n ragged: A RaggedTensor of ids to look up.\n weights: A RaggedTensor of weights (or None).\n combiner: One of \"mean\", \"sum\", \"sqrtn\".\n\n Returns:\n A Tensor.\n \"\"\"\n if weights is None:\n weights = array_ops.ones_like(ragged, dtype=table.dtype)\n weights = array_ops.expand_dims(weights, axis=2)\n ragged_result = embedding_ops.embedding_lookup_ragged(table, ragged)\n ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1)\n if combiner == \"mean\":\n ragged_result = math_ops.div_no_nan(ragged_result,\n math_ops.reduce_sum(weights, axis=1))\n elif combiner == \"sqrtn\":\n ragged_result = math_ops.div_no_nan(\n ragged_result,\n math_ops.sqrt(math_ops.reduce_sum(weights * weights, axis=1)))\n return ragged_result\n\n\n@tf_export(\"tpu.experimental.embedding.serving_embedding_lookup\")\ndef cpu_embedding_lookup(inputs, weights, tables, feature_config):\n \"\"\"Apply standard lookup ops with `tf.tpu.experimental.embedding` configs.\n\n This function is a utility which allows using the\n `tf.tpu.experimental.embedding` config objects with standard lookup functions.\n This can be used when exporting a model which uses\n `tf.tpu.experimental.embedding.TPUEmbedding` for serving on CPU. 
In particular\n `tf.tpu.experimental.embedding.TPUEmbedding` only supports lookups on TPUs and\n should not be part of your serving graph.\n\n Note that TPU specific options (such as `max_sequence_length`) in the\n configuration objects will be ignored.\n\n In the following example we take a trained model (see the documentation for\n `tf.tpu.experimental.embedding.TPUEmbedding` for the context) and create a\n saved model with a serving function that will perform the embedding lookup and\n pass the results to your model:\n\n ```python\n model = model_fn(...)\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n batch_size=1024,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.restore(...)\n\n @tf.function(input_signature=[{'feature_one': tf.TensorSpec(...),\n 'feature_two': tf.TensorSpec(...),\n 'feature_three': tf.TensorSpec(...)}])\n def serve_tensors(embedding_features):\n embedded_features = tf.tpu.experimental.embedding.serving_embedding_lookup(\n embedding_features, None, embedding.embedding_tables,\n feature_config)\n return model(embedded_features)\n\n model.embedding_api = embedding\n tf.saved_model.save(model,\n export_dir=...,\n signatures={'serving_default': serve_tensors})\n\n ```\n\n NOTE: Its important to assign the embedding api object to a member of your\n model as `tf.saved_model.save` only supports saving variables one `Trackable`\n object. Since the model's weights are in `model` and the embedding table are\n managed by `embedding`, we assign `embedding` to and attribute of `model` so\n that tf.saved_model.save can find the embedding variables.\n\n NOTE: The same `serve_tensors` function and `tf.saved_model.save` call will\n work directly from training.\n\n Args:\n inputs: a nested structure of Tensors, SparseTensors or RaggedTensors.\n weights: a nested structure of Tensors, SparseTensors or RaggedTensors or\n None for no weights. 
If not None, structure must match that of inputs, but\n entries are allowed to be None.\n tables: a dict of mapping TableConfig objects to Variables.\n feature_config: a nested structure of FeatureConfig objects with the same\n structure as inputs.\n\n Returns:\n A nested structure of Tensors with the same structure as inputs.\n \"\"\"\n\n nest.assert_same_structure(inputs, feature_config)\n\n flat_inputs = nest.flatten(inputs)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(inputs, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(feature_config)\n\n outputs = []\n for inp, weight, (path, feature) in zip(\n flat_inputs, flat_weights, flat_features):\n table = tables[feature.table]\n\n if weight is not None:\n if isinstance(inp, ops.Tensor):\n raise ValueError(\n \"Weight specified for {}, but input is dense.\".format(path))\n elif type(weight) is not type(inp):\n raise ValueError(\n \"Weight for {} is of type {} but it does not match type of the \"\n \"input which is {}.\".format(path, type(weight), type(inp)))\n elif feature.max_sequence_length > 0:\n raise ValueError(\"Weight specified for {}, but this is a sequence \"\n \"feature.\".format(path))\n\n if isinstance(inp, ops.Tensor):\n if feature.max_sequence_length > 0:\n raise ValueError(\"Feature {} is a sequence feature but a dense tensor \"\n \"was passed.\".format(path))\n outputs.append(embedding_ops.embedding_lookup_v2(table, inp))\n\n elif isinstance(inp, sparse_tensor.SparseTensor):\n if not feature.output_shape and feature.max_sequence_length > 0:\n batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64)\n sparse_shape = array_ops.stack(\n [batch_size, feature.max_sequence_length], axis=0)\n # TPU Embedding truncates sequences to max_sequence_length, and if we\n # don't truncate, scatter_nd will error out if the index was out of\n # bounds.\n truncated_inp = sparse_ops.sparse_slice(inp, start=[0, 0],\n size=sparse_shape)\n\n dense_output_shape = array_ops.stack(\n [batch_size, feature.max_sequence_length, feature.table.dim],\n axis=0)\n outputs.append(\n array_ops.scatter_nd(\n truncated_inp.indices,\n array_ops.gather(table.read_value(), truncated_inp.values),\n dense_output_shape))\n else:\n inp_rank = inp.dense_shape.get_shape()[0]\n if (not feature.validate_weights_and_indices and\n inp_rank is not None and inp_rank <= 2):\n outputs.append(\n embedding_ops.embedding_lookup_sparse_v2(\n table,\n inp,\n sp_weights=weight,\n combiner=feature.table.combiner))\n else:\n outputs.append(\n embedding_ops.safe_embedding_lookup_sparse_v2(\n table,\n inp,\n sparse_weights=weight,\n combiner=feature.table.combiner))\n\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n if inp.shape.rank != 2:\n raise ValueError(\n \"Only rank 2 ragged tensor is supported, but got rank {}\".format(\n inp.shape.rank))\n batch_size = inp.shape[0]\n if feature.output_shape:\n output_batch_size = math_ops.reduce_prod(feature.output_shape)\n # If the output batch size matches the data batch size, treat it as\n # normal ragged input.\n if output_batch_size == batch_size:\n ragged_output = _ragged_embedding_lookup_with_reduce(\n table, inp, weight, feature.table.combiner)\n ragged_output = array_ops.reshape(\n ragged_output, shape=feature.output_shape + [feature.table.dim])\n # If the data batch size is a factor of the output batch size, the\n # divide result will be the sequence length. 
Ignore the weights and\n # combiner.\n elif output_batch_size > batch_size and output_batch_size % batch_size == 0:\n ragged_output = embedding_ops.embedding_lookup_v2(table, inp)\n # Pad or truncate in the sequence dimension\n ragged_output = ragged_output.to_tensor(shape=[\n batch_size, output_batch_size // batch_size, feature.table.dim\n ])\n # Reshape to desire output shape.\n ragged_output = array_ops.reshape(\n ragged_output, feature.output_shape + [feature.table.dim])\n else:\n raise ValueError(\n \"Output shape set in the FeatureConfig should be the factor of \"\n \"the input data batch size. But instead got output shape {}, \"\n \"input data batch size {}\".format(feature.output_shape,\n batch_size))\n else:\n if feature.max_sequence_length > 0:\n output_shape = [\n batch_size, feature.max_sequence_length, feature.table.dim\n ]\n ragged_lookup = embedding_ops.embedding_lookup_v2(table, inp)\n # Unlike scatter_nd, RaggedTensor.to_tensor truncates to the given\n # shape.\n ragged_output = ragged_lookup.to_tensor(shape=output_shape)\n else:\n ragged_output = _ragged_embedding_lookup_with_reduce(\n table, inp, weight, feature.table.combiner)\n outputs.append(ragged_output)\n else:\n raise ValueError(\"Input {} is type {}. Tensor, SparseTensor or \"\n \"RaggedTensor expected.\".format(path, type(inp)))\n return nest.pack_sequence_as(feature_config, outputs)\n\n\ndef get_list_of_hosts(strategy: tpu_strategy.TPUStrategy) -> List[Text]:\n \"\"\"Returns a sorted list of CPU devices for the remote jobs.\n\n Args:\n strategy: A TPUStrategy object.\n\n Returns:\n A sort list of device strings.\n \"\"\"\n list_of_hosts = []\n # Assume this is sorted by task\n for tpu_device in strategy.extended.worker_devices:\n host = device_util.get_host_for_device(tpu_device)\n if host not in list_of_hosts:\n list_of_hosts.append(host)\n assert len(list_of_hosts) == strategy.extended.num_hosts\n return list_of_hosts\n\n\ndef extract_variable_info(\n kwargs) -> Tuple[Text, Tuple[int, ...], dtypes.DType, Callable[[], Any]]:\n \"\"\"Extracts the variable creation attributes from the kwargs.\n\n Args:\n kwargs: a dict of keyword arguments that were passed to a variable creator\n scope.\n\n Returns:\n A tuple of variable name, shape, dtype, initialization function.\n \"\"\"\n if (isinstance(kwargs[\"initial_value\"], functools.partial) and (\n \"shape\" in kwargs[\"initial_value\"].keywords or\n kwargs[\"initial_value\"].args)):\n # Sometimes shape is passed positionally, sometimes it's passed as a kwarg.\n if \"shape\" in kwargs[\"initial_value\"].keywords:\n shape = kwargs[\"initial_value\"].keywords[\"shape\"]\n else:\n shape = kwargs[\"initial_value\"].args[0]\n return (kwargs[\"name\"], shape,\n kwargs[\"initial_value\"].keywords.get(\"dtype\", kwargs[\"dtype\"]),\n kwargs[\"initial_value\"].func)\n elif \"shape\" not in kwargs or kwargs[\"shape\"] is None or not callable(\n kwargs[\"initial_value\"]):\n raise ValueError(\n \"Unable to extract initializer function and shape from {}. Please \"\n \"either pass a function that expects a shape and dtype as the \"\n \"initial value for your variable or functools.partial object with \"\n \"the shape and dtype kwargs set. 
This is needed so that we can \"\n \"initialize the shards of the ShardedVariable locally.\".format(\n kwargs[\"initial_value\"]))\n else:\n return (kwargs[\"name\"], kwargs[\"shape\"], kwargs[\"dtype\"],\n kwargs[\"initial_value\"])\n\n\ndef make_sharded_variable_creator(\n hosts: List[Text]) -> Callable[..., TPUShardedVariable]:\n \"\"\"Makes a sharded variable creator given a list of hosts.\n\n Args:\n hosts: a list of tensorflow devices on which to shard the tensors.\n\n Returns:\n A variable creator function.\n \"\"\"\n\n def sharded_variable_creator(\n next_creator: Callable[..., tf_variables.Variable], *args, **kwargs):\n \"\"\"The sharded variable creator.\"\"\"\n kwargs[\"skip_mirrored_creator\"] = True\n\n num_hosts = len(hosts)\n name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs)\n initial_value = kwargs[\"initial_value\"]\n rows = shape[0]\n cols = shape[1]\n partial_partition = rows % num_hosts\n full_rows_per_host = rows // num_hosts\n # We partition as if we were using MOD sharding: at least\n # `full_rows_per_host` rows to `num_hosts` hosts, where the first\n # `partial_partition` hosts get an additional row when the number of rows\n # is not cleanly divisible. Note that `full_rows_per_host` may be zero.\n partitions = (\n [full_rows_per_host + 1] * partial_partition\n + [full_rows_per_host] * (num_hosts - partial_partition))\n variables = []\n sharding_aware = \"shard_info\" in tf_inspect.getargspec(initial_value).args\n\n # Keep track of offset for sharding aware initializers.\n offset = 0\n kwargs[\"dtype\"] = dtype\n for i, p in enumerate(partitions):\n if p == 0:\n # Skip variable creation for empty partitions, resulting from the edge\n # case of 'rows < num_hosts'. This is safe because both load/restore\n # can handle the missing values.\n continue\n with ops.device(hosts[i]):\n kwargs[\"name\"] = \"{}_{}\".format(name, i)\n kwargs[\"shape\"] = (p, cols)\n if sharding_aware:\n shard_info = base.ShardInfo(kwargs[\"shape\"], (offset, 0))\n kwargs[\"initial_value\"] = functools.partial(\n initial_value, shard_info=shard_info)\n offset += p\n else:\n kwargs[\"initial_value\"] = functools.partial(\n unwrapped_initial_value, kwargs[\"shape\"], dtype=dtype)\n variables.append(next_creator(*args, **kwargs))\n return TPUShardedVariable(variables, name=name)\n return sharded_variable_creator\n",
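> Editor's note, standalone sketch: the sharded variable creator at the end of the file above splits an embedding table's rows across hosts with MOD-style partitioning — every host gets `rows // num_hosts` rows, and the first `rows % num_hosts` hosts each get one extra row, so some hosts may end up with zero rows when `rows < num_hosts`. The helper below (`partition_rows` is a hypothetical name written for this note, not taken from the dataset or from TensorFlow) reproduces just that arithmetic so the zero-row edge case is easy to check.

```python
# Illustrative sketch only: mirrors the partition arithmetic described above.
from typing import List


def partition_rows(rows: int, num_hosts: int) -> List[int]:
    """Return per-host row counts; a count of 0 means that host gets no shard."""
    full_rows_per_host = rows // num_hosts
    partial_partition = rows % num_hosts
    # First `partial_partition` hosts receive one extra row each.
    return ([full_rows_per_host + 1] * partial_partition
            + [full_rows_per_host] * (num_hosts - partial_partition))


if __name__ == "__main__":
    assert partition_rows(10, 4) == [3, 3, 2, 2]   # 10 rows over 4 hosts
    assert partition_rows(2, 4) == [1, 1, 0, 0]    # fewer rows than hosts
    assert sum(partition_rows(2, 4)) == 2          # no rows lost or duplicated
```

> The zero-count entries are why the creator skips variable creation for empty partitions; per the comments in the source, the load/restore paths tolerate those missing shards.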
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow.python.ops.op_def_library.\"\"\"\n\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.python.eager import function as eager_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import op_def_library\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.util import compat\n\n\nclass OpDefLibraryTest(test_util.TensorFlowTestCase):\n\n def Tensor(self, t, name=\"in\"):\n return op_def_library.apply_op(\"OutT\", T=t, name=name)\n\n def testNoRegisteredOpFails(self):\n with self.assertRaises(RuntimeError) as cm:\n op_def_library.apply_op(\"unknown\")\n self.assertEqual(str(cm.exception), \"Unrecognized Op name unknown\")\n\n def testSimple(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Simple\", a=3)\n self.assertEqual(dtypes.float32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'Simple' op: 'Simple' input: 'Simple/a'\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Simple\", a=4)\n self.assertProtoEquals(\"\"\"\n name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Simple\", a=5, name=\"named\")\n self.assertProtoEquals(\"\"\"\n name: 'named' op: 'Simple' input: 'named/a'\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\n \"Simple\", a=[[1, 2, 3], [4, 5, 6]], name=\"two_d\")\n self.assertProtoEquals(\"\"\"\n name: 'two_d' op: 'Simple' input: 'two_d/a'\n \"\"\", out.op.node_def)\n\n def testSimpleFailures(self):\n with ops.Graph().as_default():\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a=\"Bad string\")\n self.assertIn(\n \"Expected int32 passed to parameter 'a' of op 'Simple', \"\n \"got 'Bad string' of type 'str' instead.\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a=self.Tensor(dtypes.string))\n self.assertIn(\n \"Input 'a' of 'Simple' Op has type string \"\n \"that does not match expected type of int32.\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a=6, extra=\"bogus\")\n self.assertIn(\"Simple got unexpected keyword arguments: extra\",\n str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"Simple\", a=6, extra1=\"bogus\", extra2=\"also_bogus\")\n self.assertIn(\n \"Simple got unexpected keyword arguments: extra1, \"\n \"extra2\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n 
op_def_library.apply_op(\"Simple\")\n self.assertIn(\"No argument for input a\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", wrong=7)\n self.assertIn(\"No argument for input a\", str(cm.exception))\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Simple\", a={\"label\": 1})\n self.assertIn(\n \"Expected int32 passed to parameter 'a' of op 'Simple', \"\n \"got {'label': 1} of type 'dict' instead.\", str(cm.exception))\n\n def testReservedInput(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"ReservedInput\", input_=7, name=\"x\")\n self.assertProtoEquals(\"\"\"\n name: 'x' op: 'ReservedInput' input: 'x/input'\n \"\"\", op.node_def)\n\n def testPolymorphic(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Polymorphic\", a=7, name=\"p\")\n self.assertEqual(dtypes.int32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'Polymorphic' input: 'p/a'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Polymorphic\", a=\"s\", name=\"q\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'Polymorphic' input: 'q/a'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Polymorphic\", a=[\"s\", \"t\", \"u\"], name=\"r\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'Polymorphic' input: 'r/a'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Polymorphic\", a=\"s\", T=dtypes.string)\n self.assertEqual(\n str(cm.exception),\n \"Should not specify value for inferred attr 'T' for \"\n \"Polymorphic.\")\n\n def testPolymorphicOut(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"PolymorphicOut\", T=dtypes.int32, name=\"p\")\n self.assertEqual(dtypes.int32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'PolymorphicOut'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"PolymorphicOut\", T=dtypes.bool, name=\"q\")\n self.assertEqual(dtypes.bool, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'PolymorphicOut'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"PolymorphicOut\")\n self.assertEqual(\n str(cm.exception), \"No argument found for attr T for PolymorphicOut\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"PolymorphicOut\", T=None)\n self.assertEqual(str(cm.exception),\n \"Expected DataType for argument 'T' not None.\")\n\n def testPolymorphicDefaultOut(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"PolymorphicDefaultOut\", T=None, name=\"p\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'PolymorphicDefaultOut'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\n \"PolymorphicDefaultOut\", T=dtypes.bool, name=\"q\")\n self.assertEqual(dtypes.bool, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'PolymorphicDefaultOut'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n def testBinary(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Binary\", a=8, b=9, name=\"b\")\n 
self.assertEqual(dtypes.int32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Binary\", a=\"left\", b=\"right\", name=\"c\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\"Binary\", a=\"left\", b=12)\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\n \"Binary\", a=self.Tensor(dtypes.string), b=self.Tensor(dtypes.int32))\n\n def testRestrict(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"Restrict\", a=\"foo\", name=\"g\")\n self.assertEqual(dtypes.string, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'g' op: 'Restrict' input: 'g/a'\n attr { key: 'T' value { type: DT_STRING } }\n \"\"\", out.op.node_def)\n\n out = op_def_library.apply_op(\"Restrict\", a=True, name=\"h\")\n self.assertEqual(dtypes.bool, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'h' op: 'Restrict' input: 'h/a'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Restrict\", a=17)\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 'a' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testTypeList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"TypeList\", a=[\"foo\"], name=\"z\")\n self.assertProtoEquals(\"\"\"\n name: 'z' op: 'TypeList' input: 'z/a_0'\n attr { key: 'T' value { list { type: DT_STRING } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"TypeList\", a=[True, 12], name=\"y\")\n self.assertProtoEquals(\"\"\"\n name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'\n attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"TypeList\", a=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeList\", a=17)\n self.assertStartsWith(str(cm.exception),\n \"Expected list for 'a' \"\n \"argument to 'TypeList' Op, not \")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeList\", a=[self.Tensor(dtypes.int32), None])\n self.assertStartsWith(str(cm.exception),\n \"Tensors in list passed to 'a' of 'TypeList' Op \"\n \"have types [int32, <NOT CONVERTIBLE TO TENSOR>]\")\n\n def testTypeListTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"TypeListTwice\", a=[\"foo\", True], b=[\"bar\", False], name=\"z\")\n self.assertProtoEquals(\"\"\"\n name: 'z' op: 'TypeListTwice'\n input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'\n attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"TypeListTwice\", a=[], b=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeListTwice\", a=[\"foo\", True], b=[\"bar\", 6])\n self.assertEqual(str(cm.exception),\n \"Input 'b' of 'TypeListTwice' Op has type list of \"\n 
\"string, int32 that does not match type list \"\n \"string, bool of argument 'a'.\")\n\n def testOutTypeList(self):\n with ops.Graph().as_default():\n out, = op_def_library.apply_op(\n \"OutTypeList\", T=[dtypes.float32], name=\"x\")\n self.assertEqual(dtypes.float32, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'x' op: 'OutTypeList'\n attr { key: 'T' value { list { type: DT_FLOAT } } }\n \"\"\", out.op.node_def)\n\n out1, out2 = op_def_library.apply_op(\n \"OutTypeList\", T=[dtypes.int32, dtypes.bool], name=\"w\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'w' op: 'OutTypeList'\n attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }\n \"\"\", out1.op.node_def)\n\n out = op_def_library.apply_op(\"OutTypeList\", T=[], name=\"empty\")\n self.assertEqual([], out)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"OutTypeList\", T=dtypes.int32)\n self.assertEqual(\n str(cm.exception), \"Expected list for attr T, obtained \"\n \"DType instead.\")\n\n def testTypeListRestrict(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"TypeListRestrict\", a=[\"foo\", False], name=\"v\")\n self.assertProtoEquals(\"\"\"\n name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'\n attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"TypeListRestrict\", a=[True, 12])\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 'a' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testOutTypeListRestrict(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\n \"OutTypeListRestrict\", t=[dtypes.bool, dtypes.string], name=\"u\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.string, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'OutTypeListRestrict'\n attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }\n \"\"\", out1.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"OutTypeListRestrict\", t=[dtypes.string, dtypes.int32])\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 't' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testAttr(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"Attr\", a=12, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"Attr\", a=tensor_shape.Dimension(13), name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\", a=\"bad\")\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'a' not 'bad'.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\", a=[12])\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'a' not [12].\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\", a=None)\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'a' not None.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"Attr\")\n self.assertEqual(\n str(cm.exception), \"No argument found for attr a for \"\n \"Attr\")\n\n def testAttrFloat(self):\n 
with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrFloat\", a=1.2, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrFloat\", a=12, name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrFloat\", a=\"bad\")\n self.assertEqual(str(cm.exception),\n \"Expected float for argument 'a' not 'bad'.\")\n\n def testAttrFunc(self):\n with ops.Graph().as_default():\n @function.Defun(dtypes.float32, func_name=\"MyFn\")\n def fn(x):\n return 2 + x\n\n op = op_def_library.apply_op(\"FuncAttr\", f=fn, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'FuncAttr' attr { key: 'f'\n value { func { name: 'MyFn' } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"FuncAttr\", f=3)\n self.assertEqual(str(cm.exception),\n \"Don't know how to convert 3 to a func for argument f\")\n\n def testAttrFuncWithFuncWithAttrs(self):\n with ops.Graph().as_default():\n @eager_function.defun_with_attributes(\n input_signature=(tensor_spec.TensorSpec(None, dtypes.float32),),\n autograph=False,\n attributes={\"_dummy_attr\": 15})\n def fn(x):\n return 2 + x\n\n concrete_fn = fn.get_concrete_function()\n\n op = op_def_library.apply_op(\"FuncAttr\", f=concrete_fn, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'FuncAttr'\n attr {\n key: 'f'\n value {\n func {\n name: '%s'\n attr { key: \"_dummy_attr\" value { i: 15 } }\n }\n }\n }\n \"\"\" % compat.as_str(concrete_fn.name), op.node_def)\n\n def testAttrFuncList(self):\n with ops.Graph().as_default():\n @function.Defun(dtypes.float32, func_name=\"MyFn\")\n def fn1(x):\n return 2 + x\n @function.Defun(dtypes.int32, dtypes.float32, func_name=\"MyFn2\")\n def fn2(x, y):\n return 2 + x, y * 3\n @function.Defun(dtypes.int32, func_name=\"MyFn3\")\n def fn3(y):\n return 2 + y\n\n op = op_def_library.apply_op(\"FuncListAttr\", f=[fn1, fn2, fn3], name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'FuncListAttr'\n attr { key: 'f' value { list { func { name: 'MyFn' }\n func { name: 'MyFn2' }\n func { name: 'MyFn3' } } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"FuncListAttr\", f=[fn1, 3, fn2])\n self.assertEqual(str(cm.exception),\n \"Don't know how to convert 3 to a func for argument f\")\n\n def testAttrBool(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrBool\", a=True, name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrBool\", a=False, name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBool\", a=0)\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not 0.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBool\", a=1)\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not 1.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBool\", a=[])\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not [].\")\n\n def testAttrBoolList(self):\n with 
ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrBoolList\", a=[True, False, True], name=\"t\")\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'AttrBoolList'\n attr { key: 'a' value { list { b: true b: false b:true } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrBoolList\", a=[], name=\"u\")\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"AttrBoolList\", a=[0])\n self.assertEqual(str(cm.exception),\n \"Expected bool for argument 'a' not 0.\")\n\n def testAttrMin(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrMin\", a=12, name=\"s\")\n self.assertProtoEquals(\"\"\"\n name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"AttrMin\", a=2)\n self.assertEqual(str(cm.exception),\n \"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.\")\n\n def testAttrListMin(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrListMin\", a=[1, 2], name=\"r\")\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'AttrListMin'\n attr { key: 'a' value { list { i: 1 i: 2 } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"AttrListMin\", a=[17])\n self.assertEqual(str(cm.exception),\n \"Attr 'a' of 'AttrListMin' Op \"\n \"passed list of length 1 less than minimum 2.\")\n\n def testAttrEnum(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrEnum\", a=\"oranges\", name=\"e\")\n self.assertProtoEquals(\"\"\"\n name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"AttrEnum\", a=\"invalid\")\n self.assertEqual(str(cm.exception),\n 'Attr \\'a\\' of \\'AttrEnum\\' Op '\n 'passed string \\'invalid\\' not in: '\n '\"apples\", \"oranges\".')\n\n def testAttrEnumList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrEnumList\", a=[\"oranges\", \"apples\"], name=\"f\")\n self.assertProtoEquals(\"\"\"\n name: 'f' op: 'AttrEnumList'\n attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\n \"AttrEnumList\", a=[\"apples\", \"invalid\", \"oranges\"])\n self.assertEqual(str(cm.exception),\n 'Attr \\'a\\' of \\'AttrEnumList\\' Op '\n 'passed string \\'invalid\\' not '\n 'in: \"apples\", \"oranges\".')\n\n def testAttrShape(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrShape\", a=[5], name=\"s1\")\n self.assertProtoEquals(\"\"\"\n name: 's1' op: 'AttrShape'\n attr { key: 'a' value { shape { dim { size: 5 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrShape\", a=(4, 3, 2), name=\"s2\")\n self.assertProtoEquals(\"\"\"\n name: 's2' op: 'AttrShape'\n attr { key: 'a' value {\n shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"AttrShape\", a=tensor_shape.TensorShape([3, 2]), name=\"s3\")\n self.assertProtoEquals(\"\"\"\n name: 's3' op: 'AttrShape'\n attr { key: 'a' value {\n shape { dim { size: 3 } dim { size: 2 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrShape\", a=[], name=\"s4\")\n self.assertProtoEquals(\"\"\"\n name: 's4' op: 'AttrShape' 
attr { key: 'a' value { shape { } } }\n \"\"\", op.node_def)\n\n shape = tensor_shape_pb2.TensorShapeProto()\n shape.dim.add().size = 6\n shape.dim.add().size = 3\n op = op_def_library.apply_op(\"AttrShape\", a=shape, name=\"s5\")\n self.assertProtoEquals(\"\"\"\n name: 's5' op: 'AttrShape'\n attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }\n \"\"\", op.node_def)\n\n # TODO(josh11b): Re-enable this test once we stop promoting scalars to\n # shapes.\n # with self.assertRaises(TypeError) as cm:\n # op_def_library.apply_op(\"AttrShape\", a=5)\n # self.assertEqual(str(cm.exception),\n # \"Don't know how to convert 5 to a TensorShapeProto for\"\n # \" argument 'a'\")\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\"AttrShape\", a=\"ABC\")\n\n def testAttrShapeList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrShapeList\", a=[[3, 2], [6, 5, 4]], name=\"sl\")\n self.assertProtoEquals(\"\"\"\n name: 'sl' op: 'AttrShapeList'\n attr { key: 'a' value { list {\n shape { dim { size: 3 } dim { size: 2 } }\n shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrShapeList\", a=[], name=\"esl\")\n self.assertProtoEquals(\"\"\"\n name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testAttrPartialShape(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrPartialShape\", a=[5], name=\"s1\")\n self.assertProtoEquals(\"\"\"\n name: 's1' op: 'AttrPartialShape'\n attr { key: 'a' value { shape { dim { size: 5 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"AttrPartialShape\", a=(4, None, 2), name=\"s2\")\n self.assertProtoEquals(\"\"\"\n name: 's2' op: 'AttrPartialShape'\n attr { key: 'a' value {\n shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"AttrPartialShape\", a=tensor_shape.TensorShape([3, None]), name=\"s3\")\n self.assertProtoEquals(\"\"\"\n name: 's3' op: 'AttrPartialShape'\n attr { key: 'a' value {\n shape { dim { size: 3 } dim { size: -1 } } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrPartialShape\", a=[], name=\"s4\")\n self.assertProtoEquals(\"\"\"\n name: 's4' op: 'AttrPartialShape'\n attr { key: 'a' value { shape { } } }\n \"\"\", op.node_def)\n\n shape = tensor_shape_pb2.TensorShapeProto()\n shape.dim.add().size = -1\n shape.dim.add().size = 3\n op = op_def_library.apply_op(\"AttrPartialShape\", a=shape, name=\"s5\")\n self.assertProtoEquals(\"\"\"\n name: 's5' op: 'AttrPartialShape'\n attr { key: 'a' value {\n shape { dim { size: -1 } dim { size: 3 } } } }\n \"\"\", op.node_def)\n\n # TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.\n # with self.assertRaises(TypeError) as cm:\n # op_def_library.apply_op(\"AttrPartialShape\", a=5)\n # self.assertEqual(str(cm.exception),\n # \"Don't know how to convert 5 to a TensorShapeProto for\"\n # \" argument 'a'\")\n\n with self.assertRaises(TypeError):\n op_def_library.apply_op(\"AttrPartialShape\", a=\"ABC\")\n\n def testAttrPartialShapeList(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"AttrPartialShapeList\", a=[[3, 2], [6, None, 4]], name=\"sl\")\n self.assertProtoEquals(\"\"\"\n name: 'sl' op: 'AttrPartialShapeList'\n attr { key: 'a' value { list {\n shape { dim { size: 3 } dim { size: 2 } }\n shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }\n \"\"\", 
op.node_def)\n\n op = op_def_library.apply_op(\"AttrPartialShapeList\", a=[], name=\"esl\")\n self.assertProtoEquals(\"\"\"\n name: 'esl' op: 'AttrPartialShapeList' attr {\n key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testAttrDefault(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrDefault\", a=None, name=\"d\")\n self.assertProtoEquals(\"\"\"\n name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrDefault\", a=\"kiwi\", name=\"c\")\n self.assertProtoEquals(\"\"\"\n name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }\n \"\"\", op.node_def)\n\n def testAttrListDefault(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrListDefault\", a=None, name=\"b\")\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'AttrListDefault'\n attr { key: 'a' value { list { i: 5 i: 15 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrListDefault\", a=[3], name=\"a\")\n self.assertProtoEquals(\"\"\"\n name: 'a' op: 'AttrListDefault'\n attr { key: 'a' value { list { i: 3 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrListDefault\", a=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'AttrListDefault'\n attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testAttrEmptyListDefault(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"AttrEmptyListDefault\", a=None, name=\"b\")\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'AttrEmptyListDefault'\n attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrEmptyListDefault\", a=[3], name=\"a\")\n self.assertProtoEquals(\"\"\"\n name: 'a' op: 'AttrEmptyListDefault'\n attr { key: 'a' value { list { f: 3 } } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"AttrEmptyListDefault\", a=[], name=\"empty\")\n self.assertProtoEquals(\"\"\"\n name: 'empty' op: 'AttrEmptyListDefault'\n attr { key: 'a' value { list { } } }\n \"\"\", op.node_def)\n\n def testReservedAttr(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"ReservedAttr\", range_=7, name=\"x\")\n self.assertProtoEquals(\"\"\"\n name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }\n \"\"\", op.node_def)\n\n def testDefaultAttrType(self):\n with ops.Graph().as_default():\n # Give an input whose type has no obvious output type.\n op = op_def_library.apply_op(\"AttrTypeDefault\", a=[], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'AttrTypeDefault' input: 'n/a'\n attr { key: 'T' value { type: DT_INT32 } }\n \"\"\", op.node_def)\n\n # Give an input whose type can be inferred as different\n # than the default.\n op = op_def_library.apply_op(\"AttrTypeDefault\", a=[1.0], name=\"f\")\n self.assertProtoEquals(\"\"\"\n name: 'f' op: 'AttrTypeDefault' input: 'f/a'\n attr { key: 'T' value { type: DT_FLOAT } }\n \"\"\", op.node_def)\n\n def testDefaultListAttrType(self):\n with ops.Graph().as_default():\n # Give an input whose type can be inferred as different\n # than the default.\n op = op_def_library.apply_op(\n \"AttrListTypeDefault\", a=[1.0], b=[2.0], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 1 } }\n \"\"\", op.node_def)\n\n def testNIntsIn(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"NIntsIn\", a=[1, 2], 
name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"NIntsIn\", a=[5, 4, 3, 2, 1], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NIntsIn'\n input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'\n attr { key: 'N' value { i: 5 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=[\"foo\", \"bar\"])\n self.assertEqual(\n str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op have types \"\n \"[string, string] that do not match expected type int32.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NIntsIn\",\n a=[self.Tensor(dtypes.string),\n self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op have \"\n \"types [string, string] that do not match expected type \"\n \"int32.\")\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=[99])\n self.assertEqual(str(cm.exception),\n \"List argument 'a' to 'NIntsIn' Op \"\n \"with length 1 shorter than \"\n \"minimum length 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=[38, \"bar\"])\n self.assertEqual(\n str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op have types \"\n \"[int32, string] that do not match expected type int32.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NIntsIn\",\n a=[self.Tensor(dtypes.int32),\n self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NIntsIn' Op \"\n \"have types [int32, string] that do not match expected \"\n \"type int32.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsIn\", a=17)\n self.assertStartsWith(str(cm.exception),\n \"Expected list for 'a' argument \"\n \"to 'NIntsIn' Op, not \")\n\n def testNPolymorphicIn(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\"NPolymorphicIn\", a=[1, 2], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicIn\", a=[5, 4, 3, 2, 1], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NPolymorphicIn'\n input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 5 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"NPolymorphicIn\", a=[\"foo\", \"bar\"], name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicIn\",\n a=[1, self.Tensor(dtypes.float32, name=\"x\")],\n name=\"q\")\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicIn\",\n a=[\n self.Tensor(dtypes.float32, name=\"y\"),\n self.Tensor(dtypes.float32_ref, name=\"z\")\n ],\n name=\"r\")\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'NPolymorphicIn' 
input: 'y' input: 'z'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=[99])\n self.assertEqual(str(cm.exception),\n \"List argument 'a' to 'NPolymorphicIn' Op with length 1 \"\n \"shorter than minimum length 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=[38, \"bar\"])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [int32, string] that don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NPolymorphicIn\", a=[38, self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [int32, string] that don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=[38, None])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [int32, <NOT CONVERTIBLE TO TENSOR>] that \"\n \"don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NPolymorphicIn\", a=[\"abcd\", self.Tensor(dtypes.int32)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'a' of 'NPolymorphicIn' Op \"\n \"have types [string, int32] that don't all match.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicIn\", a=17)\n self.assertStartsWith(str(cm.exception),\n \"Expected list for 'a' argument \"\n \"to 'NPolymorphicIn' Op, not \")\n\n def testNPolymorphicRestrictIn(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"NPolymorphicRestrictIn\", a=[\"foo\", \"bar\"], name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NPolymorphicRestrictIn\", a=[False, True, False], name=\"b\")\n self.assertProtoEquals(\"\"\"\n name: 'b' op: 'NPolymorphicRestrictIn'\n input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicRestrictIn\", a=[1, 2])\n self.assertEqual(\n str(cm.exception),\n \"Value passed to parameter 'a' has DataType int32 not in \"\n \"list of allowed values: string, bool\")\n\n def testNInTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"NInTwice\", a=[1, 2], b=[\"one\", \"two\"], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NInTwice'\n input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"NInTwice\", a=[], b=[], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NInTwice\", a=[1, 2, 3], b=[\"too short\"])\n self.assertEqual(str(cm.exception),\n \"List argument 'b' to 'NInTwice' Op \"\n \"with length 1 must match \"\n \"length 3 of argument 'a'.\")\n\n def testNInPolymorphicTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n 
\"NInPolymorphicTwice\", a=[1, 2], b=[3, 4], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NInPolymorphicTwice'\n input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NInPolymorphicTwice\", a=[1, 2, 3], b=[5])\n self.assertEqual(str(cm.exception),\n \"List argument 'b' to 'NInPolymorphicTwice' Op \"\n \"with length 1 \"\n \"must match length 3 of argument 'a'.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NInPolymorphicTwice\", a=[1, 2], b=[\"one\", \"two\"])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'b' of 'NInPolymorphicTwice' \"\n \"Op have types [string, string] that do not match type \"\n \"int32 inferred from earlier arguments.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"NInPolymorphicTwice\",\n a=[self.Tensor(dtypes.int32)],\n b=[self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'b' of \"\n \"'NInPolymorphicTwice' Op have types [string] that do \"\n \"not match type int32 inferred from earlier arguments.\")\n\n def testNInTwoTypeVariables(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"NInTwoTypeVariables\", a=[1, 2], b=[True, False], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NInTwoTypeVariables'\n input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'\n attr { key: 'S' value { type: DT_INT32 } }\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NInTwoTypeVariables\", a=[1, 2], b=[3, 4], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NInTwoTypeVariables'\n input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'\n attr { key: 'S' value { type: DT_INT32 } }\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"NInTwoTypeVariables\",\n a=[self.Tensor(dtypes.int32, name=\"q\")],\n b=[self.Tensor(dtypes.string, name=\"r\")],\n name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'\n attr { key: 'S' value { type: DT_INT32 } }\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 1 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NInTwoTypeVariables\", a=[1, 2, 3], b=[\"5\"])\n self.assertEqual(str(cm.exception),\n \"List argument 'b' to 'NInTwoTypeVariables' Op \"\n \"with length 1 \"\n \"must match length 3 of argument 'a'.\")\n\n def testInPolymorphicTwice(self):\n with ops.Graph().as_default():\n op = op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[8], b=[3, 4, 5], name=\"n\")\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'InPolymorphicTwice'\n input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 1 } }\n attr { key: 'M' value { i: 3 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\"InPolymorphicTwice\", a=[8], b=[], name=\"o\")\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 1 } }\n attr { key: 'M' value { i: 0 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n 
\"InPolymorphicTwice\", a=[], b=[3, 4], name=\"p\")\n self.assertProtoEquals(\"\"\"\n name: 'p' op: 'InPolymorphicTwice' input: 'p/b_0' input: 'p/b_1'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 0 } }\n attr { key: 'M' value { i: 2 } }\n \"\"\", op.node_def)\n\n op = op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[], b=[3.0, 4.0], name=\"q\")\n self.assertProtoEquals(\"\"\"\n name: 'q' op: 'InPolymorphicTwice' input: 'q/b_0' input: 'q/b_1'\n attr { key: 'T' value { type: DT_FLOAT } }\n attr { key: 'N' value { i: 0 } }\n attr { key: 'M' value { i: 2 } }\n \"\"\", op.node_def)\n\n # Empty input lists: assume default type for T.\n op = op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[], b=[], name=\"r\")\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'InPolymorphicTwice'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 0 } }\n attr { key: 'M' value { i: 0 } }\n \"\"\", op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"InPolymorphicTwice\", a=[1, 2], b=[\"one\", \"two\"])\n self.assertEqual(\n str(cm.exception),\n \"Tensors in list passed to 'b' of 'InPolymorphicTwice' Op \"\n \"have types [string, string] that do not match type int32 \"\n \"inferred from earlier arguments.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\n \"InPolymorphicTwice\",\n a=[self.Tensor(dtypes.int32)],\n b=[self.Tensor(dtypes.string)])\n self.assertEqual(str(cm.exception),\n \"Tensors in list passed to 'b' of 'InPolymorphicTwice' \"\n \"Op have types [string] that do not match type int32 \"\n \"inferred from earlier arguments.\")\n\n def testNIntsOut(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\"NIntsOut\", N=2, name=\"n\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3, out4, out5 = op_def_library.apply_op(\n \"NIntsOut\", N=5, name=\"o\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertEqual(dtypes.int32, out3.dtype)\n self.assertEqual(dtypes.int32, out4.dtype)\n self.assertEqual(dtypes.int32, out5.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }\n \"\"\", out5.op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NIntsOut\", N=1)\n self.assertEqual(\n str(cm.exception),\n \"Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NIntsOut\", N=[3])\n self.assertEqual(str(cm.exception),\n \"Expected int for argument 'N' not [3].\")\n\n def testNIntsOutDefault(self):\n with ops.Graph().as_default():\n out1, out2, out3 = op_def_library.apply_op(\n \"NIntsOutDefault\", N=None, name=\"z\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertEqual(dtypes.int32, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n out1, out2 = op_def_library.apply_op(\"NIntsOutDefault\", N=2, name=\"y\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }\n \"\"\", out2.op.node_def)\n\n def 
testNPolymorphicOut(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\n \"NPolymorphicOut\", N=2, T=dtypes.int32, name=\"n\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'n' op: 'NPolymorphicOut'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicOut\", T=dtypes.string, N=3, name=\"o\")\n self.assertEqual(dtypes.string, out1.dtype)\n self.assertEqual(dtypes.string, out2.dtype)\n self.assertEqual(dtypes.string, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'NPolymorphicOut'\n attr { key: 'T' value { type: DT_STRING } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out3.op.node_def)\n\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"NPolymorphicOut\", N=1, T=dtypes.string)\n self.assertEqual(str(cm.exception),\n \"Attr 'N' of 'NPolymorphicOut' Op \"\n \"passed 1 less than minimum 2.\")\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicOut\", N=3, T=[dtypes.string])\n self.assertEqual(\n str(cm.exception),\n \"Expected DataType for argument 'T' not [tf.string].\")\n\n def testNPolymorphicOutDefault(self):\n with ops.Graph().as_default():\n out1, out2 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=None, T=None, name=\"r\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'r' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=3, T=None, name=\"s\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertEqual(dtypes.bool, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 's' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n out1, out2 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=None, T=dtypes.int32, name=\"t\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertProtoEquals(\"\"\"\n name: 't' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 2 } }\n \"\"\", out1.op.node_def)\n\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicOutDefault\", N=3, T=dtypes.int32, name=\"u\")\n self.assertEqual(dtypes.int32, out1.dtype)\n self.assertEqual(dtypes.int32, out2.dtype)\n self.assertEqual(dtypes.int32, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'NPolymorphicOutDefault'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n def testNPolymorphicRestrictOut(self):\n with ops.Graph().as_default():\n out1, out2, out3 = op_def_library.apply_op(\n \"NPolymorphicRestrictOut\", N=3, T=dtypes.bool, name=\"u\")\n self.assertEqual(dtypes.bool, out1.dtype)\n self.assertEqual(dtypes.bool, out2.dtype)\n self.assertEqual(dtypes.bool, out3.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'u' op: 'NPolymorphicRestrictOut'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: 'N' value { i: 3 } }\n \"\"\", out1.op.node_def)\n\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"NPolymorphicRestrictOut\", N=2, 
T=dtypes.int32)\n self.assertEqual(str(cm.exception),\n \"Value passed to parameter 'T' has DataType int32 \"\n \"not in list of allowed values: string, bool\")\n\n def testRef(self):\n with ops.Graph().as_default():\n out = op_def_library.apply_op(\"RefOut\", T=dtypes.bool, name=\"o\")\n self.assertEqual(dtypes.bool_ref, out.dtype)\n self.assertProtoEquals(\"\"\"\n name: 'o' op: 'RefOut'\n attr { key: 'T' value { type: DT_BOOL } }\n \"\"\", out.op.node_def)\n\n op = op_def_library.apply_op(\"RefIn\", a=out, name=\"i\")\n self.assertProtoEquals(\"\"\"\n name: 'i' op: 'RefIn' input: 'o'\n attr { key: 'T' value { type: DT_BOOL } }\n attr { key: \"_class\" value { list { s: \"loc:@o\" } } }\n \"\"\", op.node_def)\n\n # Can pass ref to non-ref input.\n out = op_def_library.apply_op(\"RefOut\", T=dtypes.int32, name=\"r\")\n out = op_def_library.apply_op(\"Simple\", a=out, name=\"s\")\n self.assertProtoEquals(\"\"\"\n name: 's' op: 'Simple' input: 'r'\n \"\"\", out.op.node_def)\n\n # Can't pass non-ref to ref input.\n with self.assertRaises(TypeError) as cm:\n op_def_library.apply_op(\"RefIn\", a=2)\n self.assertEqual(\n str(cm.exception),\n \"'RefIn' Op requires that input 'a' be a mutable tensor \" +\n \"(e.g.: a tf.Variable)\")\n\n input_a = op_def_library.apply_op(\"RefOut\", T=dtypes.int32, name=\"t\")\n input_b = op_def_library.apply_op(\"RefOut\", T=dtypes.int32, name=\"u\")\n op = op_def_library.apply_op(\"TwoRefsIn\", a=input_a, b=input_b, name=\"v\")\n # NOTE(mrry): The order of colocation constraints is an implementation\n # detail.\n self.assertProtoEquals(\"\"\"\n name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'\n attr { key: 'T' value { type: DT_INT32 } }\n attr { key: \"_class\" value { list { s: \"loc:@t\" s: \"loc:@u\" } } }\n \"\"\", op.node_def)\n\n def testSpecifyDevice(self):\n graph = ops.Graph()\n with graph.as_default():\n with graph.device(\"/job:ADevice\"):\n op_def_library.apply_op(\"Simple\", a=3)\n # We look at the whole graph here to make sure the Const op is also given\n # the specified device.\n graph_def = graph.as_graph_def()\n self.assertEqual(len(graph_def.node), 2)\n for node in graph_def.node:\n self.assertDeviceEqual(node.device, \"/job:ADevice\")\n\n def testStructuredOutputSingleList(self):\n with ops.Graph().as_default():\n for n_a in [0, 1, 3]:\n a = op_def_library.apply_op(\"SimpleStruct\", n_a=n_a)\n self.assertIsInstance(a, list)\n self.assertEqual(n_a, len(a))\n\n def testStructuredOutputListAndSingle(self):\n with ops.Graph().as_default():\n for n_a in [0, 1, 3]:\n a, b = op_def_library.apply_op(\"MixedStruct\", n_a=n_a)\n self.assertIsInstance(a, list)\n self.assertEqual(n_a, len(a))\n self.assertTrue(all(x.dtype == dtypes.int32 for x in a))\n self.assertIsInstance(b, ops.Tensor)\n self.assertEqual(dtypes.float32, b.dtype)\n\n def testStructuredOutputMultipleLists(self):\n with ops.Graph().as_default():\n for n_a in [0, 1, 3]:\n for n_b in [0, 1, 3]:\n for t_c in [[],\n [dtypes.int32],\n [dtypes.int32, dtypes.float32]]:\n a, b, c = op_def_library.apply_op(\n \"ComplexStruct\", n_a=n_a, n_b=n_b, t_c=t_c)\n\n self.assertEqual(n_a, len(a))\n self.assertTrue(all(x.dtype == dtypes.int32 for x in a))\n self.assertEqual(n_b, len(b))\n self.assertTrue(all(x.dtype == dtypes.int64 for x in b))\n self.assertEqual(t_c, [x.dtype for x in c])\n\n\nclass OpDefLibraryGraphTest(test_util.TensorFlowTestCase):\n\n def testNoGraph(self):\n out = op_def_library.apply_op(\"Simple\", a=3)\n self.assertEqual(out.graph, ops.get_default_graph())\n\n def 
testDefaultGraph(self):\n graph = ops.Graph()\n with graph.as_default():\n out = op_def_library.apply_op(\"Simple\", a=3)\n self.assertEqual(out.graph, graph)\n\n def testDifferentGraphFails(self):\n with ops.Graph().as_default():\n a = op_def_library.apply_op(\"Simple\", a=3)\n with ops.Graph().as_default():\n b = op_def_library.apply_op(\"Simple\", a=4)\n with self.assertRaises(ValueError) as cm:\n op_def_library.apply_op(\"Binary\", a=a, b=b)\n self.assertIn(\"must be from the same graph\", str(cm.exception))\n\n\nif __name__ == \"__main__\":\n googletest.main()\n"
] | [
[
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_nested_row_splits",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.test_util.run_v1_only",
"numpy.ones",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.sparse_tensor.SparseTensorSpec",
"tensorflow.python.framework.sparse_tensor.SparseTensor.from_value",
"tensorflow.python.ops.sparse_ops.sparse_transpose",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.ops.sparse_ops.sparse_tensor_dense_matmul",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.platform.googletest.main",
"numpy.array",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense"
],
[
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.ops.device"
],
[
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.training.server_lib.ClusterSpec"
],
[
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.tpu.tpu.outside_compilation",
"tensorflow.python.tpu.tpu.initialize_system_for_tpu_embedding",
"tensorflow.python.ops.sparse_ops.sparse_slice",
"tensorflow.python.ops.array_ops.pad",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.saved_model.save_context.in_save_context",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.tpu.ops.tpu_ops.is_tpu_embedding_initialized",
"tensorflow.python.util.tf_inspect.getargspec",
"tensorflow.python.distribute.distribute_utils.select_replica",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.training.tracking.base.ShardInfo",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.util.nest.is_nested_or_composite",
"tensorflow.python.ops.embedding_ops.embedding_lookup_sparse_v2",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.embedding_ops.safe_embedding_lookup_sparse_v2",
"tensorflow.python.util.nest.flatten_with_joined_string_paths",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.ops.embedding_ops.embedding_lookup_v2",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.embedding_ops.embedding_lookup_ragged",
"tensorflow.python.tpu.tpu_embedding_v2_utils.log_tpu_embedding_configuration",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.core.protobuf.tpu.tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.tpu.ops.tpu_ops.enqueue_tpu_embedding_arbitrary_tensor_batch",
"tensorflow.python.util.nest.pack_sequence_as"
],
[
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.util.compat.as_str",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.function.Defun",
"tensorflow.python.framework.op_def_library.apply_op",
"tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto"
]
] |
ChenjunZou/katib | [
"6a07daae796c29d24f63375cce71b75c4eee8d9c"
] | [
"examples/v1alpha3/nas/darts-cnn-cifar10/model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom operations import FactorizedReduce, StdConv, MixedOp\n\n\nclass Cell(nn.Module):\n \"\"\" Cell for search\n Each edge is mixed and continuous relaxed.\n \"\"\"\n\n def __init__(self, num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space):\n \"\"\"\n Args:\n num_nodes: Number of intermediate cell nodes\n c_prev_prev: channels_out[k-2]\n c_prev : Channels_out[k-1]\n c_cur : Channels_in[k] (current)\n reduction_prev: flag for whether the previous cell is reduction cell or not\n reduction_cur: flag for whether the current cell is reduction cell or not\n \"\"\"\n\n super(Cell, self).__init__()\n self.reduction_cur = reduction_cur\n self.num_nodes = num_nodes\n\n # If previous cell is reduction cell, current input size does not match with\n # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing\n if reduction_prev:\n self.preprocess0 = FactorizedReduce(c_prev_prev, c_cur)\n else:\n self.preprocess0 = StdConv(c_prev_prev, c_cur, kernel_size=1, stride=1, padding=0)\n self.preprocess1 = StdConv(c_prev, c_cur, kernel_size=1, stride=1, padding=0)\n\n # Generate dag from mixed operations\n self.dag_ops = nn.ModuleList()\n\n for i in range(self.num_nodes):\n self.dag_ops.append(nn.ModuleList())\n # Include 2 input nodes\n for j in range(2+i):\n # Reduction with stride = 2 must be only for the input node\n stride = 2 if reduction_cur and j < 2 else 1\n op = MixedOp(c_cur, stride, search_space)\n self.dag_ops[i].append(op)\n\n def forward(self, s0, s1, w_dag):\n s0 = self.preprocess0(s0)\n s1 = self.preprocess1(s1)\n\n states = [s0, s1]\n for edges, w_list in zip(self.dag_ops, w_dag):\n state_cur = sum(edges[i](s, w) for i, (s, w) in enumerate((zip(states, w_list))))\n states.append(state_cur)\n\n state_out = torch.cat(states[2:], dim=1)\n return state_out\n\n\nclass NetworkCNN(nn.Module):\n\n def __init__(self, init_channels, input_channels, num_classes,\n num_layers, criterion, search_space, num_nodes, stem_multiplier):\n super(NetworkCNN, self).__init__()\n\n self.init_channels = init_channels\n self.num_classes = num_classes\n self.num_layers = num_layers\n self.criterion = criterion\n\n # TODO: Algorithm settings?\n self.num_nodes = num_nodes\n self.stem_multiplier = stem_multiplier\n\n c_cur = self.stem_multiplier*self.init_channels\n\n self.stem = nn.Sequential(\n nn.Conv2d(input_channels, c_cur, 3, padding=1, bias=False),\n nn.BatchNorm2d(c_cur)\n )\n\n # In first Cell stem is used for s0 and s1\n # c_prev_prev and c_prev - output channels size\n # c_cur - init channels size\n c_prev_prev, c_prev, c_cur = c_cur, c_cur, self.init_channels\n\n self.cells = nn.ModuleList()\n\n reduction_prev = False\n for i in range(self.num_layers):\n # For Network with 1 layer: Only Normal Cell\n if self.num_layers == 1:\n reduction_cur = False\n else:\n # For Network with two layers: First layer - Normal, Second - Reduction\n # For Other Networks: [1/3, 2/3] Layers - Reduction cell with double channels\n # Others - Normal cell\n if ((self.num_layers == 2 and i == 1) or\n (self.num_layers > 2 and i in [self.num_layers//3, 2*self.num_layers//3])):\n c_cur *= 2\n reduction_cur = True\n else:\n reduction_cur = False\n\n cell = Cell(self.num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space)\n reduction_prev = reduction_cur\n self.cells.append(cell)\n\n c_cur_out = c_cur * self.num_nodes\n c_prev_prev, c_prev = c_prev, c_cur_out\n\n self.global_pooling = 
nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(c_prev, self.num_classes)\n\n # Initialize alphas parameters\n num_ops = len(search_space.primitives)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n for i in range(self.num_nodes):\n self.alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))\n if self.num_layers > 1:\n self.alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))\n\n # Setup alphas list\n self.alphas = []\n for name, parameter in self.named_parameters():\n if \"alpha\" in name:\n self.alphas.append((name, parameter))\n\n def forward(self, x):\n\n weights_normal = [F.softmax(alpha, dim=-1) for alpha in self.alpha_normal]\n weights_reduce = [F.softmax(alpha, dim=-1) for alpha in self.alpha_reduce]\n\n s0 = s1 = self.stem(x)\n\n for cell in self.cells:\n weights = weights_reduce if cell.reduction_cur else weights_normal\n s0, s1 = s1, cell(s0, s1, weights)\n\n out = self.global_pooling(s1)\n\n # Make out flatten\n out = out.view(out.size(0), -1)\n\n logits = self.classifier(out)\n return logits\n\n def print_alphas(self):\n\n print(\"\\n>>> Alphas Normal <<<\")\n for alpha in self.alpha_normal:\n print(F.softmax(alpha, dim=-1))\n\n if self.num_layers > 1:\n print(\"\\n>>> Alpha Reduce <<<\")\n for alpha in self.alpha_reduce:\n print(F.softmax(alpha, dim=-1))\n print(\"\\n\")\n\n def getWeights(self):\n return self.parameters()\n\n def getAlphas(self):\n for _, parameter in self.alphas:\n yield parameter\n\n def loss(self, x, y):\n logits = self.forward(x)\n return self.criterion(logits, y)\n\n def genotype(self, search_space):\n gene_normal = search_space.parse(self.alpha_normal, k=2)\n gene_reduce = search_space.parse(self.alpha_reduce, k=2)\n # concat all intermediate nodes\n concat = range(2, 2 + self.num_nodes)\n\n return search_space.genotype(normal=gene_normal, normal_concat=concat,\n reduce=gene_reduce, reduce_concat=concat)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.randn",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.softmax",
"torch.nn.ParameterList",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.cat"
]
] |
enourbakhsh/skylink | [
"83270f3351ff637abeb0af25786412d4dd09134a"
] | [
"tests/test_networkx.py"
] | [
"import os\nimport skylink\nfrom skylink import testing\nimport numpy as np\nfrom astropy.table import Table\nimport FoFCatalogMatching\nimport pytest # noqa\n\n# TODO: test the matching with more than two catalogs\n# TODO: test N-way matching with `linking_lengths` as a dictionary\n# TODO: test if we catch illegal footprints that are not gnomonic-projectable\n# TODO: test MPI implementation\n# TODO: test a wide range of linking lengths\n\ngraph_lib = \"networkx\"\nncpus_max = os.cpu_count() # maximum number of cpus\nlinking_lengths_default = 0.75 # arcsec\nn = 2_000 # number of objects for the mock-up data\n\n\ndef make_mockup():\n def tnormal(mu=None, sigma=None, n=None, lower=-0.5, upper=0.5):\n return np.clip(np.random.normal(np.repeat(mu, n), sigma), lower, upper)\n\n np.random.seed(2)\n ra = np.random.uniform(4, 6, n)\n dec = np.random.uniform(-1, 1, n)\n\n cat_a = Table({\"ra\": ra, \"dec\": dec})\n cat_b = Table(\n {\n \"ra\": np.append(ra + tnormal(0, 0.0004, n), ra + tnormal(0, 0.0001, n)),\n \"dec\": np.append(dec + tnormal(0, 0.0002, n), dec + tnormal(0, 0.0002, n)),\n }\n )\n\n return cat_a, cat_b\n\n\ndef run_FoFCatalogMatching(cat_a, cat_b, return_pandas=False):\n \"\"\" Genetare an output using `FoFCatalogMatching` as our benchmark \"\"\"\n res_fcm = FoFCatalogMatching.match(\n {\"a\": cat_a, \"b\": cat_b}, linking_lengths_default\n )\n if return_pandas:\n return res_fcm.to_pandas()\n else:\n return res_fcm\n\n\ndef test_graph_lib():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl = skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n testing.assert_equal(res_fcm, res_sl)\n\n\ndef run_with_ncpus(cat_a, cat_b, ncpus):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n nprocs=ncpus,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_nprocs():\n # TODO: test equality with more than 2 catalogs\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_ncpus(cat_a, cat_b, 1)\n res_sl2 = run_with_ncpus(cat_a, cat_b, 2)\n res_sl3 = run_with_ncpus(cat_a, cat_b, ncpus_max)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n testing.assert_equal(res_sl2, res_sl3)\n\n\ndef run_with_overlap(cat_a, cat_b, overlap):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n overlap=overlap,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_overlap():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_overlap(cat_a, cat_b, 1.0)\n res_sl2 = run_with_overlap(cat_a, cat_b, 1.1)\n res_sl3 = run_with_overlap(cat_a, cat_b, 1.2)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n testing.assert_equal(res_sl2, res_sl3)\n\n\ndef run_with_linked_mask(cat_a, cat_b, use_linked_mask):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n use_linked_mask=use_linked_mask,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n )\n\n\n@pytest.mark.skip(\n reason=\"FIXME: The `networkx` graph library does not give the right 
results with use_linked_mask=True\"\n)\ndef test_linked_mask():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_linked_mask(cat_a, cat_b, True)\n res_sl2 = run_with_linked_mask(cat_a, cat_b, False)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n\n\ndef run_with_order(cat_a, cat_b, reverse=False):\n cats = {\"b\": cat_b, \"a\": cat_a} if reverse else {\"a\": cat_a, \"b\": cat_b}\n return skylink.match(\n cats,\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_cat_orders():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_order(cat_a, cat_b, False)\n res_sl2 = run_with_order(cat_a, cat_b, True)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n\n\ndef run_with_sort(cat_a, cat_b, sort):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n sort=sort,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_sort():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl1 = run_with_sort(cat_a, cat_b, True)\n res_sl2 = run_with_sort(cat_a, cat_b, False)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n\n\ndef run_with_storekdtree(cat_a, cat_b, storekdtree):\n return skylink.match(\n {\"a\": cat_a, \"b\": cat_b},\n linking_lengths=linking_lengths_default,\n graph_lib=graph_lib,\n storekdtree=storekdtree,\n nprocs=ncpus_max,\n silent=True,\n return_pandas=True,\n use_linked_mask=False,\n )\n\n\ndef test_storekdtree():\n cat_a, cat_b = make_mockup()\n res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)\n res_sl2 = run_with_storekdtree(cat_a, cat_b, False)\n res_sl1 = run_with_storekdtree(cat_a, cat_b, True)\n testing.assert_equal(res_fcm, res_sl1)\n testing.assert_equal(res_sl1, res_sl2)\n"
] | [
[
"numpy.random.uniform",
"numpy.random.seed",
"numpy.repeat"
]
] |
radiantprism/StarCraft-2 | [
"1f159ae84feaed17c5e0bd70e272c06992ae0c48"
] | [
"pysc2/lib/features_test.py"
] | [
"#!/usr/bin/python\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for features.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport pickle\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom future.builtins import range # pylint: disable=redefined-builtin\nimport numpy\nimport six\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nfrom pysc2.lib import point\n\nfrom google.protobuf import text_format\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\n\n\n# Heavily trimmed, so this is useful for testing actions, but not observations.\nobservation_text_proto = \"\"\"\nplayer_common {\n player_id: 1\n minerals: 0\n vespene: 0\n food_cap: 10\n food_used: 0\n food_army: 0\n food_workers: 0\n idle_worker_count: 0\n army_count: 0\n warp_gate_count: 0\n larva_count: 0\n}\ngame_loop: 20\n\"\"\"\n\n\nRECTANGULAR_DIMENSIONS = features.Dimensions(screen=(84, 80), minimap=(64, 67))\nSQUARE_DIMENSIONS = features.Dimensions(screen=84, minimap=64)\n\n\nclass AvailableActionsTest(absltest.TestCase):\n\n always_expected = {\n \"no_op\", \"move_camera\", \"select_point\", \"select_rect\",\n \"select_control_group\"\n }\n\n def setUp(self):\n super(AvailableActionsTest, self).setUp()\n self.obs = text_format.Parse(observation_text_proto, sc_pb.Observation())\n self.hideSpecificActions(True)\n\n def hideSpecificActions(self, hide_specific_actions): # pylint: disable=invalid-name\n self.features = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n hide_specific_actions=hide_specific_actions))\n\n def assertAvail(self, expected):\n actual = self.features.available_actions(self.obs)\n actual_names = {actions.FUNCTIONS[i].name for i in actual}\n self.assertEqual(actual_names, set(expected) | self.always_expected)\n\n def testAlways(self):\n self.assertAvail([])\n\n def testSelectUnit(self):\n self.obs.ui_data.multi.units.add(unit_type=1)\n self.assertAvail([\"select_unit\"])\n\n def testSelectIdleWorkder(self):\n self.obs.player_common.idle_worker_count = 1\n self.assertAvail([\"select_idle_worker\"])\n\n def testSelectArmy(self):\n self.obs.player_common.army_count = 3\n self.assertAvail([\"select_army\"])\n\n def testSelectWarpGates(self):\n self.obs.player_common.warp_gate_count = 1\n self.assertAvail([\"select_warp_gates\"])\n\n def testSelectLarva(self):\n self.obs.player_common.larva_count = 2\n self.assertAvail([\"select_larva\"])\n\n def testQuick(self):\n self.obs.abilities.add(ability_id=32)\n self.assertAvail([\"Effect_Salvage_quick\"])\n\n def testScreen(self):\n self.obs.abilities.add(ability_id=326, requires_point=True)\n self.assertAvail([\"Build_SensorTower_screen\"])\n\n def testScreenMinimap(self):\n self.obs.abilities.add(ability_id=17, requires_point=True)\n self.assertAvail([\"Patrol_screen\", \"Patrol_minimap\"])\n\n def testScreenAutocast(self):\n 
self.obs.abilities.add(ability_id=386, requires_point=True)\n self.assertAvail([\"Effect_Heal_screen\", \"Effect_Heal_autocast\"])\n\n def testScreenQuick(self):\n a = self.obs.abilities.add(ability_id=421)\n\n self.hideSpecificActions(True)\n a.requires_point = False\n self.assertAvail([\"Build_TechLab_quick\"])\n a.requires_point = True\n self.assertAvail([\"Build_TechLab_screen\"])\n\n self.hideSpecificActions(False)\n a.requires_point = False\n self.assertAvail([\"Build_TechLab_Barracks_quick\", \"Build_TechLab_quick\"])\n a.requires_point = True\n self.assertAvail([\"Build_TechLab_Barracks_screen\", \"Build_TechLab_screen\"])\n\n def testGeneral(self):\n self.obs.abilities.add(ability_id=1374)\n self.hideSpecificActions(False)\n self.assertAvail([\"BurrowDown_quick\", \"BurrowDown_Baneling_quick\"])\n self.hideSpecificActions(True)\n self.assertAvail([\"BurrowDown_quick\"])\n\n def testGeneralType(self):\n a = self.obs.abilities.add(ability_id=1376)\n self.hideSpecificActions(False)\n self.assertAvail([\"BurrowUp_quick\", \"BurrowUp_Baneling_quick\",\n \"BurrowUp_autocast\", \"BurrowUp_Baneling_autocast\"])\n self.hideSpecificActions(True)\n self.assertAvail([\"BurrowUp_quick\", \"BurrowUp_autocast\"])\n\n a.ability_id = 2110\n self.hideSpecificActions(False)\n self.assertAvail([\"BurrowUp_quick\", \"BurrowUp_Lurker_quick\"])\n self.hideSpecificActions(True)\n self.assertAvail([\"BurrowUp_quick\"])\n\n def testMany(self):\n add = [\n (23, True), # Attack\n (318, True), # Build_CommandCenter\n (320, True), # Build_Refinery\n (319, True), # Build_SupplyDepot\n (316, True), # Effect_Repair_SCV\n (295, True), # Harvest_Gather_SCV\n (16, True), # Move\n (17, True), # Patrol\n (4, False), # Stop\n ]\n for a, r in add:\n self.obs.abilities.add(ability_id=a, requires_point=r)\n self.hideSpecificActions(False)\n self.assertAvail([\n \"Attack_Attack_minimap\",\n \"Attack_Attack_screen\",\n \"Attack_minimap\",\n \"Attack_screen\",\n \"Build_CommandCenter_screen\",\n \"Build_Refinery_screen\",\n \"Build_SupplyDepot_screen\",\n \"Effect_Repair_screen\",\n \"Effect_Repair_autocast\",\n \"Effect_Repair_SCV_autocast\",\n \"Effect_Repair_SCV_screen\",\n \"Harvest_Gather_screen\",\n \"Harvest_Gather_SCV_screen\",\n \"Move_minimap\",\n \"Move_screen\",\n \"Move_Move_minimap\",\n \"Move_Move_screen\",\n \"Patrol_minimap\",\n \"Patrol_screen\",\n \"Patrol_Patrol_minimap\",\n \"Patrol_Patrol_screen\",\n \"Stop_quick\",\n \"Stop_Stop_quick\"\n ])\n self.hideSpecificActions(True)\n self.assertAvail([\n \"Attack_minimap\",\n \"Attack_screen\",\n \"Build_CommandCenter_screen\",\n \"Build_Refinery_screen\",\n \"Build_SupplyDepot_screen\",\n \"Effect_Repair_screen\",\n \"Effect_Repair_autocast\",\n \"Harvest_Gather_screen\",\n \"Move_minimap\",\n \"Move_screen\",\n \"Patrol_minimap\",\n \"Patrol_screen\",\n \"Stop_quick\",\n ])\n\n\nclass ToPointTest(absltest.TestCase):\n\n def testIntAsString(self):\n value = features._to_point(\"32\")\n self.assertEqual(value, point.Point(32, 32))\n\n def testIntStringTwoTuple(self):\n value = features._to_point((\"32\", 64))\n self.assertEqual(value, point.Point(32, 64))\n\n def testNoneInputReturnsNoneOutput(self):\n with self.assertRaises(AssertionError):\n features._to_point(None)\n\n def testNoneAsFirstElementOfTupleRaises(self):\n with self.assertRaises(TypeError):\n features._to_point((None, 32))\n\n def testNoneAsSecondElementOfTupleRaises(self):\n with self.assertRaises(TypeError):\n features._to_point((32, None))\n\n def testSingletonTupleRaises(self):\n with 
self.assertRaises(ValueError):\n features._to_point((32,))\n\n def testThreeTupleRaises(self):\n with self.assertRaises(ValueError):\n features._to_point((32, 32, 32))\n\n\nclass DimensionsTest(absltest.TestCase):\n\n def testScreenSizeWithoutMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=84)\n\n def testScreenWidthWithoutHeightRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(84, 0), minimap=64)\n\n def testScreenWidthHeightWithoutMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(84, 80))\n\n def testMinimapWidthAndHeightWithoutScreenRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(minimap=(64, 67))\n\n def testNoneNoneRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=None, minimap=None)\n\n def testSingularZeroesRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=0, minimap=0)\n\n def testTwoZeroesRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(0, 0), minimap=(0, 0))\n\n def testThreeTupleScreenRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(1, 2, 3), minimap=32)\n\n def testThreeTupleMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=64, minimap=(1, 2, 3))\n\n def testNegativeScreenRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=-64, minimap=32)\n\n def testNegativeMinimapRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=64, minimap=-32)\n\n def testNegativeScreenTupleRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=(-64, -64), minimap=32)\n\n def testNegativeMinimapTupleRaises(self):\n with self.assertRaises(ValueError):\n features.Dimensions(screen=64, minimap=(-32, -32))\n\n def testEquality(self):\n self.assertEqual(features.Dimensions(screen=64, minimap=64),\n features.Dimensions(screen=64, minimap=64))\n self.assertNotEqual(features.Dimensions(screen=64, minimap=64),\n features.Dimensions(screen=64, minimap=32))\n self.assertNotEqual(features.Dimensions(screen=64, minimap=64), None)\n\n\nclass TestParseAgentInterfaceFormat(parameterized.TestCase):\n\n def test_no_arguments_raises(self):\n with self.assertRaises(ValueError):\n features.parse_agent_interface_format()\n\n @parameterized.parameters((32, None), (None, 32))\n def test_invalid_feature_combinations_raise(self, screen, minimap):\n with self.assertRaises(ValueError):\n features.parse_agent_interface_format(\n feature_screen=screen,\n feature_minimap=minimap)\n\n def test_valid_feature_specification_is_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24))\n\n self.assertEqual(\n agent_interface_format.feature_dimensions.screen,\n point.Point(32, 32))\n\n self.assertEqual(\n agent_interface_format.feature_dimensions.minimap,\n point.Point(24, 24))\n\n @parameterized.parameters((32, None), (None, 32), (32, 64))\n def test_invalid_minimap_combinations_raise(self, screen, minimap):\n with self.assertRaises(ValueError):\n features.parse_agent_interface_format(\n rgb_screen=screen,\n rgb_minimap=minimap)\n\n def test_valid_minimap_specification_is_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n rgb_screen=32,\n rgb_minimap=(24, 24))\n\n self.assertEqual(\n agent_interface_format.rgb_dimensions.screen,\n point.Point(32, 32))\n\n 
self.assertEqual(\n agent_interface_format.rgb_dimensions.minimap,\n point.Point(24, 24))\n\n def test_invalid_action_space_raises(self):\n with self.assertRaises(KeyError):\n features.parse_agent_interface_format(\n feature_screen=64,\n feature_minimap=64,\n action_space=\"UNKNOWN_ACTION_SPACE\")\n\n @parameterized.parameters(actions.ActionSpace.__members__.keys())\n def test_valid_action_space_is_parsed(self, action_space):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24),\n rgb_screen=64,\n rgb_minimap=(48, 48),\n use_raw_units=True,\n action_space=action_space)\n\n self.assertEqual(\n agent_interface_format.action_space,\n actions.ActionSpace[action_space])\n\n def test_camera_width_world_units_are_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24),\n camera_width_world_units=77)\n\n self.assertEqual(agent_interface_format.camera_width_world_units, 77)\n\n def test_use_feature_units_is_parsed(self):\n agent_interface_format = features.parse_agent_interface_format(\n feature_screen=32,\n feature_minimap=(24, 24),\n use_feature_units=True)\n\n self.assertEqual(agent_interface_format.use_feature_units, True)\n\n\nclass FeaturesTest(absltest.TestCase):\n\n def testFunctionsIdsAreConsistent(self):\n for i, f in enumerate(actions.FUNCTIONS):\n self.assertEqual(i, f.id, \"id doesn't match for %s\" % f.id)\n\n def testAllVersionsOfAnAbilityHaveTheSameGeneral(self):\n for ability_id, funcs in six.iteritems(actions.ABILITY_IDS):\n self.assertLen({f.general_id for f in funcs}, 1,\n \"Multiple generals for %s\" % ability_id)\n\n def testValidFunctionsAreConsistent(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n\n valid_funcs = feats.action_spec()\n for func_def in valid_funcs.functions:\n func = actions.FUNCTIONS[func_def.id]\n self.assertEqual(func_def.id, func.id)\n self.assertEqual(func_def.name, func.name)\n self.assertEqual(len(func_def.args), len(func.args)) # pylint: disable=g-generic-assert\n\n def gen_random_function_call(self, action_spec, func_id):\n args = [[numpy.random.randint(0, size) for size in arg.sizes] # pylint: disable=g-complex-comprehension\n for arg in action_spec.functions[func_id].args]\n return actions.FunctionCall(func_id, args)\n\n def testIdsMatchIndex(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n action_spec = feats.action_spec()\n for func_index, func_def in enumerate(action_spec.functions):\n self.assertEqual(func_index, func_def.id)\n for type_index, type_def in enumerate(action_spec.types):\n self.assertEqual(type_index, type_def.id)\n\n def testReversingUnknownAction(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n hide_specific_actions=False))\n sc2_action = sc_pb.Action()\n sc2_action.action_feature_layer.unit_command.ability_id = 6 # Cheer\n func_call = feats.reverse_action(sc2_action)\n self.assertEqual(func_call.function, 0) # No-op\n\n def testSpecificActionsAreReversible(self):\n \"\"\"Test that the `transform_action` and `reverse_action` are inverses.\"\"\"\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n hide_specific_actions=False))\n action_spec = feats.action_spec()\n\n for func_def in action_spec.functions:\n for _ in range(10):\n func_call = 
self.gen_random_function_call(action_spec, func_def.id)\n\n sc2_action = feats.transform_action(\n None, func_call, skip_available=True)\n func_call2 = feats.reverse_action(sc2_action)\n sc2_action2 = feats.transform_action(\n None, func_call2, skip_available=True)\n if func_def.id == actions.FUNCTIONS.select_rect.id:\n # Need to check this one manually since the same rect can be\n # defined in multiple ways.\n def rect(a):\n return point.Rect(point.Point(*a[1]).floor(),\n point.Point(*a[2]).floor())\n\n self.assertEqual(func_call.function, func_call2.function)\n self.assertEqual(len(func_call.arguments), len(func_call2.arguments)) # pylint: disable=g-generic-assert\n self.assertEqual(func_call.arguments[0], func_call2.arguments[0])\n self.assertEqual(rect(func_call.arguments),\n rect(func_call2.arguments))\n else:\n self.assertEqual(func_call, func_call2, msg=sc2_action)\n self.assertEqual(sc2_action, sc2_action2)\n\n def testRawActionUnitTags(self):\n feats = features.Features(\n features.AgentInterfaceFormat(\n use_raw_units=True,\n action_space=actions.ActionSpace.RAW),\n map_size=point.Point(100, 100))\n\n tags = [numpy.random.randint(2**20, 2**24) for _ in range(10)]\n ntags = numpy.array(tags, dtype=numpy.int64)\n tag = tags[0]\n ntag = numpy.array(tag, dtype=numpy.int64)\n\n def transform(fn, *args):\n func_call = actions.RAW_FUNCTIONS[fn](\"now\", *args)\n proto = feats.transform_action(None, func_call, skip_available=True)\n return proto.action_raw.unit_command\n\n self.assertEqual(transform(\"Attack_pt\", tag, [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", ntag, [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", [tag], [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", [ntag], [15, 20]).unit_tags, [tag])\n self.assertEqual(transform(\"Attack_pt\", tags, [15, 20]).unit_tags, tags)\n self.assertEqual(transform(\"Attack_pt\", ntags, [15, 20]).unit_tags, tags)\n # Weird, but needed for backwards compatibility\n self.assertEqual(transform(\"Attack_pt\", [tags], [15, 20]).unit_tags, tags)\n self.assertEqual(transform(\"Attack_pt\", [ntags], [15, 20]).unit_tags, tags)\n\n self.assertEqual(transform(\"Attack_unit\", tag, tag).target_unit_tag, tag)\n self.assertEqual(transform(\"Attack_unit\", tag, ntag).target_unit_tag, tag)\n self.assertEqual(transform(\"Attack_unit\", tag, [tag]).target_unit_tag, tag)\n self.assertEqual(transform(\"Attack_unit\", tag, [ntag]).target_unit_tag, tag)\n\n def testCanPickleSpecs(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=SQUARE_DIMENSIONS))\n action_spec = feats.action_spec()\n observation_spec = feats.observation_spec()\n\n self.assertEqual(action_spec, pickle.loads(pickle.dumps(action_spec)))\n self.assertEqual(observation_spec,\n pickle.loads(pickle.dumps(observation_spec)))\n\n def testCanPickleFunctionCall(self):\n func = actions.FUNCTIONS.select_point(\"select\", [1, 2])\n self.assertEqual(func, pickle.loads(pickle.dumps(func)))\n\n def testCanDeepcopyNumpyFunctionCall(self):\n arguments = [numpy.float32] * len(actions.Arguments._fields)\n dtypes = actions.FunctionCall(\n function=numpy.float32,\n arguments=actions.Arguments(*arguments))\n self.assertEqual(dtypes, copy.deepcopy(dtypes))\n\n def testSizeConstructors(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=SQUARE_DIMENSIONS))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 84))\n 
self.assertEqual(spec.types.screen2.sizes, (84, 84))\n self.assertEqual(spec.types.minimap.sizes, (64, 64))\n\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 80))\n self.assertEqual(spec.types.screen2.sizes, (84, 80))\n self.assertEqual(spec.types.minimap.sizes, (64, 67))\n\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 80))\n self.assertEqual(spec.types.screen2.sizes, (84, 80))\n self.assertEqual(spec.types.minimap.sizes, (64, 67))\n\n # Missing one or the other of game_info and dimensions.\n with self.assertRaises(ValueError):\n features.Features()\n\n # Resolution/action space mismatch.\n with self.assertRaises(ValueError):\n features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n action_space=actions.ActionSpace.RGB))\n with self.assertRaises(ValueError):\n features.Features(features.AgentInterfaceFormat(\n rgb_dimensions=RECTANGULAR_DIMENSIONS,\n action_space=actions.ActionSpace.FEATURES))\n with self.assertRaises(ValueError):\n features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=RECTANGULAR_DIMENSIONS))\n\n def testFlRgbActionSpec(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),\n action_space=actions.ActionSpace.FEATURES))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (84, 80))\n self.assertEqual(spec.types.screen2.sizes, (84, 80))\n self.assertEqual(spec.types.minimap.sizes, (64, 67))\n\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),\n action_space=actions.ActionSpace.RGB))\n spec = feats.action_spec()\n self.assertEqual(spec.types.screen.sizes, (128, 132))\n self.assertEqual(spec.types.screen2.sizes, (128, 132))\n self.assertEqual(spec.types.minimap.sizes, (74, 77))\n\n def testFlRgbObservationSpec(self):\n feats = features.Features(features.AgentInterfaceFormat(\n feature_dimensions=RECTANGULAR_DIMENSIONS,\n rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),\n action_space=actions.ActionSpace.FEATURES))\n obs_spec = feats.observation_spec()\n self.assertEqual(obs_spec[\"feature_screen\"], # pylint: disable=g-generic-assert\n (len(features.SCREEN_FEATURES), 80, 84))\n self.assertEqual(obs_spec[\"feature_minimap\"], # pylint: disable=g-generic-assert\n (len(features.MINIMAP_FEATURES), 67, 64))\n self.assertEqual(obs_spec[\"rgb_screen\"], (132, 128, 3))\n self.assertEqual(obs_spec[\"rgb_minimap\"], (77, 74, 3))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.array",
"numpy.random.randint"
]
] |
ccoulombe/thinc | [
"8d891b61ddef3ca00266ca0ec7c47e2d063a3a83"
] | [
"examples/wrap_pytorch.py"
] | [
"import plac\nimport numpy\n\nimport torch\nfrom torch import autograd\nfrom torch import nn\nimport torch.optim\nimport torch.cuda\nfrom thinc.neural.ops import CupyOps\n\nfrom thinc.extra.wrappers import PyTorchWrapper\nfrom thinc.v2v import Model\n\n\ndef main(length=1000, nO=32, nI=32):\n if CupyOps.xp != None:\n print(\"Use GPU\")\n Model.ops = CupyOps()\n Model.Ops = CupyOps\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n pt_model = nn.Linear(nI, nO)\n optimizer = torch.optim.Adam(pt_model.parameters())\n\n model = PyTorchWrapper(pt_model)\n\n X = Model.ops.xp.ones((length, nI), dtype='f')\n y = 1. / X\n for i in range(10):\n yh, get_dX = model.begin_update(X)\n dY = (yh - y) / len(y)\n dX = get_dX(dY)\n\n\nif __name__ == '__main__':\n plac.call(main)\n"
] | [
[
"torch.set_default_tensor_type",
"torch.nn.Linear"
]
] |
mdraw/AlphaPose | [
"bed8e0798f6deed4789b9ae2646f72b9fd138c5b"
] | [
"video_demo.py"
] | [
"import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\n\nimport torch.nn as nn\nimport torch.utils.data\nimport numpy as np\nfrom opt import opt\n\nfrom dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco\nfrom yolo.util import write_results, dynamic_write_results\nfrom SPPE.src.main_fast_inference import *\n\nimport ntpath\nimport os\nimport sys\nfrom tqdm import tqdm\nimport time\nfrom fn import getTime\nimport cv2\n\nfrom pPose_nms import pose_nms, write_json\n\nargs = opt\nargs.dataset = 'coco'\nif not args.sp:\n torch.multiprocessing.set_start_method('forkserver', force=True)\n torch.multiprocessing.set_sharing_strategy('file_system')\n\nif __name__ == \"__main__\":\n videofile = args.video\n mode = args.mode\n if not os.path.exists(args.outputpath):\n os.mkdir(args.outputpath)\n \n if not len(videofile):\n raise IOError('Error: must contain --video')\n\n # Load input video\n data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()\n (fourcc,fps,frameSize) = data_loader.videoinfo()\n\n # Load detection loader\n print('Loading YOLO model..')\n sys.stdout.flush()\n det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()\n det_processor = DetectionProcessor(det_loader).start()\n \n # Load pose model\n pose_dataset = Mscoco()\n if args.fast_inference:\n print('Using fast inference...')\n pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)\n else:\n print('Using slow, more accurate inference...')\n pose_model = InferenNet(4 * 1 + 1, pose_dataset)\n pose_model\n pose_model.eval()\n\n runtime_profile = {\n 'dt': [],\n 'pt': [],\n 'pn': []\n }\n\n # Data writer\n save_path = os.path.join(args.outputpath, 'AlphaPose_'+ntpath.basename(videofile).split('.')[0]+'.avi')\n writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()\n\n im_names_desc = tqdm(range(data_loader.length()))\n batchSize = args.posebatch\n for i in im_names_desc:\n start_time = getTime()\n with torch.no_grad():\n (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()\n if orig_img is None:\n break\n if boxes is None or boxes.nelement() == 0:\n writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])\n continue\n\n ckpt_time, det_time = getTime(start_time)\n runtime_profile['dt'].append(det_time)\n # Pose Estimation\n \n datalen = inps.size(0)\n leftover = 0\n if (datalen) % batchSize:\n leftover = 1\n num_batches = datalen // batchSize + leftover\n hm = []\n for j in range(num_batches):\n inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)]\n hm_j = pose_model(inps_j)\n hm.append(hm_j)\n hm = torch.cat(hm)\n ckpt_time, pose_time = getTime(ckpt_time)\n runtime_profile['pt'].append(pose_time)\n\n hm = hm.cpu().data\n writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])\n\n ckpt_time, post_time = getTime(ckpt_time)\n runtime_profile['pn'].append(post_time)\n\n if args.profile:\n # TQDM\n im_names_desc.set_description(\n 'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(\n dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))\n )\n\n print('===========================> Finish Model Running.')\n if (args.save_img or args.save_video) and not args.vis_fast:\n print('===========================> Rendering remaining images in the queue...')\n print('===========================> If this step takes too 
long, you can enable the --vis_fast flag to use fast rendering (real-time).')\n while(writer.running()):\n pass\n writer.stop()\n final_result = writer.results()\n write_json(final_result, args.outputpath)\n"
] | [
[
"torch.multiprocessing.set_sharing_strategy",
"torch.no_grad",
"torch.multiprocessing.set_start_method",
"torch.cat",
"numpy.mean"
]
] |
aiedward/OCR-1 | [
"82ce764fb0071917360ea8b1ec5372035d0897b5"
] | [
"ctpn/show_model.py"
] | [
"from tensorflow.python import pywrap_tensorflow\ncheckpoint_path = 'checkpoints/VGGnet_fast_rcnn_iter_50000.ckpt'\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nfor key in var_to_shape_map:\n print(\"tensor_name: \", key)\n"
] | [
[
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader"
]
] |
fligt/inktime | [
"45f20602ef07cc8f62e0192318913cf910eb925b"
] | [
"inktime/rgbkm.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/00_rgbkm.ipynb (unless otherwise specified).\n\n__all__ = ['reflectance']\n\n# Cell\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cv2\n\nimport scipy.optimize as optimize\n\n\ndef reflectance(K, S, D, Rg):\n '''Calculates reflectance for single colorant Kubelka-Munk model.\n\n Based on Nobbs (1997) formulation with modified Saunderson expression for infinite reflectance.\n Function works for single channel, 3 RGB channels, and spectral data/images with muliple wavelength channels.\n\n\n Parameters:\n -----------\n K: tuple-like (n channels)\n Colorant absorption coefficients for wavelength or RGB channels\n S: tuple-like (n channels)\n Colorant scattering coefficients for wavelength or RGB channels\n D: array ( height x width)\n Colorant thickness image\n Rg: array (height x width x n) or rgb tuple with shape (3,)\n Background reflectance image or background color\n\n Returns:\n --------\n refl: array (height x width x n)\n n-channel reflectance image\n\n '''\n\n Rg = np.array(Rg)\n shape = Rg.shape\n\n\n # create uniform background image if Rg is rgb tuple\n\n if len(shape) == 1: # understood as rgb tuple\n\n h, w = D.shape\n\n Rg_img = np.ones([h, w, 3])\n Rg_img[:,:] = Rg\n Rg = Rg_img\n\n shape = Rg.shape\n\n #print('created uniform rgb background image Rg with shape: {}'.format(shape))\n\n\n n_channels = shape[-1]\n\n K = np.array(K).reshape(1, n_channels)\n S = np.array(S).reshape(1, n_channels)\n\n D = np.array(D).reshape(-1, 1)\n Rg = Rg.reshape(-1, n_channels)\n\n # need to return infinity for K =< 0 or S < 0 in optimization code\n #pos_S = S >= 0\n #pos_K = K > 0 # also non-zero\n #ok = pos_S & pos_K\n\n #Rinf = np.zeros([1, n_channels])\n Rinf = (S/K) / ((S/K) + 1 + np.sqrt(1 + 2 * (S/K)))\n #Rinf[ok] = (S[ok]/K[ok]) / ((S[ok]/K[ok]) + 1 + np.sqrt(1 + 2 * (S[ok]/K[ok])))\n #Rinf[~ok] = np.infty\n\n Z = D * np.sqrt(K * (K + 2 * S))\n\n Z = np.clip(Z, a_min=0, a_max=50)\n\n beta = np.exp(2 * Z) - 1\n alpha = (1 - Rinf**2) / (1 - Rg * Rinf)\n\n refl = (alpha * Rg + beta * Rinf) / (alpha + beta)\n refl = refl.reshape(shape)\n\n return refl"
] | [
[
"numpy.sqrt",
"numpy.ones",
"numpy.exp",
"numpy.clip",
"numpy.array"
]
] |
beldonl/gpkit | [
"4c422d3f3b65b85f5baacc36305064aee4341ebe"
] | [
"gpkit/constraints/sgp.py"
] | [
"\"\"\"Implement the SequentialGeometricProgram class\"\"\"\nfrom time import time\nfrom collections import OrderedDict\nimport numpy as np\nfrom ..exceptions import InvalidGPConstraint, Infeasible, UnnecessarySGP\nfrom ..keydict import KeyDict\nfrom ..nomials import Variable\nfrom .gp import GeometricProgram\nfrom ..nomials import PosynomialInequality\nfrom .. import NamedVariables\nfrom .costed import CostedConstraintSet\n\n\nEPS = 1e-6 # 1 +/- this is used in a few relative differences\n\n# pylint: disable=too-many-instance-attributes\nclass SequentialGeometricProgram(CostedConstraintSet):\n \"\"\"Prepares a collection of signomials for a SP solve.\n\n Arguments\n ---------\n cost : Posynomial\n Objective to minimize when solving\n constraints : list of Constraint or SignomialConstraint objects\n Constraints to maintain when solving (implicitly Signomials <= 1)\n verbosity : int (optional)\n Currently has no effect: SequentialGeometricPrograms don't know\n anything new after being created, unlike GeometricPrograms.\n\n Attributes with side effects\n ----------------------------\n `gps` is set during a solve\n `result` is set at the end of a solve\n\n Examples\n --------\n >>> gp = gpkit.geometric_program.SequentialGeometricProgram(\n # minimize\n x,\n [ # subject to\n 1/x - y/x, # <= 1, implicitly\n y/10 # <= 1\n ])\n >>> gp.solve()\n \"\"\"\n gps = solver_outs = _results = result = model = None\n _gp = _spvars = _lt_approxs = pccp_penalty = None\n with NamedVariables(\"SGP\"):\n slack = Variable(\"PCCPslack\")\n\n def __init__(self, cost, model, substitutions, *,\n use_pccp=True, pccp_penalty=2e2, **initgpargs):\n # pylint: disable=super-init-not-called,non-parent-init-called\n if cost.any_nonpositive_cs:\n raise UnnecessarySGP(\"\"\"Sequential GPs need Posynomial objectives.\n\n The equivalent of a Signomial objective can be constructed by constraining\n a dummy variable `z` to be greater than the desired Signomial objective `s`\n (z >= s) and then minimizing that dummy variable.\"\"\")\n self.model = model\n self._original_cost = cost\n self.externalfn_vars = \\\n frozenset(Variable(v) for v in self.model.varkeys if v.externalfn)\n if not self.externalfn_vars:\n try:\n sgpconstraints = {\"SP constraints\": [], \"GP constraints\": []}\n self._lt_approxs = []\n for cs in model.flat():\n try:\n if not isinstance(cs, PosynomialInequality):\n cs.as_hmapslt1(substitutions) # gp-compatible?\n sgpconstraints[\"GP constraints\"].append(cs)\n except InvalidGPConstraint:\n sgpconstraints[\"SP constraints\"].append(cs)\n if use_pccp:\n lts = [lt/self.slack for lt in cs.as_approxlts()]\n else:\n lts = cs.as_approxlts()\n self._lt_approxs.append(lts)\n if not sgpconstraints[\"SP constraints\"]:\n raise UnnecessarySGP(\"\"\"Model valid as a Geometric Program.\n\n SequentialGeometricPrograms should only be created with Models containing\n Signomial Constraints, since Models without Signomials have global\n solutions and can be solved with 'Model.solve()'.\"\"\")\n if use_pccp:\n self.pccp_penalty = pccp_penalty\n self.cost = cost * self.slack**pccp_penalty\n sgpconstraints[\"GP constraints\"].append(self.slack >= 1)\n else:\n self.cost = cost\n self.idxlookup = {k: i for i, k in enumerate(sgpconstraints)}\n list.__init__(self, sgpconstraints.values())\n self.substitutions = substitutions\n self._gp = self.init_gp(**initgpargs)\n self.blackboxconstraints = False\n return\n except AttributeError:\n pass # some constraint lacked\n self.blackboxconstraints = True\n self.__bare_init__(cost, model, 
substitutions)\n\n # pylint: disable=too-many-locals,too-many-branches\n # pylint: disable=too-many-arguments\n # pylint: disable=too-many-statements\n def localsolve(self, solver=None, *, verbosity=1, x0=None, reltol=1e-4,\n iteration_limit=50, mutategp=True, **solveargs):\n \"\"\"Locally solves a SequentialGeometricProgram and returns the solution.\n\n Arguments\n ---------\n solver : str or function (optional)\n By default uses one of the solvers found during installation.\n If set to \"mosek\", \"mosek_cli\", or \"cvxopt\", uses that solver.\n If set to a function, passes that function cs, A, p_idxs, and k.\n verbosity : int (optional)\n If greater than 0, prints solve time and number of iterations.\n Each GP is created and solved with verbosity one less than this, so\n if greater than 1, prints solver name and time for each GP.\n x0 : dict (optional)\n Initial location to approximate signomials about.\n reltol : float\n Iteration ends when this is greater than the distance between two\n consecutive solve's objective values.\n iteration_limit : int\n Maximum GP iterations allowed.\n mutategp: boolean\n Prescribes whether to mutate the previously generated GP\n or to create a new GP with every solve.\n **solveargs :\n Passed to solver function.\n\n Returns\n -------\n result : dict\n A dictionary containing the translated solver result.\n \"\"\"\n self.gps, self.solver_outs, self._results = [], [], []\n # if there's external functions we can't mutate the GP\n mutategp = mutategp and not self.blackboxconstraints\n if not mutategp and not x0:\n raise ValueError(\"Solves with arbitrary constraint generators\"\n \" must specify an initial starting point x0.\")\n if mutategp:\n if x0:\n self._gp = self.init_gp(x0)\n gp = self._gp\n starttime = time()\n if verbosity > 0:\n print(\"Starting a sequence of GP solves\")\n if self.externalfn_vars:\n print(\" for %i variables defined by externalfns\"\n % len(self.externalfn_vars))\n elif mutategp:\n print(\" for %i free variables\" % len(self._spvars))\n print(\" in %i signomial constraints\"\n % len(self[\"SP constraints\"]))\n print(\" and for %i free variables\" % len(gp.varlocs))\n print(\" in %i posynomial inequalities.\" % len(gp.k))\n prevcost, cost, rel_improvement = None, None, None\n while rel_improvement is None or rel_improvement > reltol:\n prevcost = cost\n if len(self.gps) > iteration_limit:\n raise Infeasible(\n \"Unsolved after %s iterations. 
Check `m.program.results`;\"\n \" if they're converging, try `.localsolve(...,\"\n \" iteration_limit=NEWLIMIT)`.\" % len(self.gps))\n if mutategp:\n self.update_gp(x0)\n else:\n gp = self.gp(x0)\n gp.model = self.model\n self.gps.append(gp) # NOTE: SIDE EFFECTS\n if verbosity > 1:\n print(\"\\nGP Solve %i\" % len(self.gps))\n if verbosity > 2:\n print(\"===============\")\n solver_out = gp.solve(solver, verbosity=verbosity-1,\n gen_result=False, **solveargs)\n self.solver_outs.append(solver_out)\n cost = float(solver_out[\"objective\"])\n x0 = dict(zip(gp.varlocs, np.exp(solver_out[\"primal\"])))\n if verbosity > 2 and self._spvars:\n result = gp.generate_result(solver_out, verbosity=verbosity-3)\n self._results.append(result)\n print(result.table(self._spvars))\n elif verbosity > 1:\n print(\"Solved cost was %.4g.\" % cost)\n if prevcost is None:\n continue\n rel_improvement = (prevcost - cost)/(prevcost + cost)\n if cost*(1 - EPS) > prevcost + EPS and verbosity > -1:\n print(\"SGP not convergent: Cost rose by %.2g%% on GP solve %i.\"\n \" Details can be found in `m.program.results` or by\"\n \" solving at a higher verbosity. Note that convergence is\"\n \" not guaranteed for models with SignomialEqualities.\\n\"\n % (100*(cost - prevcost)/prevcost, len(self.gps)))\n rel_improvement = cost = None\n # solved successfully!\n self.result = gp.generate_result(solver_out, verbosity=verbosity-3)\n self.result[\"soltime\"] = time() - starttime\n if verbosity > 1:\n print()\n if verbosity > 0:\n print(\"Solving took %.3g seconds and %i GP solves.\"\n % (self.result[\"soltime\"], len(self.gps)))\n self.model.process_result(self.result)\n if self.externalfn_vars:\n for v in self.externalfn_vars:\n self[0].insert(0, v.key.externalfn) # for constraint senss\n if self.slack.key in self.result[\"variables\"]:\n excess_slack = self.result[\"variables\"][self.slack.key] - 1\n if excess_slack <= EPS:\n del self.result[\"freevariables\"][self.slack.key]\n del self.result[\"variables\"][self.slack.key]\n del self.result[\"sensitivities\"][\"variables\"][self.slack.key]\n slackconstraint = self[\"GP constraints\"][-1]\n del self.result[\"sensitivities\"][\"constraints\"][slackconstraint]\n elif verbosity > -1:\n print(\"Final solution let signomial constraints slacken by\"\n \" %.2g%%. Calling .localsolve with a higher\"\n \" `pccp_penalty` (it was %.3g this time) will reduce\"\n \" final slack if the model is solvable with less. 
If\"\n \" you think it might not be, check by solving with \"\n \"`use_pccp=False, x0=(this model's final solution)`.\\n\"\n % (100*excess_slack, self.pccp_penalty))\n return self.result\n\n # pylint: disable=too-many-locals\n def localsolveonce(self, solver=None, verbosity=1, x0=None, reltol=1e-4,\n iteration_limit=50, mutategp=True, **kwargs):\n \"\"\"Locally solves a SequentialGeometricProgram ONCE and returns the solution.\n\n Arguments\n ---------\n solver : str or function (optional)\n By default uses one of the solvers found during installation.\n If set to \"mosek\", \"mosek_cli\", or \"cvxopt\", uses that solver.\n If set to a function, passes that function cs, A, p_idxs, and k.\n verbosity : int (optional)\n If greater than 0, prints solve time and number of iterations.\n Each GP is created and solved with verbosity one less than this, so\n if greater than 1, prints solver name and time for each GP.\n x0 : dict (optional)\n Initial location to approximate signomials about.\n reltol : float\n Iteration ends when this is greater than the distance between two\n consecutive solve's objective values.\n iteration_limit : int\n Maximum GP iterations allowed.\n *args, **kwargs :\n Passed to solver function.\n\n\n Returns\n -------\n result : dict\n A dictionary containing the translated solver result.\n \"\"\"\n starttime = time()\n if verbosity > 0:\n print(\"Beginning signomial solve.\")\n self.gps = [] # NOTE: SIDE EFFECTS\n self.results = []\n if x0 and mutategp:\n self._gp = self.init_gp(self.substitutions, x0)\n slackvar = Variable()\n prevcost, cost, rel_improvement = None, None, None\n while (rel_improvement is None or rel_improvement > reltol) and len(self.gps) < iteration_limit:\n if len(self.gps) > iteration_limit:\n raise RuntimeWarning(\"\"\"problem unsolved after %s iterations.\n\n The last result is available in Model.program.gps[-1].result. If the gps\n appear to be converging, you may wish to increase the iteration limit by\n calling .localsolve(..., iteration_limit=NEWLIMIT).\"\"\" % len(self.gps))\n gp = self.gp(x0, mutategp)\n self.gps.append(gp) # NOTE: SIDE EFFECTS\n try:\n result = gp.solve(solver, verbosity-1,\n warn_on_check=True, **kwargs)\n self.results.append(result)\n except (RuntimeWarning, ValueError):\n feas_constrs = ([slackvar >= 1] +\n [posy <= slackvar\n for posy in gp.posynomials[1:]])\n primal_feas = GeometricProgram(slackvar**100 * gp.cost,\n feas_constrs, None)\n self.gps.append(primal_feas)\n result = primal_feas.solve(solver, verbosity-1, **kwargs)\n result[\"cost\"] = None # reset the cost-counting\n x0 = result[\"freevariables\"]\n prevcost, cost = cost, result[\"cost\"]\n if prevcost is None or cost is None:\n rel_improvement = None\n elif prevcost < (1-reltol)*cost:\n print(\"SP is not converging! Last GP iteration had a higher\"\n \" cost (%.2g) than the previous one (%.2g). Results for\"\n \" each iteration are in (Model).program.results. 
If your\"\n \" model contains SignomialEqualities, note that\"\n \" convergence is not guaranteed: try replacing any\"\n \" SigEqs you can and solving again.\" % (cost, prevcost))\n else:\n rel_improvement = abs(prevcost-cost)/(prevcost + cost)\n # solved successfully!\n soltime = time() - starttime\n if verbosity > 0:\n print(\"Solving took %i GP solves\" % len(self.gps)\n + \" and %.3g seconds.\" % soltime)\n self.process_result(result)\n self.result = SolutionArray(result.copy()) # NOTE: SIDE EFFECTS\n self.result[\"soltime\"] = soltime\n if self.externalfn_vars:\n for v in self.externalfn_vars:\n self[0].insert(0, v.key.externalfn) # for constraint senss\n return self.result\n\n @property\n def results(self):\n \"Creates and caches results from the raw solver_outs\"\n if not self._results:\n self._results = [o[\"generate_result\"]() for o in self.solver_outs]\n return self._results\n\n\n def _fill_x0(self, x0):\n \"Returns a copy of x0 with subsitutions added.\"\n x0kd = KeyDict()\n x0kd.varkeys = self.varkeys\n if x0:\n x0kd.update(x0) # has to occur after the setting of varkeys\n x0kd.update(self.substitutions)\n return x0kd\n\n def init_gp(self, x0=None, **initgpargs):\n \"Generates a simplified GP representation for later modification\"\n x0 = self._fill_x0(x0)\n constraints = OrderedDict({\"SP approximations\": []})\n constraints[\"GP constraints\"] = self[\"GP constraints\"]\n self._spvars = set([self.slack])\n for cs, lts in zip(self[\"SP constraints\"], self._lt_approxs):\n for lt, gt in zip(lts, cs.as_approxgts(x0)):\n constraint = (lt <= gt)\n constraint.generated_by = cs\n constraints[\"SP approximations\"].append(constraint)\n self._spvars.update({vk for vk in gt.varkeys\n if vk not in self.substitutions})\n gp = GeometricProgram(self.cost, constraints, self.substitutions,\n **initgpargs)\n gp.x0 = x0\n return gp\n\n def update_gp(self, x0):\n \"Update self._gp for x0.\"\n if not self.gps:\n return # we've already generated the first gp\n gp = self._gp\n gp.x0.update({k: v for (k, v) in x0.items() if k in self._spvars})\n hmap_idx = 0\n for sp_constraint, lts in zip(self[\"SP constraints\"], self._lt_approxs):\n for lt, gt in zip(lts, sp_constraint.as_approxgts(gp.x0)):\n unsubbed = lt/gt\n gp[\"SP approximations\"][hmap_idx].unsubbed = [unsubbed]\n hmap = unsubbed.hmap.sub(self.substitutions, unsubbed.varkeys)\n hmap.parent = gp[\"SP approximations\"][hmap_idx]\n hmap_idx += 1 # here because gp.hmaps[0] is the cost hmap\n gp.hmaps[hmap_idx] = hmap\n gp.gen()\n\n def gp(self, x0=None, **gpinitargs):\n \"The GP approximation of this SP at x0.\"\n x0 = self._fill_x0(x0)\n constraints = OrderedDict(\n {\"SP constraints\": [c.as_gpconstr(x0) for c in self.model.flat()]})\n if self.externalfn_vars:\n constraints[\"Generated by externalfns\"] = []\n for v in self.externalfn_vars:\n constraint = v.key.externalfn(v, x0)\n constraint.generated_by = v.key.externalfn\n constraints[\"Generated by externalfns\"].append(constraint)\n gp = GeometricProgram(self._original_cost,\n constraints, self.substitutions, **gpinitargs)\n gp.x0 = x0\n return gp\n"
] | [
[
"numpy.exp"
]
] |
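The `localsolve` loop in the row above advances by exponentiating the GP solver's log-space primal into the next linearization point `x0` and stops once the relative cost improvement drops below `reltol`. A minimal sketch of those two steps using only NumPy; the variable names and toy numbers below are hypothetical:

```python
import numpy as np

def recover_point(varlocs, primal_log):
    # GP solvers work in log space; exponentiating the primal vector gives the
    # variable values used as the next linearization point x0.
    return dict(zip(varlocs, np.exp(np.asarray(primal_log))))

def converged(prevcost, cost, reltol=1e-4):
    # Stop once the symmetric relative improvement falls below reltol.
    if prevcost is None:
        return False
    return (prevcost - cost) / (prevcost + cost) <= reltol

x0 = recover_point(["x", "y"], [0.0, 1.0])   # {'x': 1.0, 'y': 2.718...}
print(x0, converged(10.0, 9.9999))           # converged -> True
```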
romain-fontugne/roa-counter | [
"35413f036a0a75088ae318dfa3df58b3cbce6095"
] | [
"count.py"
] | [
"from datetime import datetime\nfrom matplotlib import pylab as plt\nfrom requests_cache import CachedSession\n\nCACHE_EXPIRATION_SECS = 3600*24*356\nYEAR_RANGE = range(2018, 2022)\nMARKERS = [\"o\", \"s\", \"d\", \"+\", \"*\"]\n\nRIRS = {\n 'AFRINIC': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/afrinic.tal/',\n 'marker': 'o',\n },\n 'APNIC': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/apnic.tal/',\n 'marker': 's',\n },\n 'ARIN': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/arin.tal/',\n 'marker': 'd'\n },\n 'LACNIC': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/lacnic.tal/',\n 'marker': '+',\n },\n 'RIPE': {\n 'url': 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/',\n 'marker': '*',\n }\n }\n\nsession = CachedSession(ExpirationTime = CACHE_EXPIRATION_SECS)\nplt.figure(figsize=(7,4))\n\nfor rir, rir_info in RIRS.items():\n x = []\n y = []\n for year in YEAR_RANGE:\n for month in range(1,13):\n\n roa_count = -1 # skip the header\n parsed_url = f'{rir_info[\"url\"]}/{year}/{month:02d}/15/roas.csv'\n csv = session.get( parsed_url )\n if csv.status_code != 200:\n print(parsed_url)\n print(csv.status_code)\n continue\n\n for line in csv.iter_lines(decode_unicode=True):\n roa_count += 1\n\n\n if roa_count > 0:\n x.append( datetime(year, month, 15) )\n y.append( roa_count )\n \n\n plt.plot(x, y, label=rir, marker=rir_info['marker'])\n\nplt.grid( True )\nplt.legend()\nplt.ylabel('Number of ROAs')\nplt.xticks(rotation=45)\nplt.tight_layout()\nplt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.png')\nplt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.pdf')\n"
] | [
[
"matplotlib.pylab.savefig",
"matplotlib.pylab.grid",
"matplotlib.pylab.legend",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.figure",
"matplotlib.pylab.xticks",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.plot"
]
] |
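The script in the row above downloads one `roas.csv` per month through a `requests_cache.CachedSession` and counts its data lines. Below is a minimal sketch of that fetch-and-count step. Note that the documented expiration keyword in requests-cache is `expire_after`, whereas the row passes `ExpirationTime`, which is not a documented parameter; the URL is only an illustrative example of the same archive layout:

```python
from requests_cache import CachedSession

# expire_after (seconds) is the documented keyword in requests-cache.
session = CachedSession("roa_cache", expire_after=3600 * 24 * 356)

def count_roas(url):
    # Count data rows of a remote CSV, skipping the header; -1 signals an error.
    resp = session.get(url)
    if resp.status_code != 200:
        return -1
    return sum(1 for _ in resp.iter_lines(decode_unicode=True)) - 1

print(count_roas("https://ftp.ripe.net/ripe/rpki/ripencc.tal/2021/06/15/roas.csv"))
```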
NagisaZj/ProMP | [
"539739ae2b7d5fdcad00855da695f643b23df4b3"
] | [
"rlkit/torch/networks.py"
] | [
"\"\"\"\nGeneral networks for pytorch.\n\nAlgorithm-specific networks should go else-where.\n\"\"\"\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom rlkit.policies.base import Policy\nfrom rlkit.torch import pytorch_util as ptu\nfrom rlkit.torch.core import PyTorchModule\nfrom rlkit.torch.data_management.normalizer import TorchFixedNormalizer\nfrom rlkit.torch.modules import LayerNorm\nimport math\n\ndef identity(x):\n return x\n\n\nclass Mlp(PyTorchModule):\n def __init__(\n self,\n hidden_sizes,\n output_size,\n input_size,\n init_w=3e-3,\n hidden_activation=F.relu,\n output_activation=identity,\n hidden_init=ptu.fanin_init,\n b_init_value=0.1,\n layer_norm=False,\n layer_norm_kwargs=None,\n ):\n self.save_init_params(locals())\n super().__init__()\n\n if layer_norm_kwargs is None:\n layer_norm_kwargs = dict()\n\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_sizes = hidden_sizes\n self.hidden_activation = hidden_activation\n self.output_activation = output_activation\n self.layer_norm = layer_norm\n self.fcs = []\n self.layer_norms = []\n in_size = input_size\n\n for i, next_size in enumerate(hidden_sizes):\n fc = nn.Linear(in_size, next_size)\n in_size = next_size\n hidden_init(fc.weight)\n fc.bias.data.fill_(b_init_value)\n self.__setattr__(\"fc{}\".format(i), fc)\n self.fcs.append(fc)\n\n if self.layer_norm:\n ln = LayerNorm(next_size)\n self.__setattr__(\"layer_norm{}\".format(i), ln)\n self.layer_norms.append(ln)\n\n self.last_fc = nn.Linear(in_size, output_size)\n self.last_fc.weight.data.uniform_(-init_w, init_w)\n self.last_fc.bias.data.uniform_(-init_w, init_w)\n\n def forward(self, input, return_preactivations=False):\n h = input\n for i, fc in enumerate(self.fcs):\n h = fc(h)\n if self.layer_norm and i < len(self.fcs) - 1:\n h = self.layer_norms[i](h)\n h = self.hidden_activation(h)\n preactivation = self.last_fc(h)\n output = self.output_activation(preactivation)\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n\nclass FlattenMlp(Mlp):\n \"\"\"\n if there are multiple inputs, concatenate along dim 1\n \"\"\"\n\n def forward(self, *inputs, **kwargs):\n flat_inputs = torch.cat(inputs, dim=1)\n return super().forward(flat_inputs, **kwargs)\n\n\nclass MlpPolicy(Mlp, Policy):\n \"\"\"\n A simpler interface for creating policies.\n \"\"\"\n\n def __init__(\n self,\n *args,\n obs_normalizer: TorchFixedNormalizer = None,\n **kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.obs_normalizer = obs_normalizer\n\n def forward(self, obs, **kwargs):\n if self.obs_normalizer:\n obs = self.obs_normalizer.normalize(obs)\n return super().forward(obs, **kwargs)\n\n def get_action(self, obs_np):\n actions = self.get_actions(obs_np[None])\n return actions[0, :], {}\n\n def get_actions(self, obs):\n return self.eval_np(obs)\n\n\nclass TanhMlpPolicy(MlpPolicy):\n \"\"\"\n A helper class since most policies have a tanh output activation.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.save_init_params(locals())\n super().__init__(*args, output_activation=torch.tanh, **kwargs)\n\n\nclass MlpEncoder(FlattenMlp):\n '''\n encode context via MLP\n '''\n\n def reset(self, num_tasks=1):\n pass\n\n def forward_seq(self,context):\n t,b,_ = context.size()\n input = context.view(t*b,-1)\n out = self.forward(input)\n return out.view(t,b,-1)\n\nclass RecurrentEncoder(FlattenMlp):\n '''\n encode context via recurrent network\n '''\n\n def __init__(self,\n *args,\n 
**kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.hidden_dim = self.hidden_sizes[-1]\n self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))\n\n # input should be (task, seq, feat) and hidden should be (task, 1, feat)\n\n self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)\n\n def forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))\n self.hidden = hn\n # take the last hidden state to predict z\n out = out[:, -1, :]\n\n # output layer\n preactivation = self.last_fc(out)\n output = self.output_activation(preactivation)\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n def reset(self, num_tasks=1):\n self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)\n\n\nclass RNN(FlattenMlp):\n '''\n encode context via recurrent network\n '''\n\n def __init__(self,\n *args,\n **kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.hidden_dim = self.hidden_sizes[-1]\n self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))\n\n # input should be (task, seq, feat) and hidden should be (task, 1, feat)\n\n self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)\n\n def inner_forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))\n self.hidden = hn\n # take the last hidden state to predict z\n out = out.contiguous()\n out = out.view(task * seq, -1)\n\n # output layer\n #preactivation = self.last_fc(out)\n #output = self.output_activation(preactivation)\n if return_preactivations:\n return out, out\n else:\n return out\n\n def forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))\n self.hidden = hn\n # take the last hidden state to predict z\n out = out.contiguous()\n out = out.view(task * seq, -1)\n\n # output layer\n preactivation = self.last_fc(out)\n output = self.output_activation(preactivation)\n if return_preactivations:\n return output, output\n else:\n return output\n\n def inner_reset(self, num_tasks=1):\n self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)\n\nclass SnailEncoder(FlattenMlp):\n def __init__(self,\n input_length,\n *args,\n **kwargs\n ):\n self.save_init_params(locals())\n super().__init__(*args, **kwargs)\n self.hidden_dim = self.hidden_sizes[-1]\n self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))\n self.input_length = input_length\n # input should be (task, seq, feat) and hidden should be (1, task, 
feat)\n\n #self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)\n layer_count = math.ceil(math.log(input_length)/math.log(2))\n self.TC1 = TCBlock(self.hidden_dim,input_length,16)\n self.atten1 = AttentionBlock(self.hidden_dim+16*layer_count,32,32)\n self.TC2 = TCBlock(self.hidden_dim+16*layer_count+32,input_length,16)\n self.atten2 = AttentionBlock(self.hidden_dim+16*layer_count*2+32,32,32)\n self.out_layer = nn.Linear(self.hidden_dim+16*layer_count*2+32+32,self.output_size)\n self.var_start = int(self.output_size / 2)\n\n def forward(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out = out.permute(0,2,1)\n #print(out.shape)\n out = self.TC1(out)\n out = self.atten1(out)\n out = self.TC2(out)\n out = self.atten2(out)\n out = out[:, :, -1]\n #print('o',out.shape)\n # output layer\n preactivation = self.out_layer(out)\n output = self.output_activation(preactivation)\n #temp = F.softplus(output[..., self.var_start:])\n #output[..., self.var_start:] = temp\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n def forward_seq(self, in_, return_preactivations=False):\n # expects inputs of dimension (task, seq, feat)\n task, seq, feat = in_.size()\n in_ = in_.contiguous()\n out = in_.view(task * seq, feat)\n\n # embed with MLP\n for i, fc in enumerate(self.fcs):\n out = fc(out)\n out = self.hidden_activation(out)\n\n out = out.view(task, seq, -1)\n out = out.permute(0,2,1)\n #print(out.shape)\n out = self.TC1(out)\n out = self.atten1(out)\n out = self.TC2(out)\n out = self.atten2(out)\n out = out.permute(0,2,1)\n out = out.view(task * seq,-1)\n\n\n preactivation = self.out_layer(out)\n output = self.output_activation(preactivation)\n #temp = F.softplus(output[..., self.var_start:])\n #output[..., self.var_start:] = temp\n #output = output.view(task,seq,-1)\n if return_preactivations:\n return output, preactivation\n else:\n return output\n\n def reset(self,num_tasks=1):\n return\n\nclass MyMlpEncoder(FlattenMlp):\n '''\n encode context via MLP\n '''\n\n def reset(self, num_tasks=1):\n pass\n\n def forward_seq(self,context):\n t,b,_ = context.size()\n input = context.view(t*b,-1)\n out = self.forward(input)\n return out\n\n def forward(self,context):\n t,b,_ = context.size()\n input = context.view(t*b,-1)\n out = self.forward(input)\n return out\n\nclass CausalConv1d(nn.Module):\n \"\"\"A 1D causal convolution layer.\n\n Input: (B, D_in, T), where B is the minibatch size, D_in is the number of\n dimensions per step, and T is the number of steps.\n Output: (B, D_out, T), where B is the minibatch size, D_out is the number\n of dimensions in the output, and T is the number of steps.\n\n Arguments:\n in_channels (int): number of input channels\n out_channels (int): number of output channels\n \"\"\"\n def __init__(self, in_channels, out_channels, dilation=1):\n super(CausalConv1d, self).__init__()\n self.padding = dilation\n self.causal_conv = nn.Conv1d(\n in_channels,\n out_channels,\n 2,\n padding = self.padding,\n dilation = dilation\n )\n\n def forward(self, minibatch):\n return self.causal_conv(minibatch)[:, :, :-self.padding]\n\n\nclass DenseBlock(nn.Module):\n \"\"\"Two parallel 1D causal convolution layers w/tanh and sigmoid activations\n\n Input: (B, D_in, T), where B is the 
minibatch size, D_in is the number of\n dimensions of the input, and T is the number of steps.\n Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the\n number of dimensions of the input, `F` is the number of filters, and `T`\n is the length of the input sequence.\n\n Arguments:\n in_channels (int): number of input channels\n filters (int): number of filters per channel\n \"\"\"\n def __init__(self, in_channels, filters, dilation=1):\n super(DenseBlock, self).__init__()\n self.causal_conv1 = CausalConv1d(\n in_channels,\n filters,\n dilation=dilation\n )\n self.causal_conv2 = CausalConv1d(\n in_channels,\n filters,\n dilation=dilation\n )\n\n def forward(self, minibatch):\n tanh = F.tanh(self.causal_conv1(minibatch))\n sig = F.sigmoid(self.causal_conv2(minibatch))\n out = torch.cat([minibatch, tanh*sig], dim=1)\n return out\n\n\nclass TCBlock(nn.Module):\n \"\"\"A stack of DenseBlocks which dilates to desired sequence length\n\n The TCBlock adds `ceil(log_2(seq_len))*filters` channels to the output.\n\n Input: (B, D_in, T), where B is the minibatch size, D_in is the number of\n dimensions of the input, and T is the number of steps.\n Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the\n number of dimensions of the input, `F` is the number of filters, and `T`\n is the length of the input sequence.\n\n Arguments:\n in_channels (int): channels for the input\n seq_len (int): length of the sequence. The number of denseblock layers\n is log base 2 of `seq_len`.\n filters (int): number of filters per channel\n \"\"\"\n def __init__(self, in_channels, seq_len, filters):\n super(TCBlock, self).__init__()\n layer_count = math.ceil(math.log(seq_len)/math.log(2))\n blocks = []\n channel_count = in_channels\n for layer in range(layer_count):\n block = DenseBlock(channel_count, filters, dilation=2**layer)\n blocks.append(block)\n channel_count += filters\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, minibatch):\n return self.blocks(minibatch)\n\n\nclass AttentionBlock(nn.Module):\n \"\"\"An attention mechanism similar to Vaswani et al (2017)\n\n The input of the AttentionBlock is `BxDxT` where `B` is the input\n minibatch size, `D` is the dimensions of each feature, `T` is the length of\n the sequence.\n\n The output of the AttentionBlock is `Bx(D+V)xT` where `V` is the size of the\n attention values.\n\n Arguments:\n input_dims (int): the number of dimensions (or channels) of each element\n in the input sequence\n k_size (int): the size of the attention keys\n v_size (int): the size of the attention values\n \"\"\"\n def __init__(self, input_dims, k_size, v_size):\n super(AttentionBlock, self).__init__()\n self.key_layer = nn.Linear(input_dims, k_size)\n self.query_layer = nn.Linear(input_dims, k_size)\n self.value_layer = nn.Linear(input_dims, v_size)\n self.sqrt_k = math.sqrt(k_size)\n\n def forward(self, minibatch):\n minibatch = minibatch.permute(0,2,1)\n keys = self.key_layer(minibatch)\n queries = self.query_layer(minibatch)\n values = self.value_layer(minibatch)\n logits = torch.bmm(queries, keys.transpose(2,1))\n mask = logits.data.new(logits.size(1), logits.size(2)).fill_(1).byte()\n mask = torch.triu(mask, 1)\n mask = mask.unsqueeze(0).expand_as(logits)\n logits.data.masked_fill_(mask, float('-inf'))\n probs = F.softmax(logits / self.sqrt_k, dim=2)\n read = torch.bmm(probs, values)\n return torch.cat([minibatch, read], dim=2).permute(0,2,1)"
] | [
[
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.nn.functional.softmax",
"torch.bmm",
"torch.nn.Conv1d",
"torch.nn.Sequential",
"torch.triu",
"torch.zeros",
"torch.cat"
]
] |
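The `Mlp` in the row above depends on rlkit helpers (`PyTorchModule`, `save_init_params`, `ptu.fanin_init`), so it does not run on its own. The following is a plain-PyTorch approximation of the same construction pattern, with fan-in uniform initialization for the hidden layers and a small uniform range for the output layer; the class and function names here are made up for illustration:

```python
import torch
from torch import nn
import torch.nn.functional as F

def fanin_init(weight):
    # Approximation of rlkit's fanin_init: uniform(-1/sqrt(fan_in), 1/sqrt(fan_in)).
    bound = 1.0 / weight.size(1) ** 0.5
    return weight.data.uniform_(-bound, bound)

class TinyMlp(nn.Module):
    def __init__(self, sizes=(4, 32, 32, 2), b_init=0.1, init_w=3e-3):
        super().__init__()
        self.fcs = nn.ModuleList(
            nn.Linear(sizes[i], sizes[i + 1]) for i in range(len(sizes) - 1)
        )
        for fc in self.fcs[:-1]:              # hidden layers: fan-in init, constant bias
            fanin_init(fc.weight)
            fc.bias.data.fill_(b_init)
        self.fcs[-1].weight.data.uniform_(-init_w, init_w)   # small output-layer init
        self.fcs[-1].bias.data.uniform_(-init_w, init_w)

    def forward(self, x):
        for fc in self.fcs[:-1]:
            x = F.relu(fc(x))
        return self.fcs[-1](x)

print(TinyMlp()(torch.randn(3, 4)).shape)    # torch.Size([3, 2])
```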
ShellySrivastava/Machine-Learning | [
"bfdea30c06abe4228c103ae525adcf990015983f"
] | [
"ML_CW1/assgn_1_part_1/2_multiple_variables/plot_cost.py"
] | [
"import matplotlib.pyplot as plt\nimport os\n\ndef plot_cost(cost):\n \n fig, ax1 = plt.subplots()\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Cost')\n plt.plot(cost)\n fig.tight_layout()\n plot_filename = os.path.join(os.getcwd(), 'figures', 'cost.png')\n plt.savefig(plot_filename)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] |
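`plot_cost` in the row above writes to a `figures/` directory under the current working directory, so that directory has to exist before the call. A small usage sketch; the module name in the import is assumed from the file path and the cost values are made up:

```python
import os
from plot_cost import plot_cost   # module name assumed from the file path above

os.makedirs(os.path.join(os.getcwd(), "figures"), exist_ok=True)
plot_cost([10.0, 6.2, 4.1, 3.3, 3.0, 2.9])   # a typical decreasing cost history
```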
e2thenegpii/EnergyCalc | [
"6036b08d01eafae33e80e8754c0e0215c78db6fe"
] | [
"src/TOU.py"
] | [
"from enum import Enum\n\nfrom datetime import datetime, date\nfrom dateutil.relativedelta import relativedelta, MO\nimport argparse\nimport holidays\nimport pandas as pd\n\nclass BGEHolidays(holidays.HolidayBase):\n def _populate(self, year):\n holidays.UnitedStates._populate(self, year)\n\n # Remove Martin Luther King Day\n self.pop(date(year, 1, 1) + relativedelta(weekday=MO(+3)), None)\n\n # Remove Columbus Day\n self.pop(date(year, 10, 1) + relativedelta(weekday=MO(+2)), None)\n\n # Remove Veterans Day\n self.pop(date(year, 11, 11), None)\n\n # Add good friday\n self[holidays.easter(year) + relativedelta(days=-2)] = 'Good Friday'\n\nclass TimeOfUse(Enum):\n peak = 0\n shoulder = 1\n offpeak = 2\n\nclass Season(Enum):\n Winter = 0\n Summer = 1\n\n @classmethod\n def get(cls, dt):\n d = dt.date()\n if date(dt.year, 6, 1) <= d and date(dt.year, 9, 30) >= d:\n return cls.Summer\n return cls.Winter\n\nclass Schedule(Enum):\n R = 'R'\n RL = 'RL'\n EV = 'EV'\n EVP = 'EVP'\n\n def getTOU(self, dt):\n d = dt.date()\n t = dt.time()\n bge_holidays = BGEHolidays(dt.year)\n\n if self == self.R:\n return TimeOfUse.offpeak\n elif self == self.RL:\n if Season.get(dt) == Season.Summer:\n if (t.hour >=10 and t.hour < 20) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n elif ((t.hour >= 7 and t.hour < 10) or (t.hour >= 20 and t.hour < 23)) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.shoulder\n else:\n return TimeOfUse.offpeak\n else:\n if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n elif (t.hour >= 11 and t.hour < 17) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.shoulder\n else:\n return TimeOfUse.offpeak\n\n elif self in (self.EV, self.EVP):\n if Season.get(dt) == Season.Summer:\n if (t.hour >= 10 and t.hour < 20) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n else:\n return TimeOfUse.offpeak\n else:\n if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \\\n (dt.weekday() < 5) and \\\n (d not in bge_holidays):\n return TimeOfUse.peak\n else:\n return TimeOfUse.offpeak\n\nrates = {\n (Schedule.R, Season.Summer, TimeOfUse.offpeak): .06722,\n (Schedule.R, Season.Winter, TimeOfUse.offpeak): .07805,\n (Schedule.RL, Season.Summer, TimeOfUse.peak): .08465,\n (Schedule.RL, Season.Summer, TimeOfUse.shoulder): .06069,\n (Schedule.RL, Season.Summer, TimeOfUse.offpeak): .05744,\n (Schedule.RL, Season.Winter, TimeOfUse.peak): .09053,\n (Schedule.RL, Season.Winter, TimeOfUse.shoulder): .07944,\n (Schedule.RL, Season.Winter, TimeOfUse.offpeak): .07166,\n (Schedule.EV, Season.Summer, TimeOfUse.peak): .1227,\n (Schedule.EV, Season.Summer, TimeOfUse.offpeak): .03886,\n (Schedule.EV, Season.Winter, TimeOfUse.peak): .18474,\n (Schedule.EV, Season.Winter, TimeOfUse.offpeak): .0426,\n (Schedule.EVP, Season.Summer, TimeOfUse.peak): .03886,\n (Schedule.EVP, Season.Summer, TimeOfUse.offpeak): .03886,\n (Schedule.EVP, Season.Winter, TimeOfUse.peak): .0426,\n (Schedule.EVP, Season.Winter, TimeOfUse.offpeak): .0426\n}\n\ndef get_rate(dt, schedule = Schedule.R):\n bge_holidays = BGEHolidays(dt.year)\n\n season = Season.get(dt)\n tou = schedule.getTOU(dt)\n\n return rates[(schedule, season, tou)]\n\ndef process_row(x):\n dt = x['DATE_START TIME']\n val = x['USAGE']\n return pd.Series([dt] + [get_rate(dt, x) * (val + .0700) for x in Schedule], 
index=['DATE_START TIME'] + [x.value for x in Schedule])\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_file', type=argparse.FileType('r'))\n\n args = parser.parse_args()\n\n df = pd.read_csv(args.input_file, parse_dates=[['DATE', 'START TIME']])[['DATE_START TIME', 'USAGE']]\n\n schedules = df.apply(process_row, axis=1)\n print(schedules[['R', 'RL', 'EV', 'EVP']].sum())\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv"
]
] |
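The `Schedule.getTOU` logic in the row above reduces to a season check (June through September is Summer) plus weekday and hour-window tests, with BGE holidays excluded. A stripped-down, standard-library-only sketch of the summer-peak test for the RL schedule; holiday handling is omitted here:

```python
from datetime import datetime, date

def season(dt):
    # Summer as defined above: 1 June through 30 September.
    return "Summer" if date(dt.year, 6, 1) <= dt.date() <= date(dt.year, 9, 30) else "Winter"

def rl_summer_peak(dt):
    # RL summer peak: weekdays, 10:00 up to (but not including) 20:00.
    return season(dt) == "Summer" and dt.weekday() < 5 and 10 <= dt.hour < 20

dt = datetime(2021, 7, 14, 15, 30)        # a Wednesday afternoon in July
print(season(dt), rl_summer_peak(dt))     # Summer True
```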
AshwinRameshP/AttendanceSystem_FaceRecognition | [
"23c590c10ac296816d7cff23445d28c3863d0138"
] | [
"FRAMS_STUDENT.py"
] | [
"import tkinter as tk\nfrom tkinter import *\nimport cv2\nimport csv\nimport os\nimport numpy as np\nfrom PIL import Image,ImageTk\nimport pandas as pd\nimport datetime\nimport time\n\n\n##Error screen2\ndef del_sc2():\n sc2.destroy()\ndef err_screen1():\n global sc2\n sc2 = tk.Tk()\n sc2.geometry('300x100')\n sc2.iconbitmap('FRAMS.ico')\n sc2.title('Warning!!')\n sc2.configure(background='snow')\n Label(sc2,text='Please enter your subject name!!!',fg='red',bg='white',font=('times', 16, ' bold ')).pack()\n Button(sc2,text='OK',command=del_sc2,fg=\"black\" ,bg=\"lawn green\" ,width=9 ,height=1, activebackground = \"Red\" ,font=('times', 15, ' bold ')).place(x=90,y= 50)\n\ndef Fillattendances():\n sub = tx.get()\n now = time.time() ###For calculate seconds of video\n future = now + 20\n if time.time() < future:\n if sub == '':\n err_screen1()\n else:\n recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer()\n try:\n recognizer.read(\"TrainingImageLabel\\Trainner.yml\")\n except:\n e = 'Model not found,Please train model'\n Notifica.configure(text=e, bg=\"red\", fg=\"black\", width=33, font=('times', 15, 'bold'))\n Notifica.place(x=20, y=250)\n\n harcascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(harcascadePath)\n df = pd.read_csv(\"StudentDetails\\StudentDetails.csv\")\n cam = cv2.VideoCapture(0)\n font = cv2.FONT_HERSHEY_SIMPLEX\n col_names = ['Enrollment', 'Name', 'Date', 'Time']\n attendance = pd.DataFrame(columns=col_names)\n while True:\n ret, im = cam.read()\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.2, 5)\n for (x, y, w, h) in faces:\n global Id\n\n Id, conf = recognizer.predict(gray[y:y + h, x:x + w])\n if (conf < 70):\n print(conf)\n global Subject\n global aa\n global date\n global timeStamp\n Subject = tx.get()\n ts = time.time()\n date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')\n aa = df.loc[df['Enrollment'] == Id]['Name'].values\n global tt\n tt = str(Id) + \"-\" + aa\n En = '15624031' + str(Id)\n attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]\n cv2.rectangle(im, (x, y), (x + w, y + h), (0, 260, 0), 7)\n cv2.putText(im, str(tt), (x + h, y), font, 1, (255, 255, 0,), 4)\n\n else:\n Id = 'Unknown'\n tt = str(Id)\n cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7)\n cv2.putText(im, str(tt), (x + h, y), font, 1, (0, 25, 255), 4)\n if time.time() > future:\n break\n\n attendance = attendance.drop_duplicates(['Enrollment'], keep='first')\n cv2.imshow('Filling attedance..', im)\n key = cv2.waitKey(30) & 0xff\n if key == 27:\n break\n\n ts = time.time()\n date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')\n Hour, Minute, Second = timeStamp.split(\":\")\n fileName = \"Attendance/\" + Subject + \"_\" + date + \"_\" + Hour + \"-\" + Minute + \"-\" + Second + \".csv\"\n attendance = attendance.drop_duplicates(['Enrollment'], keep='first')\n print(attendance)\n attendance.to_csv(fileName, index=False)\n\n M = 'Attendance filled Successfully'\n Notifica.configure(text=M, bg=\"Green\", fg=\"white\", width=33, font=('times', 15, 'bold'))\n Notifica.place(x=20, y=250)\n cam.release()\n cv2.destroyAllWindows()\n\n import csv\n import tkinter\n root = tkinter.Tk()\n root.title(\"Attendance of \" + Subject)\n root.configure(background='snow')\n cs = './' + fileName\n with open(cs, 
newline=\"\") as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n # i've added some styling\n label = tkinter.Label(root, width=8, height=1, fg=\"black\", font=('times', 15, ' bold '),\n bg=\"lawn green\", text=row, relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\n root.mainloop()\n print(attendance)\n\n\nif __name__ == '__main__':\n\n ###windo is frame for subject choosing\n windo = tk.Tk()\n windo.iconbitmap('FRAMS.ico')\n windo.title(\"Enter subject name...\")\n windo.geometry('580x320')\n windo.configure(background='snow')\n Notifica = tk.Label(windo, text=\"Attendance filled Successfully\", bg=\"Green\", fg=\"white\", width=33,\n height=2, font=('times', 15, 'bold'))\n\n\n def Attf():\n import subprocess\n subprocess.Popen(\n r'explorer /select,\".\\Attendance\\Manually Attendance\\\"') # open attendance sheet window\n\n\n attf = tk.Button(windo, text=\"Check Sheets\", command=Attf, fg=\"black\", bg=\"lawn green\", width=12, height=1,\n activebackground=\"Red\", font=('times', 14, ' bold '))\n attf.place(x=430, y=255)\n\n sub = tk.Label(windo, text=\"Enter Subject\", width=15, height=2, fg=\"white\", bg=\"blue2\",\n font=('times', 15, ' bold '))\n sub.place(x=30, y=100)\n\n tx = tk.Entry(windo, width=20, bg=\"yellow\", fg=\"red\", font=('times', 23, ' bold '))\n tx.place(x=250, y=105)\n\n fill_a = tk.Button(windo, text=\"Fill Attendance\", fg=\"white\", command=Fillattendances, bg=\"deep pink\", width=20,\n height=2,\n activebackground=\"Red\", font=('times', 15, ' bold '))\n fill_a.place(x=250, y=160)\n windo.mainloop()"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
hanneshapke/text | [
"8bebbbe28749de5509be474bc475cef83490f013"
] | [
"tensorflow_text/python/ops/bert_tokenizer.py"
] | [
"# coding=utf-8\n# Copyright 2020 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic tokenization ops for BERT preprocessing.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow_text.python.ops import regex_split_ops\nfrom tensorflow_text.python.ops.normalize_ops import case_fold_utf8\nfrom tensorflow_text.python.ops.normalize_ops import normalize_utf8\nfrom tensorflow_text.python.ops.tokenization import TokenizerWithOffsets\nfrom tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer\n\n_DELIM_REGEX = [\n r\"\\s+\",\n r\"|\".join([\n r\"[!-/]\",\n r\"[:-@]\",\n r\"[\\[-`]\",\n r\"[{-~]\",\n r\"[\\p{P}]\",\n ]),\n r\"|\".join([\n r\"[\\x{4E00}-\\x{9FFF}]\",\n r\"[\\x{3400}-\\x{4DBF}]\",\n r\"[\\x{20000}-\\x{2A6DF}]\",\n r\"[\\x{2A700}-\\x{2B73F}]\",\n r\"[\\x{2B740}-\\x{2B81F}]\",\n r\"[\\x{2B820}-\\x{2CEAF}]\",\n r\"[\\x{F900}-\\x{FAFF}]\",\n r\"[\\x{2F800}-\\x{2FA1F}]\",\n ]),\n]\n\n_DELIM_REGEX_PATTERN = \"|\".join(_DELIM_REGEX)\n_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)\n_KEEP_DELIM_NO_WHITESPACE.remove(r\"\\s+\")\n_UNUSED_TOKEN_REGEX = \"\\\\[unused\\\\d+\\\\]\"\n_KEEP_DELIM_NO_WHITESPACE_PATTERN = \"|\".join(_KEEP_DELIM_NO_WHITESPACE)\n\n\nclass BasicTokenizer(TokenizerWithOffsets):\n r\"\"\"Basic tokenizer for for tokenizing text.\n\n A basic tokenizer that tokenizes using some deterministic rules:\n - For most languages, this tokenizer will split on whitespace.\n - For Chinese, Japanese, and Korean characters, this tokenizer will split on\n Unicode characters.\n\n Attributes:\n lower_case: bool - If true, a preprocessing step is added to lowercase the\n text, apply NFD normalization, and strip accents characters.\n keep_whitespace: bool - If true, preserves whitespace characters instead of\n stripping them away.\n normalization_form: If true and lower_case=False, the input text will be\n normalized to `normalization_form`. 
See normalize_utf8() op for a list of\n valid values.\n preserve_unused_token: If true, text in the regex format \"\\\\[unused\\\\d+\\\\]\"\n will be treated as a token and thus remain preserved as is to be looked up\n in the vocabulary.\n \"\"\"\n\n def __init__(self,\n lower_case=False,\n keep_whitespace=False,\n normalization_form=None,\n preserve_unused_token=False):\n self._lower_case = lower_case\n if not keep_whitespace:\n self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN\n else:\n self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN\n self._normalization_form = normalization_form\n\n if preserve_unused_token:\n self._delim_regex_pattern = \"|\".join(\n [_UNUSED_TOKEN_REGEX, _DELIM_REGEX_PATTERN])\n self._keep_delim_regex_pattern = \"|\".join(\n [_UNUSED_TOKEN_REGEX, self._keep_delim_regex_pattern])\n else:\n self._delim_regex_pattern = _DELIM_REGEX_PATTERN\n\n def tokenize(self, text_input):\n tokens, _, _ = self.tokenize_with_offsets(text_input)\n return tokens\n\n def tokenize_with_offsets(self, text_input):\n \"\"\"Performs basic word tokenization for BERT.\n\n Args:\n text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings.\n\n Returns:\n A `RaggedTensor` of tokenized strings from text_input.\n \"\"\"\n # lowercase and strip accents (if option is set)\n if self._lower_case:\n text_input = case_fold_utf8(text_input)\n text_input = normalize_utf8(text_input, \"NFD\")\n text_input = string_ops.regex_replace(text_input, r\"\\p{Mn}\", \"\")\n else:\n # utf8 normalization\n if self._normalization_form is not None:\n text_input = normalize_utf8(text_input, self._normalization_form)\n\n # strip out control characters\n text_input = string_ops.regex_replace(text_input, r\"\\p{Cc}|\\p{Cf}\", \" \")\n return regex_split_ops.regex_split_with_offsets(\n text_input, self._delim_regex_pattern, self._keep_delim_regex_pattern,\n \"BertBasicTokenizer\")\n\n\nclass BertTokenizer(TokenizerWithOffsets):\n r\"\"\"Tokenizer used for BERT.\n\n This tokenizer applies an end-to-end, text string to wordpiece tokenization.\n It first applies basic tokenization, and then follwed by wordpiece\n tokenization.\n\n See BasicTokenizer and WordpieceTokenizer for their respective details.\n\n Attributes:\n vocab_lookup_table: A lookup table implementing the LookupInterface\n containing the vocabulary of subwords or a string which is the file path\n to the vocab.txt file.\n suffix_indicator: (optional) The characters prepended to a wordpiece to\n indicate that it is a suffix to another subword. Default is '##'.\n max_bytes_per_word: (optional) Max size of input token. Default is 100.\n max_chars_per_token: (optional) Max size of subwords, excluding suffix\n indicator. If known, providing this improves the efficiency of decoding\n long words.\n token_out_type: (optional) The type of the token to return. This can be\n `tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`.\n unknown_token: (optional) The value to use when an unknown token is found.\n Default is \"[UNK]\". If this is set to a string, and `token_out_type` is\n `tf.int64`, the `vocab_lookup_table` is used to convert the\n `unknown_token` to an integer. If this is set to `None`, out-of-vocabulary\n tokens are left as is.\n split_unknown_characters: (optional) Whether to split out single unknown\n characters as subtokens. 
If False (default), words containing unknown\n characters will be treated as single unknown tokens.\n lower_case: bool - If true, a preprocessing step is added to lowercase the\n text, apply NFD normalization, and strip accents characters.\n keep_whitespace: bool - If true, preserves whitespace characters instead of\n stripping them away.\n normalization_form: If true and lower_case=False, the input text will be\n normalized to `normalization_form`. See normalize_utf8() op for a list of\n valid values.\n preserve_unused_token: If true, text in the regex format `\\\\[unused\\\\d+\\\\]`\n will be treated as a token and thus remain preserved as is to be looked up\n in the vocabulary.\n \"\"\"\n\n def __init__(self,\n vocab_lookup_table,\n suffix_indicator=\"##\",\n max_bytes_per_word=100,\n max_chars_per_token=None,\n token_out_type=dtypes.int64,\n unknown_token=\"[UNK]\",\n split_unknown_characters=False,\n lower_case=False,\n keep_whitespace=False,\n normalization_form=None,\n preserve_unused_token=False):\n if isinstance(vocab_lookup_table, str) or isinstance(\n vocab_lookup_table, ops.Tensor):\n init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table)\n vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(\n init, num_oov_buckets=1, lookup_key_dtype=dtypes.string)\n\n print(\"Before \", type(lower_case))\n if isinstance(lower_case, ops.Tensor): \n lower_case = tf.compat.v1.get_default_session().run(lower_case)\n print(\"After \", type(lower_case))\n\n self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,\n normalization_form,\n preserve_unused_token)\n self._wordpiece_tokenizer = WordpieceTokenizer(\n vocab_lookup_table, suffix_indicator, max_bytes_per_word,\n max_chars_per_token, token_out_type, unknown_token,\n split_unknown_characters)\n\n def tokenize_with_offsets(self, text_input):\n tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)\n wordpieces, wp_begin, wp_end = (\n self._wordpiece_tokenizer.tokenize_with_offsets(tokens))\n begin_expanded = array_ops.expand_dims(begin, axis=2)\n final_begin = begin_expanded + wp_begin\n final_end = begin_expanded + wp_end\n return wordpieces, final_begin, final_end\n\n def tokenize(self, text_input):\n \"\"\"Performs untokenized text to wordpiece tokenization for BERT.\n\n Args:\n text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8\n strings.\n\n Returns:\n A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string\n contents (or ID in the vocab_lookup_table representing that string)\n of the `jth` token in `input[i1...iN]`\n \"\"\"\n tokens = self._basic_tokenizer.tokenize(text_input)\n return self._wordpiece_tokenizer.tokenize(tokens)\n"
] | [
[
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.lookup_ops.StaticVocabularyTableV1",
"tensorflow.python.ops.string_ops.regex_replace",
"tensorflow.python.ops.lookup_ops.TextFileIdTableInitializer"
]
] |
riotu-lab/tf2trt_with_onnx | [
"f9828ed99af5530836bf6ee608e631502dfb0f02"
] | [
"inference.py"
] | [
"import tensorrt as trt\nimport pycuda.driver as cuda\nimport numpy as np\nimport pycuda.autoinit \n\ndef allocate_buffers(engine, batch_size, data_type):\n\n \"\"\"\n This is the function to allocate buffers for input and output in the device\n Args:\n engine : The path to the TensorRT engine. \n batch_size : The batch size for execution time.\n data_type: The type of the data for input and output, for example trt.float32. \n \n Output:\n h_input_1: Input in the host.\n d_input_1: Input in the device. \n h_output_1: Output in the host. \n d_output_1: Output in the device. \n stream: CUDA stream.\n\n \"\"\"\n\n # Determine dimensions and create page-locked memory buffers (which won't be swapped to disk) to hold host inputs/outputs.\n h_input_1 = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(data_type))\n h_output = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(data_type))\n # Allocate device memory for inputs and outputs.\n d_input_1 = cuda.mem_alloc(h_input_1.nbytes)\n\n d_output = cuda.mem_alloc(h_output.nbytes)\n # Create a stream in which to copy inputs/outputs and run inference.\n stream = cuda.Stream()\n return h_input_1, d_input_1, h_output, d_output, stream \n\ndef load_images_to_buffer(pics, pagelocked_buffer):\n preprocessed = np.asarray(pics).ravel()\n np.copyto(pagelocked_buffer, preprocessed) \n\ndef do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width):\n \"\"\"\n This is the function to run the inference\n Args:\n engine : Path to the TensorRT engine \n pics_1 : Input images to the model. \n h_input_1: Input in the host \n d_input_1: Input in the device \n h_output_1: Output in the host \n d_output_1: Output in the device \n stream: CUDA stream\n batch_size : Batch size for execution time\n height: Height of the output image\n width: Width of the output image\n \n Output:\n The list of output images\n\n \"\"\"\n print('load images to buffer')\n load_images_to_buffer(pics_1, h_input_1)\n\n with engine.create_execution_context() as context:\n context.debug_sync = False\n # Transfer input data to the GPU.\n cuda.memcpy_htod_async(d_input_1, h_input_1, stream)\n\n # Run inference.\n print('load profiler')\n context.profiler = trt.Profiler()\n print('execute')\n context.execute(batch_size=1, bindings=[int(d_input_1), int(d_output)])\n print('Transfer predictions back from the GPU.')\n # Transfer predictions back from the GPU.\n cuda.memcpy_dtoh_async(h_output, d_output, stream)\n # Synchronize the stream\n stream.synchronize()\n # Return the host output.\n print(h_output.shape)\n out = h_output.reshape((1,-1))\n return out \n"
] | [
[
"numpy.asarray",
"numpy.copyto"
]
] |
adines/imagepy | [
"d7cdf3273d25e06046626ef2ef9200b1846ea49a"
] | [
"imagepy/menus/File/Import/roi_plg.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 12/21/2018\n@author: BioinfoTongLI\n\"\"\"\nimport numpy as np\nimport read_roi\nfrom imagepy.core.engine import Free\nfrom imagepy import IPy\nfrom skimage.draw import polygon\n\nclass Plugin(Free):\n \"\"\"load_ij_roi: use read_roi and th pass to shapely objects\"\"\"\n title = 'Import Rois from IJ'\n\n para = {'path': '', 'name': 'Undefined', 'width': 512, 'height': 512}\n\n view = [(str, 'name', 'name', ''),\n (int, 'width', (1, 3000), 0, 'width', 'pix'),\n (int, 'height', (1, 3000), 0, 'height', 'pix')]\n\n def load(self):\n filt = '|'.join(['%s files (*.%s)|*.%s' % (i.upper(), i, i) for i in [\"zip\"]])\n return IPy.getpath(self.title, filt, 'open', self.para)\n\n def run(self, para=None):\n ls = read_roi.read_roi_zip(para['path'])\n img = np.zeros((para['height'], para['width']), dtype=np.int32)\n for i in ls:\n img[polygon(ls[i]['y'], ls[i]['x'], img.shape)] = int(i)\n IPy.show_img([img], para['name'])\n"
] | [
[
"numpy.zeros"
]
] |
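The plugin in the row above rasterises each ImageJ ROI into a label image by filling `skimage.draw.polygon` coordinates with the ROI's integer key. The same core step on two hypothetical ROIs, without the `read_roi` and ImagePy dependencies:

```python
import numpy as np
from skimage.draw import polygon

rois = {                                   # hypothetical ROI vertex lists (y, x)
    "1": {"y": [2, 2, 8, 8], "x": [2, 8, 8, 2]},
    "2": {"y": [12, 12, 18], "x": [5, 15, 10]},
}
img = np.zeros((24, 24), dtype=np.int32)
for label, roi in rois.items():
    rr, cc = polygon(roi["y"], roi["x"], img.shape)   # pixels inside the polygon
    img[rr, cc] = int(label)
print(np.unique(img))                      # [0 1 2]
```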
bagustris/emotion | [
"5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36"
] | [
"ertk/stats.py"
] | [
"from functools import partial\nfrom typing import Callable, List, Union\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import friedmanchisquare, rankdata\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom statsmodels.stats.libqsturng import qsturng\n\nMatrix = List[List[float]]\n\n\ndef friedman_nemenyi(table: pd.DataFrame, alpha: float = 0.05):\n \"\"\"Runs Friedman test on given table and optionally graphs a\n critical-difference diagram.\n\n Args:\n -----\n table: DataFrame\n The data table, with subjects as rows and independent variable\n (condition) as columns.\n alpha: float\n Significance level, must be in the range (0, 1), default is\n 0.05.\n\n Returns:\n --------\n pval: float\n The p-value for the Friedman test.\n cd: float\n The critical difference from the Nemenyi post-hoc test.\n df: pd.DataFrame\n A table containing statistics relating to ranking and average\n values of the condiions. The dataframe has these columns:\n \"mean_rank\", \"mean\", \"std\", \"median\", \"mad\", \"effect_size\".\n \"\"\"\n _, pval = friedmanchisquare(*table.transpose().to_numpy())\n names = list(table.columns)\n avgrank = rankdata(-table.to_numpy(), axis=1).mean(0)\n df = pd.DataFrame(\n {\n \"mean_rank\": avgrank,\n \"mean\": table.mean(),\n \"std\": table.std(),\n \"median\": table.median(),\n \"mad\": table.mad(),\n },\n index=names,\n ).sort_values(\"mean_rank\")\n\n topclf = df.index[0]\n n, k = table.shape\n # Effect size is calculated in terms of differences in MAD\n df[\"effect_size\"] = (df.loc[topclf, \"median\"] - df[\"median\"]) / np.sqrt(\n ((n - 1) * df.loc[topclf, \"mad\"] ** 2 + (n - 1) * df[\"mad\"] ** 2) / (2 * n - 2)\n )\n cd = qsturng(1 - alpha, k, np.inf) * np.sqrt((k * (k + 1)) / (12 * n))\n return pval, cd, df\n\n\ndef _get_dist_func(metric: Union[Callable, str], **kwargs):\n if callable(metric):\n return partial(metric, **kwargs)\n else:\n if metric != \"minkowski\" and \"p\" in kwargs:\n del kwargs[\"p\"]\n if metric != \"mahalanobis\" and \"VI\" in kwargs:\n del kwargs[\"VI\"]\n return partial(pairwise_distances, metric=metric, **kwargs)\n\n\ndef bhattacharyya_dist(x: np.ndarray, y: np.ndarray, pinv: bool = False):\n \"\"\"Calculate Bhattacharyya distance between multivariate Gaussian\n distributions.\n\n Args:\n -----\n x: array-like\n Data matrix of shape (n1_samples, n_features) corresponding to\n the first group.\n y: array-like\n Data matrix of shape (n2_samples, n_features) corresponding to\n the second group.\n pinv: bool\n Use pseudoinverse instead of inverse. This is useful if the\n covariance matrices don't have full rank or otherwise aren't\n invertible.\n \"\"\"\n mu1 = np.expand_dims(np.mean(x, axis=0), 1)\n mu2 = np.expand_dims(np.mean(y, axis=0), 1)\n cov1 = np.cov(x, rowvar=False)\n cov2 = np.cov(y, rowvar=False)\n cov = (cov1 + cov2) / 2\n _, ldet1 = np.linalg.slogdet(cov1)\n _, ldet2 = np.linalg.slogdet(cov2)\n _, ldet = np.linalg.slogdet(cov)\n if pinv:\n covinv = np.linalg.pinv(cov, hermitian=True, rcond=1e-8)\n else:\n covinv = np.linalg.inv(cov)\n db = (mu1 - mu2).T.dot(covinv).dot(mu1 - mu2) / 8 + ldet / 2 - ldet1 / 4 - ldet2 / 4\n\n return db.item()\n\n\ndef corr_ratio(x: np.ndarray, groups: Union[List[int], np.ndarray]):\n \"\"\"Calculates correlation ratio for each feature using the given\n groups.\n\n Args:\n -----\n data: numpy.ndarray\n Data matrix, with shape (n_instances, n_features).\n groups: list or numpy.ndarray\n 1D array of groups assignments of length n_instances. 
Groups\n should be labelled from 0 to G - 1 inclusive, where G is the\n number of groups.\n\n Returns:\n --------\n eta: numpy.ndarray\n 1D array of correlation coefficients of length n_features. Each\n value is in [0, 1] except if a feature takes only one value, in\n which case eta will be nan.\n \"\"\"\n groups = np.array(groups)\n n_groups = groups.max() + 1\n counts = np.bincount(groups)\n mean = x.mean(0)\n g_means = np.empty((n_groups, x.shape[1]))\n for g in range(n_groups):\n g_means[g, :] = x[groups == g].mean(0)\n num = np.sum(counts[:, None] * (g_means - mean) ** 2, axis=0)\n den = np.sum((x - mean) ** 2, axis=0)\n old_err = np.seterr(divide=\"ignore\", invalid=\"ignore\")\n eta2 = num / den\n np.seterr(**old_err)\n return np.sqrt(eta2)\n\n\ndef dunn(\n x: np.ndarray,\n clusters: Union[List[int], np.ndarray],\n intra_method: str = \"mean\",\n inter_method: str = \"cent\",\n metric: Union[Callable, str] = \"l2\",\n p: int = 2,\n):\n \"\"\"Calculates the Dunn index for cluster \"goodness\".\n\n Args:\n -----\n data: numpy.ndarray\n Data matrix, with shape (n_instances, n_features).\n clusters: list or numpy.ndarray\n 1D array of cluster assignments of length n_instances. Clusters\n should be labelled from 0 to C - 1 inclusive, where C is the\n number of clusters.\n intra_method: str\n Method for calculating intra-cluster distance. One of \"max\",\n \"mean\", \"cent\".\n inter_method: str\n Method for calculating inter-cluster distance. One of \"cent\".\n metric: str or callable\n Distance metric. If str, must be one of the sklearn or scipy\n distance methods. If callable, must take one positional argument\n and return a pairwise distance matrix.\n p: int\n Value of p for p-norm when using \"lp\" distance metric.\n\n Returns:\n --------\n dunn: float\n The Dunn index for this data and cluster assignment.\n \"\"\"\n clusters = np.array(clusters, dtype=int)\n n_clusters = clusters.max() + 1\n d = _get_dist_func(metric, p=p)\n\n intra = np.zeros(n_clusters)\n for c in range(n_clusters):\n clust_data = x[clusters == c]\n if intra_method == \"max\":\n idx = np.triu_indices(len(clust_data))\n intra[c] = d(clust_data)[idx].max()\n elif intra_method == \"mean\":\n idx = np.triu_indices(len(clust_data))\n intra[c] = d(clust_data)[idx].mean()\n elif intra_method == \"cent\":\n mean = clust_data.mean(0)\n intra[c] = d(clust_data, mean[None, :]).mean()\n\n inter = np.zeros((n_clusters, n_clusters))\n for i in range(n_clusters):\n inter[i, i] = np.inf # To avoid min = 0\n for j in range(i + 1, n_clusters):\n if inter_method == \"cent\":\n mean_i = x[clusters == i].mean(0)\n mean_j = x[clusters == j].mean(0)\n inter[i, j] = inter[j, i] = d(mean_i[None, :], mean_j[None, :])\n\n return inter.min() / intra.max()\n\n\ndef kappa(data: np.ndarray):\n \"\"\"Calculates Fleiss' kappa for inter-rater agreement.\n\n Args:\n -----\n data: numpy.ndarray\n The data matrix, in the form (raters x units).\n \"\"\"\n cats = np.unique(data)\n n, N = data.shape\n\n counts = np.stack([np.sum(data == c, 0) for c in cats], 1)\n\n p_j = np.sum(counts, axis=0) / (N * n)\n assert np.isclose(np.sum(p_j), 1)\n Pe = np.sum(p_j ** 2)\n\n P = (np.sum(counts ** 2, 1) - n) / (n * (n - 1))\n Pbar = np.mean(P)\n\n return (Pbar - Pe) / (1 - Pe)\n\n\nclass Deltas:\n @staticmethod\n def nominal(c: int, k: int):\n return float(c != k)\n\n @staticmethod\n def interval(c: float, k: float):\n return (c - k) ** 2\n\n\ndef alpha(\n data: np.ndarray,\n delta: Union[Callable[[int, int], float], List[List[float]], str] = \"nominal\",\n):\n 
\"\"\"Calculates Krippendorff's alpha coefficient [1, sec. 11.3] for\n inter-rater agreement.\n\n [1] K. Krippendorff, Content analysis: An introduction to its\n methodology. Sage publications, 2004.\n\n Args:\n -----\n data: numpy.ndarray\n The data matrix, shape (n_raters, n_units). Each cell (i, j)\n represents the value assigned to unit j by rater i, or 0\n representing no response.\n delta: callable, 2-D array-like or str\n The delta metric. Default is the nominal metric, which takes the\n value 1 in case c != k and 0 otherwise.\n \"\"\"\n # The following implementation was based off the Wikipedia article:\n # https://en.wikipedia.org/wiki/Krippendorff%27s_alpha\n\n # Response categories go from 1 to R, 0 represents no response\n R = np.max(data)\n counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T\n count_sum = np.sum(counts, 0)\n assert len(count_sum) == R + 1\n\n def ordinal(c: int, k: int):\n if k < c:\n c, k = k, c\n s = (\n sum(count_sum[g] for g in range(c, k + 1))\n - (count_sum[c] + count_sum[k]) / 2\n )\n return s ** 2\n\n if isinstance(delta, str):\n delta = {\n \"nominal\": Deltas.nominal,\n \"ordinal\": ordinal,\n \"interval\": Deltas.interval,\n }[delta]\n\n if not callable(delta):\n try:\n delta[0][0]\n except IndexError:\n raise TypeError(\"delta must be either str, callable or 2D array.\")\n\n def _delta(c, k):\n new_delta = delta\n return new_delta[c][k]\n\n delta = _delta\n\n m_u = np.sum(counts[:, 1:], 1)\n\n valid = m_u >= 2\n counts = counts[valid]\n m_u = m_u[valid]\n data = data[:, valid]\n\n n = np.sum(m_u)\n\n n_cku = np.matmul(counts[:, :, None], counts[:, None, :])\n for i in range(R + 1):\n n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1)\n\n D_o = 0\n for c in range(1, R + 1):\n for k in range(1, R + 1):\n D_o += delta(c, k) * n_cku[:, c, k]\n D_o = np.sum(D_o / (n * (m_u - 1)))\n\n D_e = 0\n P_ck = np.bincount(data.flat)\n for c in range(1, R + 1):\n for k in range(1, R + 1):\n D_e += delta(c, k) * P_ck[c] * P_ck[k]\n D_e /= n * (n - 1)\n\n return 1 - D_o / D_e\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.matmul",
"numpy.bincount",
"numpy.empty",
"numpy.zeros",
"numpy.mean",
"numpy.linalg.inv",
"numpy.seterr",
"numpy.linalg.slogdet",
"numpy.max",
"numpy.array",
"numpy.linalg.pinv",
"numpy.unique",
"numpy.cov"
]
] |
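The `kappa` function in the row above expects its data matrix in (raters x units) orientation. A standalone restatement of the same computation on a toy matrix with perfect agreement, which makes that orientation and the expected result concrete:

```python
import numpy as np

def fleiss_kappa(data):
    # data: (n_raters, n_units) integer category labels, mirroring kappa() above.
    cats = np.unique(data)
    n, N = data.shape
    counts = np.stack([np.sum(data == c, 0) for c in cats], 1)   # (units, categories)
    p_j = counts.sum(0) / (N * n)                  # overall category proportions
    Pe = np.sum(p_j ** 2)                          # chance agreement
    P = (np.sum(counts ** 2, 1) - n) / (n * (n - 1))   # per-unit agreement
    return (P.mean() - Pe) / (1 - Pe)

ratings = np.array([[0, 1, 1, 2],                  # three raters, four units,
                    [0, 1, 1, 2],                  # perfect agreement
                    [0, 1, 1, 2]])
print(fleiss_kappa(ratings))                       # 1.0
```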
sdickler/FINE | [
"3114fd009e80a7eadacffe26bf5ff8e6a126ac61"
] | [
"FINE/expansionModules/robustPipelineSizing.py"
] | [
"\"\"\"\nLast edited: January 20 2020\n\n|br| @author: FINE Developer Team (FZJ IEK-3) \\n\\n\nThe approaches used are described in\nRobinius et. al. (2019) \"Robust Optimal Discrete Arc Sizing for Tree-Shaped Potential Networks\"\nand they are further developed with the help of\nTheorem 10 of Labbé et. al. (2019) \"Bookings in the European gas market: characterisation of feasibility and\ncomputational complexity results\"\nand Lemma 3.4 and 3.5 of Schewe et. al. (preprint 2020) \"Computing Technical Capacities in the European Entry-Exit\nGas Market is NP-Hard\"\n\"\"\"\nimport pandas as pd\nfrom FINE import utils\nimport networkx as nx\nimport math\nimport pyomo.environ as py\nimport warnings\nfrom pyomo.opt import SolverFactory, SolverStatus, TerminationCondition\nimport numpy as np\nimport copy\nfrom scipy.optimize import fsolve\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport shapely as shp\nimport time\nfrom multiprocessing import Pool\nimport sys\nfrom functools import partial\n\ntry:\n import geopandas as gpd\nexcept ImportError:\n warnings.warn('The GeoPandas python package could not be imported.')\n\n\n# local type und value checker\n\ndef isPandasDataFrameNumber(dataframe):\n # check if dataframe is a pandas dataframe and if each value is float or int\n if not isinstance(dataframe, pd.DataFrame):\n raise TypeError(\"The input argument has to be a pandas DataFrame\")\n else:\n if not dataframe.select_dtypes(exclude=[\"float\", \"int\"]).empty:\n raise ValueError(\"The input pandas DataFrame has to contain only floats or ints\")\n\n\ndef isPandasSeriesPositiveNumber(pandasSeries):\n # Check if the input argument is a pandas series and it contains only positive numbers\n if not isinstance(pandasSeries, pd.Series):\n raise TypeError(\"The input argument has to be a pandas series\")\n else:\n for index in pandasSeries.index:\n utils.isPositiveNumber(pandasSeries[index])\n\n\ndef isNetworkxGraph(graph):\n # Check if the input argument is a networkx graph\n if not isinstance(graph, nx.Graph):\n raise TypeError(\"The input argument has to be a networkx graph\")\n\n\ndef isDictionaryPositiveNumber(dictionary):\n # Check if the input argument is a dictionary with positive numbers as values\n if not isinstance(dictionary, dict):\n raise TypeError(\"The input argument has to be a dictionary\")\n else:\n for key in dictionary.keys():\n utils.isPositiveNumber(dictionary[key])\n\n\ndef checkLowerUpperBoundsOfDicts(lowerDict, upperDict):\n # check if lowerDict and upperDict have the same keys and if lowerDict[key] <= upperDict[key] holds\n if not (lowerDict.keys() == upperDict.keys()):\n raise ValueError(\"The input arguments have to have the same keys\")\n else:\n for key in lowerDict.keys():\n if lowerDict[key] > upperDict[key]:\n raise ValueError(\"The lower bound has to be the smaller than the upper bound\")\n\n\ndef isListOfStrings(strings):\n # check if strings is list of strings\n if not isinstance(strings, list):\n raise TypeError(\"The input argument has to be a list\")\n else:\n for string in strings:\n utils.isString(string)\n\n\ndef isBool(boolean):\n # check if boolean is a bool\n if not isinstance(boolean, bool):\n raise TypeError(\"The input argument has to be a bool\")\n\n\n# End utils checks\n\n\ndef getInjectionWithdrawalRates(componentName='', esM=None, operationVariablesOptimumData=None):\n \"\"\"\n Determines the injection and withdrawal rates into a network from a component in an\n EnergySystemModel object or based on the fluid flow data.\n\n :param 
componentName: name of the network component in the EnergySystemModel class\n (only required the fluid flows are to be obtained from the EnergySystemModel class)\n |br| * the default value is ''\n :type componentName: string\n\n :param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be\n specified if the operationVariablesOptimumData are to be obtained from the\n EnergySystemModel object) \n |br| * the default value is None\n :type esM: FINE EnergySystemModel\n\n :param operationVariablesOptimumData: the injection and withdrawal rates into and out of the\n network can either be obtained from a DataFrame with the original fluid flows or an\n EnergySystemModel with an optimized Pyomo instance.\n In the former case, the argument is a pandas DataFrame with two index columns (specifying\n the names of the start and end node of a pipeline) and one index row (for the time steps).\n The data in the DataFrame denotes the flow coming from the start node and going to the end\n node [e.g. in kWh or Nm^3]. Example:\n\n 0 1 ... 8759\n node1 node2 0.1 0.0 ... 0.9\n node2 node3 0.0 0.3 ... 0.4\n node2 node1 0.9 0.9 ... 0.2\n node3 node2 1.1 0.2 ... 0.9\n\n |br| * the default value is None\n :type operationVariablesOptimumData: pandas DataFrame with non-negative floats\n\n :return: injection and withdrawal rates (withdrawals from the network are positive while\n injections are negative)\n :rtype: pandas DataFrame\n \"\"\"\n #TODO check type and value correctness\n\n # Get the original optimal operation variables\n if operationVariablesOptimumData is not None:\n op = operationVariablesOptimumData\n else:\n op = esM.componentModelingDict[esM.componentNames[componentName]]. \\\n getOptimalValues('operationVariablesOptimum')['values'].loc[componentName]\n\n # Get a map of the component's network\n if esM is None:\n mapN = {}\n for conn in operationVariablesOptimumData.index:\n loc, loc_ = conn\n mapN.setdefault(loc, {}).update({loc_: loc + '_' + loc_})\n mapN.setdefault(loc_, {}).update({loc: loc_ + '_' + loc})\n else:\n mapN = esM.getComponent(componentName)._mapL\n\n # Initialize list for nodal injection and withdrawal time series data\n injectionWithdrawalRates, nodeIx = [], []\n\n # Reset connections set (not all indices might be in the operationVariablesOptimumData data)\n connections = set()\n\n # For each node loc, compute the injection and withdrawal rates \n for loc, locConn in mapN.items():\n # As in a few cases zero columns/ rows are dropped from data frames, two lists\n # of eligible connection indices are created.\n ixIn, ixOut = [], []\n for loc_, conn in locConn.items():\n if (loc, loc_) in op.index:\n ixOut.append((loc, loc_)), connections.add((loc, loc_))\n if (loc_, loc) in op.index:\n ixIn.append((loc_, loc)), connections.add((loc_, loc))\n # If either list has at least one entry, the incoming and outgoing flows are selected\n # from the original optimal flow variables and aggregated. 
The resulting commodity\n # withdrawals from the network are positive while injections are negative.\n if (len(ixIn) != 0) | (len(ixOut) != 0):\n injectionWithdrawalRates.append(op.loc[ixIn].sum() - op.loc[ixOut].sum())\n nodeIx.append(loc)\n\n # Concat data to a pandas dataframe\n injectionWithdrawalRates = pd.concat(injectionWithdrawalRates, keys=nodeIx, axis=1)\n\n return injectionWithdrawalRates\n\ndef getNetworkLengthsFromESM(componentName, esM):\n \"\"\"\n Obtains the pipeline lengths of a transmission component in an EnergySystemModel class.\n \n :param componentName: name of the network component in the EnergySystemModel class\n (only required if the fluid flows are to be obtained from the EnergySystemModel class)\n |br| * the default value is ''\n :type componentName: string\n\n :param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be\n specified if the operationVariablesOptimumData are to be obtained from the\n EnergySystemModel object) \n |br| * the default value is None\n :type esM: FINE EnergySystemModel\n\n :return: pipeline distances in the length unit specified in the esM object\n :rtype: pandas series\n \"\"\"\n utils.isString(componentName)\n utils.isEnergySystemModelInstance(esM)\n\n distances = esM.getComponent(componentName).distances.copy()\n indexMap = esM.getComponent(componentName)._mapC\n distances.index = [indexMap[ix] for ix in distances.index]\n\n return distances\n\n\ndef getRefinedShapeFile(shapeFilePath, regColumn1, regColumn2, dic_node_minPress, dic_node_maxPress, minPipeLength, maxPipeLength):\n \"\"\" \n If a pipe is longer than maxPipeLength than it will be split into several pipes with equidistant length,\n i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1\n\n :param shapeFilePath: path to a shape file which connects the gas injection/ withdrawal nodes with each other. The rows of the\n file describe connections between the injection/ withdrawal nodes. The required geometry of these connections is a shapely\n LineString. 
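As a side note to `getInjectionWithdrawalRates` above, the aggregation it performs can be reproduced on a toy flow table. The sketch below is a simplified stand-in with hypothetical node and flow data, not the FINE implementation; it only illustrates the sign convention (withdrawals positive, injections negative).

```python
# Simplified stand-in for the aggregation done by getInjectionWithdrawalRates (toy data):
# net nodal rate = sum of incoming arc flows - sum of outgoing arc flows, so withdrawals
# from the network come out positive and injections negative.
import pandas as pd

# hypothetical arc flows: index = (startNode, endNode), columns = time steps
flows = pd.DataFrame(
    [[0.1, 0.0], [0.0, 0.3], [0.9, 0.9]],
    index=pd.MultiIndex.from_tuples([('node1', 'node2'), ('node2', 'node3'), ('node2', 'node1')]),
    columns=[0, 1])

nodes = sorted({n for arc in flows.index for n in arc})
rates = {}
for node in nodes:
    inflow = flows[[arc[1] == node for arc in flows.index]].sum()
    outflow = flows[[arc[0] == node for arc in flows.index]].sum()
    rates[node] = inflow - outflow   # positive = withdrawal, negative = injection

injectionWithdrawalRates = pd.DataFrame(rates)
print(injectionWithdrawalRates)
```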
Additionally, the file has two columns holding the names of the two injection/ withdrawal nodes (start and end\n point of the LineString).\n :type shapeFilePath: string\n\n :param regColumn1: name of the column which holds the name of the injection/ withdrawal node at the beginning of the line\n :type regColumn1: string\n\n :param regColumn2: name of the column which holds the name of the injection/ withdrawal node at the end of the line\n :type regColumn2: string\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar].\n It holds: dic_node_minPress[index] <= dic_node_maxPress[index].\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n :param minPipeLength: desired minimum length of a pipe in [m], note: not always possible to achieve.\n :type minPipeLength: positive number\n\n :param maxPipeLength: determines the maximal length of a pipe in [m].\n :type maxPipeLength: positive number\n\n :return: distances_new - pipeline distances in m\n :rtype: pandas series\n\n :return: dic_node_minPress_new - dictionary that contains for every node of the network its lower pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n\n :return: dic_node_maxPress_new - dictionary that contains for every node of the network its upper pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n\n :return: gdfNodes - GeoDataFrame with the nodes of the network and their names\n :rtype: geopandas GeoDataFrame\n\n :return: gdfEdges - GeoDataFrame with the edges of the network and the names of their start and end nodes\n :rtype: geopandas GeoDataFrame\n \"\"\"\n # type and value check\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n utils.isString(regColumn1), utils.isString(regColumn2)\n utils.isStrictlyPositiveNumber(maxPipeLength)\n utils.isStrictlyPositiveNumber(minPipeLength)\n\n # Read shape file with linestrings connecting the entry/ exit nodes of the gas\n gdf=gpd.read_file(shapeFilePath)\n if not (gdf.geometry.type == 'LineString').all():\n raise ValueError(\"Geometries of the shape file have to be LineStrings\")\n\n print('Number of edges before segmentation:', len(gdf))\n originalNodesSet = set(gdf[regColumn1]) | set(gdf[regColumn2])\n print('Number of nodes before segmentation:', len(originalNodesSet))\n\n # Obtain nodes from shape file, assign names and minimum/ maximum pressure levels to them, delete duplicates\n coordNames, coords = [], []\n pMin, pMax = [], []\n lines = []\n\n # Break linestrings into linear pieces\n for i, row in gdf.iterrows():\n # Simplify linestring (to increase the minimum length of pipeline connections wherever possible)\n line = row.geometry.simplify(minPipeLength) \n lines.append(line)\n row.geometry = line\n\n # Get new nodes\n coords_ = [i for i in line.coords]\n coords.extend(coords_)\n\n coordNames_ = [row[regColumn1]]\n coordNames_.extend([row[regColumn1] + '_' + row[regColumn2] + '_' + str(j)\n for j in range(len(coords_)-2)])\n coordNames_.append(row[regColumn2])\n coordNames.extend(coordNames_)\n\n # Get averaged lower and upper pressure levels \n 
pMin.extend([(dic_node_minPress[row[regColumn1]]*(len(coords_)-j-1) +\n dic_node_minPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])\n \n pMax.extend([(dic_node_maxPress[row[regColumn1]]*(len(coords_)-j-1) +\n dic_node_maxPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])\n\n gdf['geometry'] = lines\n\n # Create DataFrame of old and new nodes and drop duplicates \n dfNodes = pd.DataFrame([coordNames, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T\n dfNodes = dfNodes.drop_duplicates(subset='lon_lat')\n dfNodes = dfNodes.drop_duplicates(subset='nodeName')\n\n # Obtain edges from shape file, assign names to them, delete duplicates\n nodesIn_nodesOut = []\n nodesIn = []\n nodesOut = []\n lineStrings = []\n\n for i, row in gdf.iterrows():\n coords_ = [i for i in row.geometry.coords]\n for j in range(len(coords_)-1):\n nodeIn = dfNodes.loc[dfNodes['lon_lat'] == coords_[j],'nodeName'].iloc[0]\n nodeOut = dfNodes.loc[dfNodes['lon_lat'] == coords_[j+1],'nodeName'].iloc[0]\n nodesIn.append(nodeIn), nodesOut.append(nodeOut)\n nodes = [nodeIn,nodeOut]\n nodes.sort()\n nodesIn_nodesOut.append('edge_' + nodes[0] + '_' + nodes[1])\n lineStrings.append(shp.geometry.LineString([coords_[j],coords_[j+1]]))\n \n dfEdges = pd.DataFrame([nodesIn, nodesOut, nodesIn_nodesOut, lineStrings],\n index=['nodeIn', 'nodeOut','edgeName','geometry']).T\n dfEdges = dfEdges.drop_duplicates(subset='edgeName')\n gdfEdges = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})\n\n print('Number of edges after 1. segmentation:', len(gdfEdges))\n print('Number of nodes after 1. segmentation:', len(dfNodes))\n\n # Add nodes when line distances are too long\n newNodes, newLines, newNodesName, newLinesName = [], [], [], []\n nodesIn, nodesOut, coords = [], [], []\n pMin, pMax = [], []\n\n for i, row in gdfEdges.iterrows():\n # If lines are two long, segment them\n if np.round(row['geometry'].length,2) > maxPipeLength:\n nbNewNodes = int(np.floor(row['geometry'].length/maxPipeLength))\n line = row.geometry \n newNodes_, newLines_, newNodesName_, newLinesName_ = [], [], [], []\n nodesIn_, nodesOut_, coords_ = [], [], []\n pMin_, pMax_ = [], []\n nodeStart, nodeEnd = line.interpolate(0), line.interpolate(line.length)\n nodeStartName = row['nodeIn']\n \n pMinIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMin'].iloc[0]\n pMinOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMin'].iloc[0]\n pMaxIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMax'].iloc[0]\n pMaxOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMax'].iloc[0]\n \n spacing = row['geometry'].length/(nbNewNodes+1)\n for j in range(1,nbNewNodes+1):\n newNode = line.interpolate(j*spacing)\n newNodes_.append(newNode)\n coords_.append((newNode.x, newNode.y))\n \n newNodeName = row['nodeIn'] + '_' + row['nodeOut'] + '_a_' + str(j)\n newNodesName_.append(newNodeName)\n \n newLine = shp.geometry.LineString([nodeStart,newNode])\n newLines_.append(newLine)\n newLinesName_.append('temp'), nodesIn_.append(nodeStartName), nodesOut_.append(newNodeName)\n\n pMin_.append((pMinIn*(nbNewNodes-j+1) + pMinOut*j)/(nbNewNodes+1)) \n pMax_.append((pMaxIn*(nbNewNodes-j+1) + pMaxOut*j)/(nbNewNodes+1))\n \n nodeStart, nodeStartName = newNode, newNodeName\n \n newLines_.append(shp.geometry.LineString([newNode,nodeEnd]))\n newLinesName_.append('temp')\n nodesIn_.append(newNodeName), nodesOut_.append(row['nodeOut'])\n \n newNodes.extend(newNodes_), newLines.extend(newLines_), 
newNodesName.extend(newNodesName_)\n newLinesName.extend(newLinesName_), pMin.extend(pMin_), pMax.extend(pMax_)\n nodesIn.extend(nodesIn_), nodesOut.extend(nodesOut_), coords.extend(coords_) \n\n if len(newNodes) > 0:\n dfNodes = dfNodes.append(pd.DataFrame([newNodesName, pMin, pMax, coords],\n index=['nodeName','pMin','pMax','lon_lat']).T)\n\n dfEdges = pd.DataFrame([nodesIn, nodesOut, newLinesName, newLines],\n index=['nodeIn', 'nodeOut','edgeName','geometry']).T\n gdfEdgesNew = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})\n gdfEdges = gdfEdges.append(gdfEdgesNew)\n gdfEdges = gdfEdges[gdfEdges.geometry.length.round(2) <= maxPipeLength]\n\n del gdfEdges['edgeName']\n\n renameDict = {name: 'auxNode' + str(i) for i, name in enumerate(dfNodes.nodeName.values)\n if name not in originalNodesSet}\n\n for node in originalNodesSet:\n renameDict.update({node:node})\n\n gdfEdges['nodeIn'] = gdfEdges.apply(lambda x: renameDict[x['nodeIn']], axis=1)\n gdfEdges['nodeOut'] = gdfEdges.apply(lambda x: renameDict[x['nodeOut']], axis=1)\n\n gdfEdges['distances'] = gdfEdges['geometry'].length\n\n print('Number of edges after 2. segmentation:', len(gdfEdges))\n\n dfNodes['nodeName'] = dfNodes.apply(lambda x: renameDict[x['nodeName']], axis=1)\n dfNodes['geometry'] = dfNodes.apply(lambda x: shp.geometry.Point(x['lon_lat']), axis=1)\n\n del dfNodes['lon_lat']\n\n gdfNodes = gpd.GeoDataFrame(dfNodes,crs=gdf.crs).to_crs({'init': 'epsg:3035'})\n print('Number of nodes after 2. segmentation:', len(gdfNodes))\n\n print('Minimum length [m]:', gdfEdges.distances.min(), 'Maximum length [m]:', gdfEdges.distances.max())\n\n distances_new = pd.Series(gdfEdges['distances'].values,\n index = [(n1, n2) for n1, n2 in zip(gdfEdges['nodeIn'],gdfEdges['nodeOut'])])\n \n dic_node_minPress_new = {n:pMin for n, pMin in zip(gdfNodes['nodeName'], gdfNodes['pMin'])}\n dic_node_maxPress_new = {n:pMax for n, pMax in zip(gdfNodes['nodeName'], gdfNodes['pMax'])}\n\n return distances_new, dic_node_minPress_new, dic_node_maxPress_new, gdfNodes, gdfEdges\n\n\n\ndef createNetwork(distances):\n \"\"\"\n Creates undirected network/graph from given distances; updates distances such that\n either (u,v) or (v,u) are contained\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :return: graph of the network corresponding to the distances\n :rtype: graph object of networkx\n\n :return: pipeline distances in the length unit specified in the esM object\n :rtype: pandas series\n \"\"\"\n # type and value check\n isPandasSeriesPositiveNumber(distances)\n for index in distances.index:\n if not isinstance(index, tuple):\n raise TypeError(\"Index of pandas series has to be a tuple\")\n\n # first check if distances are consistent, i.e. 
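The second segmentation step of `getRefinedShapeFile` above boils down to placing equidistant intermediate nodes on any pipe longer than `maxPipeLength`. The following standalone sketch uses a made-up straight LineString and a made-up limit to show the interpolation idea; it is not the full GeoDataFrame handling.

```python
# Standalone sketch (made-up geometry and limit) of the equidistant splitting used in
# getRefinedShapeFile: a pipe longer than maxPipeLength receives
# floor(length / maxPipeLength) intermediate nodes, as in the code above.
import math
from shapely.geometry import LineString

maxPipeLength = 1000.0                  # [m], assumed limit
line = LineString([(0, 0), (3500, 0)])  # 3.5 km straight pipe

nbNewNodes = int(math.floor(line.length / maxPipeLength))
spacing = line.length / (nbNewNodes + 1)

points = [line.interpolate(j * spacing) for j in range(nbNewNodes + 2)]
segments = [LineString([points[j], points[j + 1]]) for j in range(len(points) - 1)]

print([round(seg.length, 1) for seg in segments])   # four segments of 875.0 m each
```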
if (u,v) and (v,u) are in distances they have to have the same\n # length and we will delete one of them\n # tmp list for reversed edges that we will be delete\n tmp_edges = []\n for edge in distances.index:\n if (edge[1], edge[0]) in distances.index and (edge[1], edge[0]) not in tmp_edges:\n assert (distances[edge] == distances[(edge[1], edge[0])])\n tmp_edges.append(edge)\n # delete tmp_edges because reversed edges are already contained and we consider an undirected graph\n distances = distances.drop(tmp_edges)\n\n # get edges for graph\n edges = distances.index\n # create empty graph\n G = nx.Graph()\n # create graph from given edges and add length as edge attribute\n for edge in edges:\n G.add_edge(edge[0], edge[1], length=distances[edge])\n return G, distances\n\n\ndef createSteinerTree(graph, distances, inner_nodes):\n \"\"\"\n Computes a steiner tree with minimal sum of pipeline lengths;\n updates distances such that only arcs of the spanning tree are contained with corresponding length\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :return spanning tree with sum of lengths of pipelines is minimal\n :rtype: graph object of networkx\n \"\"\"\n from networkx.algorithms import approximation\n \n # type and value check\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n\n # compute spanning tree with minimal sum of pipeline lengths\n S = approximation.steiner_tree(graph, terminal_nodes=inner_nodes, weight='length')\n # TODO check why function fails when MST function is not called here\n S = nx.minimum_spanning_tree(S, weight='length')\n # delete edges that are in graph but not in the tree from the distance matrix\n edgesToDelete = []\n for edge in distances.index:\n # check if edge or its reversed edge are contained in the tree\n # you have to check both directions because we have an undirected graph\n if edge not in S.edges and (edge[1], edge[0]) not in S.edges:\n edgesToDelete.append(edge)\n distances = distances.drop(edgesToDelete)\n\n return S, distances\n\n\ndef _generateRobustScenarios(startNode_endNode, **kwargs):\n startNode = startNode_endNode[0]\n endNode = startNode_endNode[1]\n return startNode_endNode, computeSingleSpecialScenario(startNode=startNode, endNode=endNode, **kwargs)\n\n\ndef generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress,\n solver='glpk', threads=1, verbose=0):\n \"\"\"\n Compute for every node combination a special robust scenario according to Robinius et. al. (2019)\n and Labbé et. al. 
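`createNetwork` and `createSteinerTree` above combine a pandas distance series with networkx; the toy example below (made-up nodes and lengths) shows the same two calls on a small graph.

```python
# Toy example (hypothetical nodes/lengths) of the graph construction and tree reduction used
# by createNetwork and createSteinerTree above.
import pandas as pd
import networkx as nx
from networkx.algorithms import approximation

distances = pd.Series({('a', 'b'): 100.0, ('b', 'c'): 50.0, ('a', 'c'): 200.0, ('c', 'd'): 75.0})

G = nx.Graph()
for (u, v), length in distances.items():
    G.add_edge(u, v, length=length)

# keep only a tree connecting the terminal nodes with (approximately) minimal total length
terminals = ['a', 'b', 'd']
S = approximation.steiner_tree(G, terminal_nodes=terminals, weight='length')
S = nx.minimum_spanning_tree(S, weight='length')
print(sorted(S.edges))
```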
(2019)\n\n :param injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while\n injections are negative) for every time step and node; unit [kg/s]\n :type: pandas dataframe\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :param threads: number of threads used for parallelization\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n :return dictionary that contains for every node pair a dictionary containing all arc flows of the corresponding\n special scenario\n :rtype: dictionary key: (node1,node2), value: dictionary: key: arc, value: arc flow in [kg/s]\n\n :return list of entry node\n :rtype: list of strings\n\n :return list of exit node\n :rtype: list of strings\n \"\"\"\n # Type and value checks\n isPandasDataFrameNumber(injectionWithdrawalRates)\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n\n # get for every entry/exit node the minimal and maximal injection rate and save it in a\n # dictionary: key: node, value: min Rate; respectively max Rate in [kg/s]\n # we note that inner nodes a handled separately in the computation of the special scenario\n dic_nodes_MinCapacity = {}\n dic_nodes_MaxCapacity = {}\n # list of entry nodes and exit nodes; note node can be in both for example storages\n entries = []\n exits = []\n inners = []\n for node in list(injectionWithdrawalRates.columns.values):\n minRate = injectionWithdrawalRates[node].min()\n maxRate = injectionWithdrawalRates[node].max()\n assert (minRate <= maxRate)\n dic_nodes_MinCapacity[node] = minRate\n dic_nodes_MaxCapacity[node] = maxRate\n # if minRate is negative, then node is an entry; if maxRate is positive, then node is an exit\n if minRate < 0.0:\n entries.append(node)\n if maxRate > 0.0:\n exits.append(node)\n elif maxRate > 0:\n exits.append(node)\n else:\n inners.append(node)\n\n maxPressuresAreEqual = True if len(set(dic_node_maxPress.values())) == 1 else False\n\n p_exits = [dic_node_minPress[exit] for exit in exits]\n p_entries_inners = [dic_node_minPress[node] for node in entries]\n p_inners = [dic_node_minPress[node] for node in inners]\n p_entries_inners.extend(p_inners)\n minPressureExitsIsLarger = True if min(p_exits) >= max(p_entries_inners) else False\n\n # compute special scenario for each node combination; see Paper Robinius et. al.(2019); Labbé et. al. 
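`generateRobustScenarios` solves one scenario LP per node pair and spreads that work over a process pool via `functools.partial` and `Pool.imap` (see the `threads` parameter above). A generic, self-contained version of that dispatch pattern, with a placeholder worker and placeholder data rather than the FINE worker, looks as follows.

```python
# Generic sketch of the Pool.imap / functools.partial dispatch pattern used in
# generateRobustScenarios; the worker and its return value are placeholders.
from functools import partial
from multiprocessing import Pool

def _worker(node_pair, factor=1):
    start, end = node_pair
    return node_pair, factor * (len(start) + len(end))   # stands in for one scenario LP

if __name__ == '__main__':
    node_pairs = [('n1', 'n2'), ('n1', 'n3'), ('n2', 'n3')]
    results = {}
    with Pool(2) as pool:
        for pair, value in pool.imap(partial(_worker, factor=10), node_pairs):
            results[pair] = value
    print(results)
```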
(2019)\n # save arc flows of special scenarios for each node combination;\n # dictionary: key: node pair, value: dictionary: key: arc, value: arc flow\n dic_nodePair_flows = {}\n\n if maxPressuresAreEqual and minPressureExitsIsLarger:\n if verbose == 0:\n print('Reduced robust scenario set can be generated' +\n ' (pMax is equal at all nodes & pMin at exits is >= at inner and entry nodes).')\n nodes = [(startNode, endNode) for startNode in entries for endNode in exits if startNode != endNode]\n else:\n nodes = [(startNode, endNode) for startNode in graph.nodes for endNode in graph.nodes if startNode != endNode]\n\n pool = Pool(threads)\n for i, values in enumerate(pool.imap(partial(_generateRobustScenarios, graph=graph, distances=distances,\n entries=entries, exits=exits, dic_nodes_MinCapacity=dic_nodes_MinCapacity,\n dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, solver=solver),\n nodes), 1):\n if verbose == 0:\n sys.stderr.write('\\rPercentage simulated: {:d}%'.format(int(i / len(nodes) * 100)))\n dic_nodePair_flows[values[0]] = values[1]\n pool.close()\n pool.join()\n\n return dic_nodePair_flows, entries, exits\n\n\ndef computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode, dic_nodes_MinCapacity,\n dic_nodes_MaxCapacity, specialScenario=True, solver='glpk'):\n \"\"\"\n Compute special robust scenario for given node combination according to Robinius et. al. (2019)\n and Labbé et. al. (2019)\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :param entries: list of entry nodes of the network\n :type entries: list of strings\n\n :param exits: list of exit nodes of the network\n :type exits: list of strings\n\n :param startNode: node of the network (starting node of the special scenario)\n :type startNode: string\n\n :param endNode: node of the network (end node of special scenario)\n :type endNode: string\n\n :param dic_nodes_MinCapacity: dictionary containing minimal capacity for each node\n :type dic_nodes_MinCapacity: dictionary: key: node of the network, value: float\n\n :param dic_nodes_MaxCapacity: dictionary containing maximal capacity for each node\n :type dic_nodes_MaxCapacity: dictionary: key: node of the network, value: float\n\n :param specialScenario: bool: True if we compute special robust scenario; False if we compute scenario for fixed\n demand vector, e.g., for scenario of a time step\n :type specialScenario: bool\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :return dictionary that contains for every arc the corresponding arc flows of the (special) scenario\n :rtype: dictionary key: arc, value: arc flow\n \"\"\"\n # Type and value check\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n isListOfStrings(entries)\n isListOfStrings(exits)\n utils.isString(startNode)\n utils.isString(endNode)\n if isinstance(dic_nodes_MinCapacity, dict) and isinstance(dic_nodes_MaxCapacity, dict):\n if not (dic_nodes_MinCapacity.keys() == dic_nodes_MaxCapacity.keys()):\n raise TypeError(\"Dictionaries for min and max capacity need same keys\")\n for node in dic_nodes_MinCapacity.keys():\n if not (isinstance(dic_nodes_MinCapacity[node], float) or isinstance(dic_nodes_MinCapacity[node], int)):\n raise TypeError(\"The input argument has to be an number\")\n if not 
(isinstance(dic_nodes_MaxCapacity[node], float) or isinstance(dic_nodes_MaxCapacity[node], int)):\n raise TypeError(\"The input argument has to be an number\")\n if dic_nodes_MaxCapacity[node] < dic_nodes_MinCapacity[node]:\n raise ValueError(\"minimal node capacity has to be equal or smaller than maximal node capacity\")\n else:\n raise TypeError(\"dic_nodes_MinCapacity and dic_nodes_MinCapacity have to be dictionaries\")\n isBool(specialScenario)\n\n # we build concrete Pyomo Model\n model = py.ConcreteModel()\n\n # Description model: we have a simple directed graph. We allow negative flows because a pipe can be used in both\n # directions by the flows\n model.Nodes = py.Set(initialize=graph.nodes)\n # important to use distances.keys() instead of graph.edges such that we do not have key errors later on because\n # the edges in graph are undirected and in distances.keys() directed\n model.Arcs = py.Set(initialize=distances.keys(), dimen=2)\n\n # create demand variables for every node;\n # if specialScenario is true, then we compute special scenario, i.e. entry/exit demand variables are bounded by\n # min(0,minimal_capacity) <= demandVariable <= max(0, maximal_capacity)\n # demand variables for inner nodes are set to zero\n # if specialScenario is false, the demand variable is just bounded by the minimal and maximal capacity\n if specialScenario:\n def demandCapacities(model, node):\n if node in entries or node in exits:\n return min(0, dic_nodes_MinCapacity[node]), max(0, dic_nodes_MaxCapacity[node])\n else:\n return 0, 0\n\n model.Demand = py.Var(model.Nodes, bounds=demandCapacities)\n else:\n # we do not compute special scenarios; we just compute flows for given, possibly fixed, demands\n def demandCapacities(model, node):\n return dic_nodes_MinCapacity[node], dic_nodes_MaxCapacity[node]\n\n model.Demand = py.Var(model.Nodes, bounds=demandCapacities)\n\n # create arc flow variables for every arc of the network\n model.Flow = py.Var(model.Arcs)\n\n # compute NodesOut, i.e., set of nodes that are connected to considered node by outgoing arc\n def nodes_out_init(model, node):\n retval = []\n for (i, j) in model.Arcs:\n if i == node:\n retval.append(j)\n return retval\n\n model.NodesOut = py.Set(model.Nodes, initialize=nodes_out_init)\n\n # compute NodesIn, i.e., set of nodes connected to considered node by ingoing arc\n def nodes_in_init(model, node):\n retval = []\n for (i, j) in model.Arcs:\n if j == node:\n retval.append(i)\n return retval\n\n model.NodesIn = py.Set(model.Nodes, initialize=nodes_in_init)\n\n # add flow balance constraints corresponding to the node demands\n def flow_balance_rule(model, node):\n return sum(model.Flow[i, node] for i in model.NodesIn[node]) \\\n - sum(model.Flow[node, j] for j in model.NodesOut[node]) \\\n == model.Demand[node]\n\n model.FlowBalance_cons = py.Constraint(model.Nodes, rule=flow_balance_rule)\n\n # compute unique flow-path P(startNode,endNode) from entry to exit; given by list of nodes of the path\n pathNodes = nx.shortest_path(graph, source=startNode, target=endNode)\n # non zero coefficients of objective function\n dic_arc_coef = {}\n # determine coefficients for objective function\n # if for an arc (u,v), u, respectively v, are not in pathNodes, then the coefficient is 0\n # if arc (u,v) of pathNodes satisfies P(startNode, u) subset P(startNode,v), then coefficient is 1, otherwise -1\n for index in range(0, len(pathNodes) - 1):\n # check which direction of the arc is contained in the graph\n if (pathNodes[index], pathNodes[index + 1]) in 
model.Arcs:\n dic_arc_coef[(pathNodes[index], pathNodes[index + 1])] = 1\n else:\n dic_arc_coef[(pathNodes[index + 1], pathNodes[index])] = -1\n\n # we set objective\n def obj_rule(model):\n return sum(dic_arc_coef[arc] * model.Flow[arc] for arc in dic_arc_coef.keys())\n\n model.Obj = py.Objective(rule=obj_rule, sense=py.maximize)\n\n # Create a solver\n opt = SolverFactory(solver)\n # Solve optimization model\n results = opt.solve(model)\n # status of solver\n status = results.solver.status\n # termination condition\n termCondition = results.solver.termination_condition\n\n # save the solution of the flows in a dictionary key: arcs, values: flow\n dic_scenario_flow = {}\n\n if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:\n utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +\n '. No output is generated.', 0, 0)\n elif termCondition == TerminationCondition.infeasibleOrUnbounded or \\\n termCondition == TerminationCondition.infeasible or \\\n termCondition == TerminationCondition.unbounded:\n utils.output('Optimization problem is ' + str(termCondition) +\n '. No output is generated.', 0, 0)\n else:\n # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown\n # status), show a warning message.\n if not termCondition == TerminationCondition.optimal:\n warnings.warn('Output is generated for a non-optimal solution.')\n\n # dic_arcScenario has key (v,w,scenario) and value flow will be needed for MIP\n for arc in model.Arcs:\n dic_scenario_flow[arc] = model.Flow[arc].value\n\n return dic_scenario_flow\n\n\ndef computeLargeMergedDiameters(dic_subSetDiam_costs, nDigits=6):\n \"\"\"\n Compute merged diameters, i.e. compute equivalent single diameter for two looped pipes.\n\n :param dic_subSetDiam_costs: dictionary containing diameters in [m] and costs in [Euro/m]\n :type: dictionary: key: diameter, value: costs\n\n :param nDigits: number of digits used in the round function\n |br| * the default value is 6\n :type nDigits: positive int\n\n :return dic_newDiam_costs: dictionary containing merged diameters in [m] and costs in [Euro/m]\n :rtype: dictionary: key: diameter, value: costs\n\n :return dic_newDiam_oldDiam: dictionary matching new diameters to old diameters\n :rtype: dictionary: key: new diameter, value: corresponding old diameter, which will be used in the looped pipe\n\n \"\"\"\n # Type and value check\n if isinstance(dic_subSetDiam_costs, dict):\n for diam in dic_subSetDiam_costs.keys():\n utils.isStrictlyPositiveNumber(diam)\n utils.isStrictlyPositiveNumber(dic_subSetDiam_costs[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n utils.isStrictlyPositiveInt(nDigits)\n\n dic_newDiam_costs = {}\n dic_newDiam_oldDiam = {}\n\n for diam in dic_subSetDiam_costs.keys():\n # compute new diameter in [m] and its costs in [Euro/m]\n # for Formula see (1) in Paper Reuß et. 
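The LP built inside `computeSingleSpecialScenario` above reduces to flow variables on directed arcs, one flow-balance constraint per node, and an objective over the arcs of the start-end path. The sketch below rebuilds that structure with hypothetical data for a three-node line; it is not the original function, and solving it assumes the glpk solver is installed.

```python
# Reduced sketch (hypothetical data) of the LP structure in computeSingleSpecialScenario:
# flow variables per directed arc, flow balance per node, maximise flow along the path n1->n3.
import pyomo.environ as py

arcs = [('n1', 'n2'), ('n2', 'n3')]
demand_bounds = {'n1': (-10, 0), 'n2': (0, 0), 'n3': (0, 10)}   # entry n1, inner n2, exit n3
path_coef = {('n1', 'n2'): 1, ('n2', 'n3'): 1}                  # +1: arc points along the path

m = py.ConcreteModel()
m.Nodes = py.Set(initialize=list(demand_bounds.keys()))
m.Arcs = py.Set(initialize=arcs, dimen=2)
m.Demand = py.Var(m.Nodes, bounds=lambda m, n: demand_bounds[n])
m.Flow = py.Var(m.Arcs)

def balance(m, n):
    return sum(m.Flow[i, j] for (i, j) in arcs if j == n) \
        - sum(m.Flow[i, j] for (i, j) in arcs if i == n) == m.Demand[n]
m.Balance = py.Constraint(m.Nodes, rule=balance)

m.Obj = py.Objective(expr=sum(path_coef[a] * m.Flow[a] for a in arcs), sense=py.maximize)

py.SolverFactory('glpk').solve(m)                      # assumes glpk is installed
print({a: py.value(m.Flow[a]) for a in arcs})          # expected: 10.0 on both path arcs
```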
al.\n # since at current state we consider the diameter for a looped pipe the above is\n # equivalent to 2^(2/5) * diam and thus, we do not have to transform diam from [m] to [mm]\n newDiam = ((diam ** (5 / 2) + diam ** (5 / 2)) ** (2 / 5)).__round__(nDigits)\n # costs are two times costs of diam because newDiam represents two looped pipe with diameter diam\n newCosts = 2 * dic_subSetDiam_costs[diam]\n dic_newDiam_costs[newDiam] = newCosts\n dic_newDiam_oldDiam[newDiam] = diam\n\n return dic_newDiam_costs, dic_newDiam_oldDiam\n\n\ndef determinePressureDropCoef(dic_scenario_flows, distances, dic_node_minPress, dic_node_maxPress,\n diameters, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965, nDigits=6):\n \"\"\"\n Compute for each scenario, diameter, and each arc the corresponding pressure drop\n\n :param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all\n arc flows in [kg/s] of the corresponding (special) scenario\n :type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param diameters: list of diameters in [m]\n :type: list of strictly positive numbers\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float; optional\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float; optional\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float; optional\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float; optional\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float; optional\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float; optional\n\n :param nDigits: number of digits used in the round function\n |br| * the default value is 6\n :type nDigits: positive int; optional\n\n :return dictionary that contains for every scenario and diameter the corresponding pressure drops\n :rtype: dictionary key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop\n \"\"\"\n # check type and value\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n isPandasSeriesPositiveNumber(distances)\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n 
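`computeLargeMergedDiameters` above relies on the identity (d^(5/2) + d^(5/2))^(2/5) = 2^(2/5) * d; a quick numeric check with made-up cost data:

```python
# Numeric illustration (hypothetical cost data) of the looped-pipe merge in
# computeLargeMergedDiameters above: the merged diameter equals 2**(2/5) times the single
# diameter, and the merged pipe carries twice the single-pipe cost.
dic_subSetDiam_costs = {0.5: 300.0, 0.9: 650.0}   # diameter [m] -> cost [Euro/m]

dic_newDiam_costs, dic_newDiam_oldDiam = {}, {}
for diam, cost in dic_subSetDiam_costs.items():
    newDiam = round((diam ** 2.5 + diam ** 2.5) ** 0.4, 6)   # == round(2 ** 0.4 * diam, 6)
    dic_newDiam_costs[newDiam] = 2 * cost
    dic_newDiam_oldDiam[newDiam] = diam

print(dic_newDiam_costs)   # {0.659754: 600.0, 1.187557: 1300.0}
```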
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n if isinstance(diameters, list):\n for diam in diameters:\n utils.isPositiveNumber(diam)\n else:\n raise TypeError(\"Diameters has to be a list\")\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n utils.isStrictlyPositiveInt(nDigits)\n\n # compute for each diameter, scenario, and arc its pressure drop\n # save results in dic: key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop\n dic_pressureDropCoef = {}\n for diameter in diameters:\n for nodePair in dic_scenario_flows.keys():\n # initialize dictionary\n dic_pressureDropCoef[(diameter, nodePair)] = {}\n # compute cross section of considered pipe and diameter\n tmpvalue_A = 0.25 * np.pi * diameter ** 2\n for arc in dic_scenario_flows[nodePair].keys():\n # check if flow is unequal to zero\n if dic_scenario_flows[nodePair][arc] != 0.0:\n # Compute approximation of average pressure flow in pipe (u,v) by\n # if flow((u,v)) is positive then set p_min to lower pressure bound of v and p_max to\n # upper pressure bound u\n # if flow((u,v)) is negative then set p_min to lower pressure bound of u and p_max to\n # upper pressure bound v\n if dic_scenario_flows[nodePair][arc] > 0:\n p_min = dic_node_minPress[arc[1]]\n p_max = dic_node_maxPress[arc[0]]\n else:\n p_min = dic_node_minPress[arc[0]]\n p_max = dic_node_maxPress[arc[1]]\n # compute approximation of average pressure\n p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min))\n # approximation for density\n rho = 0.11922 * p_m ** 0.91192 - 0.17264\n # approximation of the realgasfactor\n Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050\n K_m = Z_m / Z_n\n # approximation of the dynamic viscosity\n eta = 1.04298 * 10 ** (-10) * p_m ** 1.53560 + 8.79987 * 10 ** (-6)\n nue = eta / rho\n # compute velocity\n tmpvalue_w = (abs(dic_scenario_flows[nodePair][arc]) / rho) / tmpvalue_A\n # compute reynolds number\n tmpvalue_Re = tmpvalue_w * (diameter / nue)\n tmpvalue_alpha = np.exp(-np.exp(6.75 - 0.0025 * tmpvalue_Re))\n tmpvalue_Lambda = (64 / tmpvalue_Re) * (1 - tmpvalue_alpha) + tmpvalue_alpha * (\n -2 * np.log10(2.7 * (np.log10(tmpvalue_Re) ** 1.2 / tmpvalue_Re) + ir / (3.71 * 1000 *\n diameter))) ** (-2)\n # note p_n is in [bar] instead of [PA], thus we divide tmpvalue_C by 10**5\n # explanation: we have p_i^2-p_j^2=C. 
If p_i is in [PA] and we want p_i in [bar] then this leads to\n # (p_i/10^5)^2-(p_j/10^5)^2=C/10^10\n # but we changed p_n in computation C from [PA] to [bar] hence we only divide C by 10^5\n tmpvalue_C_bar = tmpvalue_Lambda * 16 * rho_n * T_m * p_n * K_m / (np.pi ** 2 * T_n * 10 ** 5)\n # compute final pressure drop coefficient depending on the flow\n tmp_value_C_coef = (distances[arc] / rho_n ** 2) * \\\n (tmpvalue_C_bar * dic_scenario_flows[nodePair][arc] *\n abs(dic_scenario_flows[nodePair][arc]) / diameter ** 5)\n # save pressure drop for considered diameter, scenario, and arc\n dic_pressureDropCoef[(diameter, nodePair)][arc] = tmp_value_C_coef\n else:\n dic_pressureDropCoef[(diameter, nodePair)][arc] = 0\n\n return dic_pressureDropCoef\n\n\ndef determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureDropCoef, specialScenarioNames,\n dic_node_minPress, dic_node_maxPress, dic_diam_costs, robust=True,\n solver='glpk', threads=4, verbose=0):\n \"\"\"\n Model of optimal pipeline sizing (diameter selection) w.r.t. to the given scenarios\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_pressureDropCoef: dictionary that contains for every scenario and diameter the\n corresponding pressure drops in [bar]\n :type dic_pressureDropCoef: dictionary: keys: scenarioName; value: dict: key: arc, value: pressure drop in [bar]\n\n :param specialScenarioNames: list of names of scenarios. In robust case tuples (startNode, endNode).\n :type specialScenarioNames: list of tuples in the robust case, otherwise list of time Steps\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param dic_diam_costs: dictionary that contains for every diameter in [m] its costs [Euro/m]\n :type dic_diam_costs: dictionary key: diameter, value: non-negative float\n\n :param robust: Bool that is true, if we optimize w.r.t. 
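The physical correlations quoted above in `determinePressureDropCoef` can be evaluated in isolation; the snippet below plugs hypothetical pressure bounds into the mean-pressure approximation and the hydrogen density and real-gas-factor fits used in the code.

```python
# Small numeric illustration of the approximations used in determinePressureDropCoef
# (mean pressure between the pressure bounds of an arc, plus the hydrogen density and
# real-gas-factor correlations). The pressure bounds are hypothetical.
p_min, p_max = 60.0, 70.0        # [bar]

p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min))   # mean pressure [bar]
rho = 0.11922 * p_m ** 0.91192 - 0.17264                              # density approximation
Z_m = 5.04421e-4 * p_m ** 1.03905 + 1.00050                           # real-gas factor

print(round(p_m, 3), round(rho, 4), round(Z_m, 4))
```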
robust scenarios, otherwise False.\n :type robust: bool\n\n :return dictionary that contains for every arc the optimal diameter in [m]\n :rtype dictionary: key: arc, value: optimal diameter\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :param threads: number of threads used for optimization (if gurobi is used)\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n :return dictionary that contains for every scenario the corresponding pressure levels\n :rtype dictionary: key: scenarioName, value: dict: key: node, value: pressure level of node\n \"\"\"\n # type and value checks\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n if not isinstance(dic_pressureDropCoef, dict):\n raise TypeError(\"The input has to be a dictionary\")\n\n if isinstance(specialScenarioNames, list):\n if robust:\n for scenario in specialScenarioNames:\n isinstance(scenario, tuple)\n else:\n raise TypeError(\"The input argument has to be a list\")\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n if isinstance(dic_diam_costs, dict):\n for diam in dic_diam_costs.keys():\n utils.isStrictlyPositiveNumber(diam)\n utils.isStrictlyPositiveNumber(dic_diam_costs[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n if not isinstance(robust, bool):\n raise TypeError(\"The input has to be a bool\")\n utils.isString(solver)\n utils.isPositiveNumber(verbose)\n\n # set list of available diameters\n diameters = dic_diam_costs.keys()\n\n # build concrete pyomo model\n model = py.ConcreteModel()\n\n # sets for nodes, arcs, diameters, scenarios\n model.nodes = py.Set(initialize=graph.nodes)\n model.arcs = py.Set(initialize=list(distances.keys()), dimen=2)\n # diameters assuming that each pipe has the same diameter options\n model.diameters = py.Set(initialize=diameters)\n # if we have special scenarios, scenario names are tuples, otherwise not\n if robust:\n # set indices for each scenario by its nodePair = (startnode, endnode)\n model.scenarios = py.Set(initialize=specialScenarioNames, dimen=2)\n else:\n # set indices for each timeStep number\n model.scenarios = py.Set(initialize=specialScenarioNames, dimen=1)\n\n # create variables binaries x are the same for each scenario\n # pressure variables are different for each scenario\n model.x = py.Var(model.arcs, model.diameters, domain=py.Binary)\n if robust:\n def pressureBounds(model, node, startnode, endnode):\n return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2\n\n model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)\n else:\n def pressureBounds(model, node, timeStep):\n return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2\n\n model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)\n\n # objective: minimize the costs\n def obj_rule(model):\n return sum(\n sum(dic_diam_costs[diam] * distances[arc] * model.x[arc, diam] for diam in model.diameters)\n for arc in model.arcs)\n\n model.Obj = py.Objective(rule=obj_rule)\n\n # pressure drop for each cons and each scenario\n if robust:\n def pressure_drop(model, arc0, arc1, scenarioStart, scenarioEnd):\n return model.pi[arc1, (scenarioStart, scenarioEnd)] - model.pi[arc0, (scenarioStart, scenarioEnd)] == \\\n -sum(dic_pressureDropCoef[(diam, (scenarioStart, scenarioEnd))][(arc0, arc1)] *\n model.x[arc0, arc1, 
diam] for diam in model.diameters)\n\n model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_drop)\n else:\n def pressure_dropNotRobust(model, arc0, arc1, timeStep):\n return model.pi[arc1, timeStep] - model.pi[arc0, timeStep] == \\\n -sum(dic_pressureDropCoef[(diam, timeStep)][(arc0, arc1)] *\n model.x[arc0, arc1, diam] for diam in model.diameters)\n\n model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_dropNotRobust)\n\n # ensure that a single diameter per arc is chosen\n def selection_diameter(model, arc0, arc1):\n return sum(model.x[arc0, arc1, diam] for diam in model.diameters) == 1\n\n model.SelectionDiameter_cons = py.Constraint(model.arcs, rule=selection_diameter)\n\n # Create a solver\n\n opt = SolverFactory(solver)\n # Set the specified solver options\n # Solve optimization problem. The optimization solve time is stored and the solver information is printed.\n if (verbose == 2) & (solver == 'gurobi'):\n optimizationSpecs = ' LogToConsole=0'\n opt.set_options('Threads=' + str(threads) + optimizationSpecs)\n results = opt.solve(model, tee=True, keepfiles=False)\n else:\n results = opt.solve(model, tee=True, report_timing=True, keepfiles=False)\n\n # status of solver\n status = results.solver.status\n # termination condition\n termCondition = results.solver.termination_condition\n # write diameter solution to dictionary: key: arc, value: optimal diameter\n # write pressure solutions to dictionary; key: scenarioName, value: dict: key: node, value: pressure level in [bar]\n dic_arc_diam = {}\n dic_scen_node_press = {}\n\n if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:\n utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +\n '. No output is generated.', 0, 0)\n elif termCondition == TerminationCondition.infeasibleOrUnbounded or \\\n termCondition == TerminationCondition.infeasible or \\\n termCondition == TerminationCondition.unbounded:\n utils.output('Optimization problem is ' + str(termCondition) +\n '. 
No output is generated.', 0, 0)\n else:\n # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown\n # status), show a warning message.\n if not termCondition == TerminationCondition.optimal:\n warnings.warn('Output is generated for a non-optimal solution.')\n\n # initialize dict with empty dict\n for scenario in specialScenarioNames:\n dic_scen_node_press[scenario] = {}\n\n for v in model.component_objects(py.Var, active=True):\n varobject = getattr(model, str(v))\n for index in varobject:\n # round because sometimes we are nearly one\n if str(varobject) == 'x' and round(varobject[index].value) == 1:\n dic_arc_diam.update({(index[0], index[1]): index[2]})\n elif str(varobject) == 'pi':\n if robust:\n # need sqrt() because in model pressure is quadratic because of the transformation\n dic_scen_node_press[(index[1], index[2])].update({index[0]: np.sqrt(varobject[index].value)})\n else:\n # need sqrt() because in model pressure is quadratic because of the transformation\n dic_scen_node_press[(index[1])].update({index[0]: np.sqrt(varobject[index].value)})\n\n return dic_arc_diam, dic_scen_node_press\n\n\ndef _postprocessing(scenario, dic_scenario_flows, graph, **kwargs):\n dic_scen_PressLevel = {}\n dic_scen_MaxViolPress = math.inf\n # copy a list of nodes\n tmp_nodes = copy.deepcopy(list(graph.nodes))\n # we now set iteratively the pressure level of a single node to its upper pressure bound and then compute the\n # unique pressure levels until we find valid pressure levels or have tested all nodes\n while tmp_nodes:\n # we have not found valid pressure levels for this scenario\n # temporary pressure levels\n dic_tmp_pressure = {}\n for node in list(graph.nodes):\n dic_tmp_pressure[node] = None\n # choose the node which pressure level is fixed to the upper pressure bound\n current_node = tmp_nodes[0]\n validation, tmp_viol = computePressureAtNode(graph=graph, node=current_node, nodeUpperBound=current_node,\n dic_scenario_flows=dic_scenario_flows[scenario], dic_node_pressure=dic_tmp_pressure, **kwargs)\n # if validation true, then we have feasible pressure levels; empty list of nodes that have to be\n # considered\n if validation:\n tmp_nodes = []\n # we have feasible pressure level and save them\n dic_scen_PressLevel = dic_tmp_pressure\n dic_scen_MaxViolPress = tmp_viol\n else:\n # remove considered entry from list of nodes that will be considered for fixing the pressure level\n tmp_nodes.remove(tmp_nodes[0])\n # we update the maximal pressure level violation\n if tmp_viol < dic_scen_MaxViolPress:\n # save currently best pressure levels\n dic_scen_PressLevel = copy.deepcopy(dic_tmp_pressure)\n dic_scen_MaxViolPress = tmp_viol\n\n return scenario, dic_scen_PressLevel, dic_scen_MaxViolPress\n\n\ndef postprocessing(graph, distances, dic_arc_diam, dic_scenario_flows, dic_node_minPress, dic_node_maxPress,\n threads=1, verbose=0):\n \"\"\"\"\n Compute \"more\" accurate pressure levels for the considered scenarios in the network with optimal diameters\n Apply postprocessing of Master's thesis with adaption that we possibly consider every node for fixing its\n pressure level to the upper pressure bound.\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_arc_diam: dictionary containing for each arc the optimal 
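`determineOptimalDiscretePipelineSelection` above couples binary diameter choices with squared-pressure variables. The toy model below (one arc, two candidate diameters, invented pressure-drop coefficients and costs) reproduces that coupling and the one-diameter-per-arc constraint; it is only a sketch, and solving it assumes glpk is available.

```python
# Toy version (invented data) of the sizing MIP in determineOptimalDiscretePipelineSelection:
# binary diameter choice per arc, squared pressures, pressure-drop coupling, one diameter per arc.
import pyomo.environ as py

arcs = [('n1', 'n2')]
diams = [0.3, 0.5]
costs = {0.3: 400.0, 0.5: 700.0}                                       # [Euro/m]
length = {('n1', 'n2'): 1000.0}                                        # [m]
dropCoef = {(0.3, ('n1', 'n2')): 900.0, (0.5, ('n1', 'n2')): 120.0}    # [bar^2]
pMin, pMax = {'n1': 50.0, 'n2': 50.0}, {'n1': 70.0, 'n2': 70.0}        # [bar]

m = py.ConcreteModel()
m.arcs = py.Set(initialize=arcs, dimen=2)
m.diams = py.Set(initialize=diams)
m.nodes = py.Set(initialize=['n1', 'n2'])
m.x = py.Var(m.arcs, m.diams, domain=py.Binary)
m.pi = py.Var(m.nodes, bounds=lambda m, n: (pMin[n] ** 2, pMax[n] ** 2))

m.pressureDrop = py.Constraint(m.arcs, rule=lambda m, u, v: m.pi[v] - m.pi[u] ==
                               -sum(dropCoef[(d, (u, v))] * m.x[u, v, d] for d in diams))
m.oneDiameter = py.Constraint(m.arcs, rule=lambda m, u, v: sum(m.x[u, v, d] for d in diams) == 1)
m.obj = py.Objective(expr=sum(costs[d] * length[a] * m.x[a[0], a[1], d]
                              for a in arcs for d in diams))

py.SolverFactory('glpk').solve(m)                       # assumes glpk is installed
print({a: d for a in arcs for d in diams if round(py.value(m.x[a[0], a[1], d])) == 1})
```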
diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all\n arc flows in [kg/s] of the corresponding (special) scenario\n :type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n :param threads: number of threads used for parallelization\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :return: dictionary that contains for every scenario the corresponding pressure levels in [bar]\n :rtype: dictionary key: scenarioName, value: dic: key: arc, value pressure level\n\n :return: dictionary that contains for every scenario the maximal pressure bound violation in [bar]\n :rtype: dictionary key: scenarioName, value: float = maximal pressure bound violation\n \"\"\"\n # Type and value check\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n\n # best found pressure levels for scenarios; dic key: scenario, value: dic: key: node, value: pressure level in [bar]\n dic_scen_PressLevel = {}\n # maximal violation of pressure bounds; zero if no violation exists; dic: key: scenario, value: pressure violation\n dic_scen_MaxViolPress = {}\n # we compute \"precise\" pressure levels for every scenarios\n\n pool = Pool(threads)\n scenarios = [scenario for scenario in dic_scenario_flows.keys()]\n\n for i, values in enumerate(pool.imap(partial(_postprocessing, validation=True, graph=graph, dic_arc_diam=dic_arc_diam,\n distances=distances, dic_node_minPress=dic_node_minPress, dic_node_maxPress=dic_node_maxPress, tmp_violation=0,\n dic_scenario_flows=dic_scenario_flows), scenarios), 1):\n if verbose == 0:\n sys.stderr.write('\\rPercentage simulated: {:d}%'.format(int(i / len(scenarios) * 100))) \n dic_scen_PressLevel[values[0]] = values[1]\n dic_scen_MaxViolPress[values[0]] = values[2]\n pool.close()\n pool.join()\n\n return dic_scen_PressLevel, dic_scen_MaxViolPress\n\n\ndef computePressureAtNode(validation, node, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows,\n dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure,\n ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965, nDigits=6):\n \"\"\"\"\n Compute pressure levels recursive for given scenario and node that is fixed to its upper pressure level\n\n :param validation: boolean that is False, if the computed pressure levels are infeasible\n :rtype validation: bool\n\n :param node: node of the network 
for which we currently consider for computing the pressure levels\n :type node: str\n\n :param nodeUpperBound: node which pressure level is fixed to the upper bound\n :type node: str\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]\n :type: dictionary: key: arc, value: arc flow\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param tmp_violation: violation of the current pressure bounds in [bar]\n :type tmp_violation: float\n\n :param dic_node_pressure: dictionary that contains node pressure levels in [bar]\n :type dic_node_pressure: dictionary key: node of the network, value: non-negative float\n\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n :param nDigits: number of digits used in the pandas round function. Is applied to the\n specified or determined injection and withdrawal rates.\n |br| * the default value is 6\n :type nDigits: positive int\n\n :return validation: boolean that is true, if the computed pressure levels are feasible\n :rtype: bool\n\n :return maximal violation of the pressure bounds w.r.t. 
the computed pressure levels in [bar]\n :rtype: float\n \"\"\"\n # Type and value check\n isBool(validation)\n utils.isString(node)\n utils.isString(nodeUpperBound)\n isNetworkxGraph(graph)\n isPandasSeriesPositiveNumber(distances)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n else:\n raise TypeError(\"The input has to be a dictionary\")\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n utils.isPositiveNumber(tmp_violation)\n if not isinstance(dic_node_pressure, dict):\n raise TypeError(\"The Input has to a dictionary\")\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n utils.isStrictlyPositiveInt(nDigits)\n\n # if node is equal to nodeUpperBound, we fix its pressure level to the upper bound; base case in recursion\n if node == nodeUpperBound:\n dic_node_pressure[node] = dic_node_maxPress[node]\n # list of arcs\n arcs = list(distances.keys())\n # we now compute the neighbors of the considered node\n neighbors = graph.neighbors(node)\n # compute pressure levels for neighbor nodes\n for neighbor in neighbors:\n # check if pressure is already computed\n if dic_node_pressure[neighbor] is None:\n # check if (node,neighbor) or (neighbor,node) is in graph\n if (node, neighbor) in arcs:\n # check flow direction for arc (node,neighbor)\n if dic_scenario_flows[(node, neighbor)] >= 0.0:\n # we know pressure level of beginning node of arc; compute pressure level for end node of arc\n dic_node_pressure[neighbor] = computePressureEndnodeArc((node, neighbor), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam, distances,\n ir, rho_n, T_m, T_n, p_n, Z_n)\n else:\n # we know pressure level of endnode\n dic_node_pressure[neighbor] = computePressureStartnodeArc((node, neighbor), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam,\n distances,\n ir, rho_n, T_m, T_n, p_n, Z_n,\n tol=10 ** (- nDigits))\n else:\n # we know that arc (neighbor,node) is contained in the graph\n # check flow direction\n if dic_scenario_flows[(neighbor, node)] <= 0.0:\n # we know pressure of start node\n dic_node_pressure[neighbor] = computePressureEndnodeArc((neighbor, node), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam, distances,\n ir, rho_n, T_m, T_n, p_n, Z_n)\n else:\n # we know pressure level of end node\n dic_node_pressure[neighbor] = computePressureStartnodeArc((neighbor, node), dic_node_pressure[node],\n dic_scenario_flows, dic_arc_diam,\n distances,\n ir, rho_n, T_m, T_n, p_n, Z_n,\n tol=10 ** (- nDigits))\n # check if new computed pressure level is feasible\n if dic_node_pressure[neighbor] == - math.inf:\n # pressure violation is really high\n tmp_violation = math.inf\n return False, tmp_violation\n # check if we violate pressure bounds for neighbor node\n if dic_node_pressure[neighbor] < dic_node_minPress[neighbor] \\\n or dic_node_pressure[neighbor] > dic_node_maxPress[neighbor]:\n # pressure level is not valid\n validation = False\n # update pressure bound violation\n if dic_node_pressure[neighbor] < 
dic_node_minPress[neighbor]:\n # update violation and violation node if it is bigger\n if tmp_violation is None or \\\n abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) > tmp_violation:\n tmp_violation = abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor])\n else:\n if tmp_violation is None or \\\n abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) > tmp_violation:\n tmp_violation = abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor])\n\n # compute value for neighbor of tmp\n validation, tmp_violation = computePressureAtNode(validation, neighbor, nodeUpperBound, graph, dic_arc_diam,\n distances,\n dic_scenario_flows, dic_node_minPress, dic_node_maxPress,\n tmp_violation, dic_node_pressure)\n\n return validation, tmp_violation\n\n\ndef computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2,\n rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965, tol=10 ** (-4)):\n \"\"\"\"\n For given arc and pressure level of endNode compute the pressure of the startNode by solving the corresponding\n equation system\n\n :param arc: arc of the network for which we know the pressure at the endNode, i.e. the node which receives gas\n :type arc: tuple\n\n :param pressureEndNode: pressure level of endNode\n :type pressureEndNode: non-negative float\n\n :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]; note arc flow of arc has to be\n positive\n :type: dictionary: key: arc, value: arc flow\n\n :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n :param tol: tolerance to which accuracy we solve the equation system\n |br| * the default value is 10^-4\n :type tol: non-negative float\n\n :return: pressure level of startNode in [bar]\n :rtype: float\n \"\"\"\n # Type and Value check\n if not isinstance(arc, tuple):\n raise TypeError(\"The input has to be a tuple\")\n utils.isStrictlyPositiveNumber(pressureEndNode)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n isPandasSeriesPositiveNumber(distances)\n utils.isStrictlyPositiveNumber(ir)\n 
utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n utils.isStrictlyPositiveNumber(tol)\n\n if dic_scenario_flows[arc] == 0.0:\n return pressureEndNode\n\n # define function of nonlinear equation system f(x) = pressure_start^2-pressure_end^2-C\n # because then root is our valid pressure level solution, because we know pressure_end\n\n def f(pressure_start):\n d = dic_arc_diam[arc]\n A = 0.25 * math.pi * d ** 2\n rho_in = 0.11922 * pressure_start ** 0.91192 - 0.17264\n V_in = abs(dic_scenario_flows[arc]) / rho_in\n w_in = V_in / A\n eta_in = 1.04298 * 10 ** (-10) * pressure_start ** 1.53560 + 8.79987 * 10 ** (-6)\n nue_in = eta_in / rho_in\n Re_in = w_in * (d / nue_in)\n alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))\n Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(\n (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +\n ir / (3.71 * 1000 * d))) ** (-2)\n C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)\n # note pressure_start is in bar\n p_m = pressure_start - C_tilde / 10 ** 5\n if p_m < 0.0:\n # pressure drop too large no valid pressure assignment possible\n return -math.inf\n Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050\n K_m = Z_m / Z_n\n # note flow direction is given by startnode endnode so we square the arcflow\n C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (\n math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * dic_scenario_flows[arc] ** 2\n return pressure_start ** 2 - pressureEndNode ** 2 - C\n\n # find root of f, start value pressure_end + 0.5(bar)\n # x = fsolve(f, pressureEndNode + 0.5)\n # pressureEndnode + guess for solution depending on flow; you can replace this guess by the approximation of the\n # pressure drop of the MIP to probably achieve better results\n x = fsolve(f, pressureEndNode + 0.5 * (dic_scenario_flows[arc] ** 2) / (dic_arc_diam[arc] ** 5))\n # check if tolerance is ok\n assert isinstance(tol, float)\n # check tolerance of first solution\n if f(x[0]) <= tol:\n # value is ok\n # because x is an array return first entry, we only have one solution for the nonlinear equation system\n return x[0]\n else:\n print('nonlinear equation system failed')\n # this warning means we could not solve the system, this could be the case if the pressure drop is too large\n # or when the start value for the nonlinear equation solver is too far away from the solution\n print(\"Nonlinear equation system in Postprocessing failed. Try another node which pressure level is\"\n \" set to the upper bound\")\n return -math.inf\n\n\ndef computePressureEndnodeArc(arc, pressureStartNode, dic_scenario_flows, dic_arc_diam, distances,\n ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,\n Z_n=1.00062387922965):\n \"\"\"\"\n For given arc and pressure level of startNode compute the pressure of the endNode\n\n :param arc: arc of the network for which we know the pressure at the endNode, i.e. 
the node which receives gas\n :type arc: tuple\n\n :param pressureStartNode: pressure level of endNode\n :type pressureStartNode: non-negative float\n\n :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]\n :type: dictionary: key: arc, value: arc flow\n\n :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]\n :type: dictionary: key: arc, value: optimal diameter\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n :return: pressure level of endNode in [bar]\n :rtype: float\n \"\"\"\n # Type and Value check\n if not isinstance(arc, tuple):\n raise TypeError(\"The input has to be a tuple\")\n utils.isStrictlyPositiveNumber(pressureStartNode)\n if not isinstance(dic_scenario_flows, dict):\n raise TypeError(\"The input has to be a dictionary\")\n if isinstance(dic_arc_diam, dict):\n for diam in dic_arc_diam.keys():\n utils.isStrictlyPositiveNumber(dic_arc_diam[diam])\n isPandasSeriesPositiveNumber(distances)\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n\n arcFlow = dic_scenario_flows[arc]\n if arcFlow != 0:\n d = dic_arc_diam[arc]\n A = 0.25 * math.pi * d ** 2\n rho_in = 0.11922 * pressureStartNode ** 0.91192 - 0.17264\n V_in = abs(arcFlow) / rho_in\n w_in = V_in / A\n eta_in = 1.04298 * 10 ** (-10) * pressureStartNode ** 1.53560 + 8.79987 * 10 ** (-6)\n nue_in = eta_in / rho_in\n Re_in = w_in * (d / nue_in)\n alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))\n Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(\n (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +\n ir / (3.71 * 1000 * d))) ** (-2)\n C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)\n # note pressure_start is in bar\n p_m = pressureStartNode - C_tilde / 10 ** 5\n if p_m < 0.0:\n # pressure drop too large no valid pressure assignment possible\n return -math.inf\n Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050\n K_m = Z_m / Z_n\n # note flow direction is given by startnode endnode so we square the arcflow\n C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (math.pi ** 2 * T_n * rho_n * 10 ** 5 *\n dic_arc_diam[arc] ** 5) * arcFlow ** 2\n else:\n # flow is zero therefore pressure 
drop is zero\n C = 0\n\n if pressureStartNode ** 2 - C >= 0:\n return math.sqrt(pressureStartNode ** 2 - C)\n else:\n # pressure drop is too big return negative value, which is a invalid pressure value\n return -math.inf\n\ndef _computeTimeStepFlows(index, injectionWithdrawalRates, graph, **kwargs):\n # compute flows corresponding to demand by fixing demand for every node to given value and then compute\n # flows by LP\n dic_nodes_MinCapacity = {}\n dic_nodes_MaxCapacity = {}\n activeNodes = injectionWithdrawalRates.columns\n\n for node in graph.nodes:\n if node in activeNodes:\n dic_nodes_MinCapacity[node] = injectionWithdrawalRates.at[index, node]\n dic_nodes_MaxCapacity[node] = injectionWithdrawalRates.at[index, node]\n else:\n dic_nodes_MinCapacity[node] = 0\n dic_nodes_MaxCapacity[node] = 0\n # compute flows\n return index, computeSingleSpecialScenario(dic_nodes_MinCapacity=dic_nodes_MinCapacity,\n dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, graph=graph, **kwargs)\n\n\ndef computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, threads=1, verbose=0, solver='glpk'):\n \"\"\"\"\n Compute for each timeStep and demands given by injectionWithdrawalRates the corresponding flow values\n\n :param: injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while\n injections are negative) in [kg^3/s]\n :type injectionWithdrawalRates: pandas DataFrame\n\n :param distances: pipeline distances in the length unit specified in the esM object ([m])\n :type distances: pandas series\n\n :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]\n :type graph: networkx graph object\n\n :param entries: list of entry nodes of the network\n :type entries: list of str\n\n :param exits: list of exit nodes of the network\n :type exits: list of str\n\n :param threads: number of threads used for parallelization\n :type threads: positive integer\n\n :param verbose: if > 0, parallelization progress is displayed\n :type verbose: int\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :return: dictionary that contains for every time step the corresponding flows in [kg/s]\n :rtype: dictionary key: timeStep, value: dict: key: arc, value: arc flow\n \"\"\"\n # Type and value check\n isPandasDataFrameNumber(injectionWithdrawalRates)\n isPandasSeriesPositiveNumber(distances)\n isNetworkxGraph(graph)\n isListOfStrings(entries)\n isListOfStrings(exits)\n\n # compute for every time step the corresponding flows; dict: key: timeStep, value: dict: key: arc, value: flow\n dic_timeStep_flows = {}\n # nodes with nonzero demand are given by columns of dataframe\n activeNodes = injectionWithdrawalRates.columns\n pool = Pool(threads)\n\n indexList = list(injectionWithdrawalRates.index)\n\n for i, values in enumerate(pool.imap(partial(_computeTimeStepFlows, graph=graph, distances=distances,\n entries=entries, exits=exits, startNode=activeNodes[0],\n endNode=activeNodes[1], specialScenario=False,\n injectionWithdrawalRates=injectionWithdrawalRates,\n solver=solver),\n indexList), 1):\n if verbose == 0:\n sys.stderr.write('\\rPercentage simulated: {:d}%'.format(int(i / len(indexList) * 100)))\n dic_timeStep_flows[values[0]] = values[1]\n pool.close()\n pool.join()\n\n return dic_timeStep_flows\n\n\ndef networkRefinement(distances, maxPipeLength, dic_node_minPress, dic_node_maxPress):\n \"\"\"\n If a pipe is longer than maxPipeLength than it will be split into 
several pipes with equidistant length,\n i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1\n # TODO this function is only used for testing\n\n :param distances: pipeline distances in the length unit specified in the esM object\n :type distances: pandas series\n\n :param maxPipeLength: determines the maximal length of a pipe in [m].\n :type maxPipeLength: positive number\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :return: graph of the network corresponding to the distances\n :rtype: graph object of networkx\n\n :return: pipeline distances in the length unit specified in the esM object\n :rtype: pandas series\n\n :return: dic_node_minPress dictionary that contains for every node of the network its lower pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n\n :return dic_node_maxPress dictionary that contains for every node of the network its upper pressure bound in [bar]\n :rtype: dictionary key: node of the network, value: non-negative float\n \"\"\"\n # type and value check\n isPandasSeriesPositiveNumber(distances)\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n if maxPipeLength is not None:\n utils.isStrictlyPositiveNumber(maxPipeLength)\n\n # if maximal pipeline length is a positive number we apply the refinement\n if maxPipeLength is not None:\n # we have to check if pipes satisfy maximal pipeline length\n # list of new arcs that will be added\n newPipes = []\n # list of lengths of new added pipes\n newPipesLengths = []\n # list of split original pipes\n splitEdges = []\n for edge in distances.index:\n # get length of pipeline\n pipeLength = distances[edge]\n if pipeLength > maxPipeLength:\n # compute number of necessary artificial nodes\n nArtificialNodes = math.ceil(pipeLength / maxPipeLength) - 1\n # compute length of new pipelines\n newPipeLength = float(pipeLength / (math.ceil(pipeLength / maxPipeLength)))\n # lower and upper pressure bound for new nodes computed by average of nodes of original edge\n lowPress = (dic_node_minPress[edge[0]] + dic_node_minPress[edge[1]]) / 2\n maxPress = (dic_node_maxPress[edge[0]] + dic_node_maxPress[edge[1]]) / 2\n # add first new pipe and its length\n newPipes.append((edge[0], \"v\" + str(1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])))\n # add length of first new pipe\n newPipesLengths.append(newPipeLength)\n # add lower and upper bound for new artificial node\n dic_node_minPress[\"v\" + str(1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = lowPress\n dic_node_maxPress[\"v\" + str(1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = maxPress\n # add intermediate artificial pipes, its length, and lower/upper pressure bounds\n for index in range(1, nArtificialNodes):\n newPipes.append((\"v\" + str(index) + \"_\" + str(edge[0]) + \"_\" + str(edge[1]),\n \"v\" + str(index + 1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])))\n newPipesLengths.append(newPipeLength)\n dic_node_minPress[\"v\" + 
str(index + 1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = lowPress\n dic_node_maxPress[\"v\" + str(index + 1) + \"_\" + str(edge[0]) + \"_\" + str(edge[1])] = maxPress\n # add last new pipe and its length\n newPipes.append((\"v\" + str(nArtificialNodes) + \"_\" + str(edge[0]) + \"_\" + str(edge[1]),\n edge[1]))\n newPipesLengths.append(newPipeLength)\n # add edge to split edges\n splitEdges.append(edge)\n\n # Now delete edges that have been split\n distances = distances.drop(splitEdges)\n # Add new edges\n distances = distances.append(pd.Series(newPipesLengths, index=newPipes))\n\n # get edges for graph\n edges = distances.index\n # create empty graph\n G = nx.Graph()\n # create graph from given edges and add length as edge attribute\n for edge in edges:\n G.add_edge(edge[0], edge[1], length=distances[edge])\n\n return G, distances, dic_node_minPress, dic_node_maxPress\n\n\ndef determineDiscretePipelineDesign(robust, injectionWithdrawalRates, distances, dic_node_minPress, dic_node_maxPress,\n dic_diameter_costs=None, dic_candidateMergedDiam_costs=None,\n gdfEdges=None, regColumn1='nodeIn', regColumn2='nodeOut', solver='glpk',\n opexForDiameters=None, economicLifetime=30, interestRate=0.08, costUnit='€', ir=0.2,\n rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965,\n originalFluidFlows=None, nDigits=6, verbose=0, threads=1):\n \"\"\"\n We compute a robust (depending on parameter robust) optimal pipeline design,\n i.e. for a given network, we compute a minimal spanning tree w.r.t. its total length.\n Afterward, we compute our robust (special) scenarios, see Robinius et. al..\n Also we compute for every timeStep of injectionWithdrawalRates the corresponding flows.\n We compute merged diameters according to list candidatesMergedDiameter, i.e. we compute a equivalent single diameter\n for two parallel pipes with the same diameter\n If robust is True, then we compute the corresponding pressure drops for every diameter and robust scenario.\n If robust is False, then we compute for every timeStep the corresponding pressure drops for every diameter and\n timeStep.\n If robust is True, then we compute optimal diameters by a MIP for the robust scenarios.\n If robust is False, then we compute optimal diameters by a MIP for the timeStep scenarios. Not Robust Version!\n In a postprocessing step, we compute \"precise\" pressure levels for the robust scenarios and the timeStep scenarios.\n\n Note that if robust is False, then the network may be infeasible for robust scenarios\n which can occur in the network!\n\n :param robust: Bool that is true, we build a robust pipeline network, otherwise not\n :type robust: bool\n\n :param injectionWithdrawalRates: the argument is a pandas DataFrame with the index column\n denoting the timesteps and the index row denoting the name of the network's nodes.\n Injection are denoted with negative floats and withdrawal with positive floats\n in [kg/s]. Example:\n\n node1 node2 node3\n 0 -4 2 2\n 1 3 -1.5 -1.5\n ... ... ... ...\n 8759 0 -1 1.\n\n :type injectionWithdrawalRates: pandas DataFrame with floats\n\n :param distances: the parameter is a pandas Series with the indices being tuples of the\n network's nodes and the values being the lengths of the pipelines in [m]. 
Example:\n\n (node1, node2) 1000\n (node2, node3) 50000\n (node2, node1) 1000\n (node3, node2) 50000\n\n :type distances: pandas Series\n\n :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]\n :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float\n\n :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]\n :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float\n\n It holds dic_node_minPress[index] <= dic_node_maxPress[index]\n\n :param dic_diameter_costs: dictionary that contains all diameters in [m] as keys and the values are the\n corresponding costs in [Euro/m]. Default Value is a preselection of diameters and its costs.\n if None, then we chose the following preselection of diameters and costs\n dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,\n 0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,\n 0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,\n 0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,\n 1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}\n :type dic_diameter_costs: dict with keys: diameters, values: cost for pipeline; optional\n\n :param dic_candidateMergedDiam_costs: dictionary that contains a set of diameters in [m] as keys and\n the values are the corresponding costs in [Euro/m]. This diameters are then used to compute a single equivalent\n diameter for two looped (parallel) pipes with the considered diameter.\n |br| * the default value is empty dictionary {}\n :type dic_candidateMergedDiam_costs: dict with keys: diameters, values: cost for pipeline; optional\n\n :param gdfEdges: GeoDataFrame with the edges of the network and the names of their start and end nodes.\n Required for geo-referenced result visualization. Should be obtained from the getRefinedShapeFile\n function.\n :type gdfEdges: GeoDataFrame or None: optional, default is None\n\n :param regColumn1: name of the column in gdfEdges which holds the name of the injection/ withdrawal node\n at the beginning of the line. Required if gdfEdges is specified.\n :type regColumn1: string, optional, default is 'nodeIn'\n\n :param regColumn2: name of the column in gdfEdges which holds the name of the injection/ withdrawal node\n at the end of the line. 
Required if gdfEdges is specified.\n :type regColumn2: string, optional, default is 'nodeOut'\n\n :param solver: name of the optimization solver to use\n :type solver: string, default 'glpk'\n\n :param ir: integral roughness of pipe in [mm]\n |br| * the default value is 0.2 (hydrogen, this value can also be used for methane)\n :type ir: positive float\n\n :param rho_n: density at standard state in [kg/m^3]\n |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)\n :type rho_n: positive float\n\n :param T_m: constant temperature in [kelvin]\n |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)\n :type T_m: float\n\n :param T_n: temperature in standard state in [kelvin]\n |br| * the default value is 273.15 (hydrogen, this value can also be used for methane)\n :type T_n: float\n\n :param p_n: pressure at standard state in [bar]\n |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)\n :type p_n: non-negative float\n\n :param Z_n: realgasfactor of hydrogen at standard state\n |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)\n :type Z_n: non-negative float\n\n # TODO @Juelich where to use\n param originalFluidFlows: string that specifies the considered fluid\n |br| * the default value is None\n :type originalFluidFlows: str; optional\n\n :param nDigits: number of digits used in the round function\n |br| * the default value is 6\n :type nDigits: positive int\n\n :param verbose: defines how verbose the console logging is:\\n\n - 0: general model logging, warnings and optimization solver logging are displayed.\n - 1: warnings are displayed.\n - 2: no general model logging or warnings are displayed, the optimization solver logging is set to a\n minimum.\\n\n Note: if required, the optimization solver logging can be separately enabled in the optimizationSpecs\n of the optimize function.\n |br| * the default value is 0\n :type verbose: integer (0, 1 or 2)\n\n\n :return: tuple (dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels,\n dic_timeStep_MaxViolPress, gdfEdges), with:\n - dic_arc_optimalDiameters dictionary\n - pressure levels of postprocessing of robust scenarios dic_scen_PressLevels\n - violation of pressure bounds of robust scenarios in optimized network determined by postprocessing\n - dic_scen_MaxViolPress: maximum pressure violation in robust scenarios\n - pressure levels of postprocessing of timeSteps dic_timeStep_PressLevels\n - violation of pressure bounds of timeStep scenarios in optimized network determined by postprocessing\n - dic_timeStep_MaxViolPress: maximum pressure violation in timestep scenarios\n - geopandas GeoDataFrame (information about diameters in 'diam' column and number of pipelines in\n 'nbPipes'); None if kwarg gdfEdges was specified as being Node\n :rtype: return types:\n - dic_arc_optimalDiameters: dictionary, key: arcs, values: (numberOfPipes, diameter) note usually numberOfPipes\n is 1, but if we have chosen a merged diameter, then we have two parallel pipes with the same diameter,\n i.e. 
numberOfPipes is 2.\n - dic_scen_PressLevels: dictionary, key: nodePair, value: dict: key: arc, value: pressure level in [bar]\n - dic_scen_MaxViolPress: dictionary, key: nodePair, value: dict: key: arc, value: non-negative number\n (zero means no pressure violation)\n - dic_timeStep_PressLevels: dictionary, key: timeStep, value: dict: key: arc, value: pressure level in [bar]\n - dic_timeStep_MaxViolPress: dictionary, key: nodePair, value: dict: key: arc, value: non-negative number\n (zero means no pressure violation)\n - gdfEdges: geopandas geodataframe; None if kwarg gdfEdges was specified as being Node\n \"\"\"\n # Do type and value check of input data:\n isBool(robust)\n isPandasDataFrameNumber(injectionWithdrawalRates)\n isPandasSeriesPositiveNumber(distances)\n isDictionaryPositiveNumber(dic_node_minPress)\n isDictionaryPositiveNumber(dic_node_maxPress)\n checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)\n # extract diameters for the optimization\n if dic_diameter_costs is not None:\n if isinstance(dic_diameter_costs, dict):\n diameters = list(dic_diameter_costs.keys())\n if isinstance(diameters, list):\n for diam in diameters:\n utils.isStrictlyPositiveNumber(diam)\n else:\n raise TypeError(\"The input argument has to be a list\")\n isDictionaryPositiveNumber(dic_diameter_costs)\n if dic_candidateMergedDiam_costs is not None:\n if isinstance(dic_candidateMergedDiam_costs, dict):\n for diam in dic_candidateMergedDiam_costs.keys():\n utils.isStrictlyPositiveNumber(diam)\n utils.isPositiveNumber(dic_candidateMergedDiam_costs[diam])\n else:\n raise TypeError(\"The input argument has to be a list\")\n utils.isString(regColumn1), utils.isString(regColumn2)\n if gdfEdges is not None:\n if isinstance(gdfEdges, gpd.GeoDataFrame):\n if (not regColumn1 in gdfEdges.columns) | (not regColumn2 in gdfEdges.columns):\n raise ValueError(\"regColumn1 or regColumn2 not in columns of gdfEdges\")\n else:\n gdfEdges['nodes'] = gdfEdges.apply(lambda x: (x['nodeIn'], x['nodeOut']), axis=1)\n else:\n raise TypeError(\"gdfEdges has to be a geopandas GeoDataFrame.\")\n if opexForDiameters is not None:\n if isinstance(opexForDiameters, list):\n for opex in opexForDiameters:\n utils.isPositiveNumber(opex)\n else:\n raise TypeError(\"The input argument has to be a list\")\n utils.isPositiveNumber(interestRate)\n utils.isStrictlyPositiveNumber(economicLifetime)\n utils.isString(costUnit)\n utils.isStrictlyPositiveNumber(ir)\n utils.isStrictlyPositiveNumber(rho_n)\n if not isinstance(T_m, float):\n raise TypeError(\"The input argument has to be an number\")\n\n if not isinstance(T_n, float):\n raise TypeError(\"The input argument has to be an number\")\n utils.isPositiveNumber(p_n)\n utils.isPositiveNumber(Z_n)\n if originalFluidFlows is not None:\n utils.isString(originalFluidFlows)\n utils.isStrictlyPositiveInt(nDigits)\n\n if dic_diameter_costs is None:\n print(\"There are no diameters to choose in the optimization. 
Thus, we consider the diameters and costs:\")\n dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,\n 0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,\n 0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,\n 0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,\n 1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}\n print(dic_diameter_costs)\n\n # create graph with respect to distances\n utils.output('Creating graph with respect to given distances', verbose, 0)\n graph, distances = createNetwork(distances)\n # plot graph\n if verbose < 1:\n if gdfEdges is not None:\n gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]\n fig, ax = plt.subplots(figsize=(4,4))\n gdfEdges.plot(ax=ax, color='k'), ax.axis('off')\n else:\n utils.output(\"Original Network Graph:\", verbose, 0)\n nx.draw(graph, with_labels=True)\n plt.show()\n\n # Create a minimum spanning tree of the network with a reasonable logic\n utils.output('Creating a Steiner treee', verbose, 0)\n inner_nodes = list(injectionWithdrawalRates.columns)\n graph, distances = createSteinerTree(graph, distances, inner_nodes)\n\n utils.output(\"Steiner tree:\", verbose, 0)\n if verbose < 1:\n if gdfEdges is not None:\n gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]\n fig, ax = plt.subplots(figsize=(4,4))\n gdfEdges.plot(ax=ax, color='k'), ax.axis('off')\n else:\n nx.draw(graph, with_labels=True)\n plt.show()\n\n # Compute robust scenarios for spanning tree network\n utils.output(\"Compute robust scenario set for tree network (based on \" +\n str(len(graph.nodes)*len(graph.nodes)-len(graph.nodes)) +\n ' node combinations). Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_nodePair_flows, entries, exits = generateRobustScenarios(injectionWithdrawalRates, graph, distances,\n dic_node_minPress, dic_node_maxPress, solver=solver, threads=threads, verbose=verbose)\n utils.output(\"Number of robust scenarios: \" + str(len(dic_nodePair_flows.keys())) , verbose, 0) \n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n\n # Compute scenarios for timeSteps\n utils.output(\"Compute scenarios for each timestep. Number of timestep scenarios: \"\n + str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_timeStep_flows = computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits,\n solver=solver, threads=threads, verbose=verbose)\n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n\n # Compute equivalent single diameters for looped (parallel) pipes\n utils.output(\"Compute equivalent single diameters for looped (parallel) pipes\", verbose, 0)\n # dic_LoopedDiam_costs contains the new computed diameters and its costs\n dic_LoopedDiam_costs = None\n # dic_newDiam_oldDiam merges new and old diameters\n dic_newDiam_oldDiam = None\n if dic_candidateMergedDiam_costs is not None:\n dic_LoopedDiam_costs, dic_newDiam_oldDiam = computeLargeMergedDiameters(dic_candidateMergedDiam_costs)\n\n # merge all diameters to one dictionary for the optimization model\n dic_diameter_costs.update(dic_LoopedDiam_costs)\n\n # Compute pressure drops for each scenario and diameter and the compute optimal diameters\n # depending on robust, we do this w.r.t. 
robust scenarios or every timeStep\n # dictionary for the pressure coefficients\n dic_pressureCoef = {}\n # dictionary for the optimal diameters\n dic_arc_diam = {}\n if robust:\n # we compute the pressure drops for the robust scenarios\n utils.output(\"Pressure drop coefficients for diameters with respect to robust scenarios\", verbose, 0)\n dic_pressureCoef = determinePressureDropCoef(dic_nodePair_flows, distances, dic_node_minPress,\n dic_node_maxPress, list(dic_diameter_costs.keys()))\n specialScenarionames = list(dic_nodePair_flows.keys())\n\n # Determine optimal discrete pipeline selection by solving a MIP w.r.t. the robust scenarios\n utils.output('Determining optimal robust pipeline design under the consideration of pressure ' +\n 'losses and robust scenarios', verbose, 0)\n # returns dict: key: arc, value: optimal diameter\n # returns dict: key: nodePair, value: dic: key: node, value: pressure level\n dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,\n specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, robust, verbose=verbose,\n solver=solver, threads=threads)\n else:\n # we compute pressure drops for every timeStep scenario. Not robust version!\n # we compute the pressure drops for the robust scenarios and optimize\n utils.output(\"Pressure drop coefficients for diameters with respect to robust scenarios\", verbose, 0)\n dic_pressureCoef = determinePressureDropCoef(dic_timeStep_flows, distances, dic_node_minPress,\n dic_node_maxPress, list(dic_diameter_costs.keys()))\n timeSteps = list(dic_timeStep_flows.keys())\n\n # Determine optimal discrete pipeline selection by solving a MIP w.r.t. the timeStep scenarios\n utils.output('Determining optimal pipeline design under the consideration of pressure losses and every time step',\n verbose, 0)\n utils.output('This network design is necessarily robust!', verbose, 0)\n # returns dict: key: arc, value: optimal diameter\n # returns dict: key: timeStep, value: dic: key: node, value: pressure level\n dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,\n timeSteps, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, False, verbose=verbose,\n solver=solver, threads=threads)\n\n if not dic_arc_diam:\n utils.output(\"No feasible diameter selections exits\", verbose, 0)\n return None\n\n # Do postprocessing: Use a \"more\" accurate pressure model and apply Postprocessing of master's thesis:\n # first do postprocessing for special scenarios\n utils.output(\"Do postprocessing for robust (special) scenarios. Number of scenarios: \" + str(len(dic_nodePair_flows)) +\n '. Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_scen_PressLevels, dic_scen_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_nodePair_flows,\n dic_node_minPress, dic_node_maxPress,\n threads=threads, verbose=verbose)\n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n # print if some of these scenarios are not feasible for the \"more\" precise pressure model\n for scenario in dic_scen_MaxViolPress.keys():\n if dic_scen_MaxViolPress[scenario] > 0:\n utils.output(\"Robust Scenario \" + str(scenario) + \" violates pressure bounds by \" +\n str(dic_scen_MaxViolPress[scenario]), verbose, 0)\n\n # compute pressure levels for each time step\n utils.output(\"Do postprocessing for each timestep scenarios. 
Number of scenarios: \" +\n str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)\n timeStart = time.time()\n dic_timeStep_PressLevels, dic_timeStep_MaxViolPress = postprocessing(graph, distances, dic_arc_diam,\n dic_timeStep_flows, dic_node_minPress,\n dic_node_maxPress,\n threads=threads, verbose=verbose)\n utils.output(\"\\t\\t(%.4f\" % (time.time() - timeStart) + \" sec)\\n\", verbose, 0)\n for timeStep in dic_timeStep_MaxViolPress.keys():\n if dic_timeStep_MaxViolPress[timeStep] > 0:\n utils.output(\"Time Step \" + str(timeStep) + \" violates pressure bounds by \" +\n str(dic_timeStep_MaxViolPress[timeStep]), verbose, 0)\n\n # now determine final output, i.e. dictionary: key: arcs, values: (numberOfPipes, diameter)\n # note usually numberOfPipes is 1, but if we have chosen a merged diameter, then we have two parallel pipes with\n # the same diameter, i.e. numberOfPipes is 2.\n dic_arc_optimalDiameters = {}\n for arc in dic_arc_diam.keys():\n if dic_LoopedDiam_costs is not None:\n if dic_arc_diam[arc] in dic_LoopedDiam_costs.keys():\n dic_arc_optimalDiameters[arc] = (2, dic_newDiam_oldDiam[dic_arc_diam[arc]])\n else:\n dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])\n else:\n dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])\n\n if verbose < 1:\n if gdfEdges is not None:\n gdfEdges = gdfEdges[gdfEdges.nodes.isin(dic_arc_optimalDiameters)]\n gdfEdges['diam'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][1], axis=1)\n gdfEdges['nbPipes'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][0], axis=1)\n\n plotOptimizedNetwork(gdfEdges)\n\n else:\n # plot network with new diameters\n utils.output(\"Network with optimized diameters, looped pipes are indicated by two colored edges, \" +\n \"Thicker edge means larger diameter\", verbose, 0)\n finalG = nx.MultiGraph()\n\n for arc in dic_arc_optimalDiameters.keys():\n if dic_arc_optimalDiameters[arc][0] == 1:\n # we have a single not looped pipe\n finalG.add_edge(arc[0], arc[1], color='black', weight=5 * dic_arc_optimalDiameters[arc][1])\n else:\n # we have a looped pipe\n finalG.add_edge(arc[0], arc[1], color='r',\n weight=10 * dic_arc_optimalDiameters[arc][1])\n finalG.add_edge(arc[0], arc[1], color='b',\n weight=5 * dic_arc_optimalDiameters[arc][1])\n # pos = nx.circular_layout(finalG)\n\n edges = finalG.edges()\n\n colors = []\n weight = []\n\n for (u, v, attrib_dict) in list(finalG.edges.data()):\n colors.append(attrib_dict['color'])\n weight.append(attrib_dict['weight'])\n\n nx.draw(finalG, edges=edges, edge_color=colors, width=weight, with_labels=True)\n \n plt.show()\n\n # Add some output which somehow quantifies the difference between the original and the new\n # pipeline design (for this additional input argument are required)\n # TODO @ Juelich just compare original solution to solution dic_arc_optimalDiameters\n\n return dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels, \\\n dic_timeStep_MaxViolPress, gdfEdges\n\n\ndef plotOptimizedNetwork(gdf_pipes, figsize=(4,4), nodesColumn='nodes', diamColumn='diam',\n nbPipesColumn='nbPipes', line_scaling=1, gdf_regions=None, pressureLevels=None, pMin=50, pMax=100,\n cmap='Spectral_r', cbxShift=0.32, cbyShift=0.08, cbWidth=0.4, fontsize=10, cbTitle='Pressure [bar]'):\n \"\"\"Plot optimized network, visualizing chosen pipe diameters and, if selected, pressure levels of\n a scenario.\n \n :param gdf_pipes: GeoDataFrame, containing information about the diameters, number of pipes 
and\n routes of the pipeline network \n :type gdf_pipes: geopandas GeoDataFrame\n\n :param figsize: figure size, defaults to (4,4)\n :type figsize: tuple, optional\n\n :param nodesColumn: name of the column in gdf_pipes containing a tuple (startNode, endNode) with the\n name of the nodes being strings, defaults to 'nodes'\n :type nodesColumn: str, optional\n\n :param diamColumn: name of the column in gdf_pipes containing the diameters of the pipelines in m,\n defaults to 'diam'\n :type diamColumn: str, optional\n\n :param nbPipesColumn: name of the column in gdf_pipes containing the number of parallel pipes along\n a connection (maximum parallel pipes: 2),\n defaults to 'nbPipes'\n :type nbPipesColumn: str, optional\n\n :param line_scaling: scaling factor for line width, defaults to 1\n :type line_scaling: int, optional\n\n :param gdf_regions: GeoDataFrame for background plotting, defaults to None\n :type gdf_regions: geopandas GeoDataFrame, optional\n\n :param pressureLevels: pressure levels at each node for one scenario/ timestep, defaults to None\n :type pressureLevels: dictionary or series with keys/ indices being the nodes of the network, optional\n\n :param pMin: minimum pressure of colorbar, defaults to 50\n :type pMin: int, optional\n\n :param pMax: maximum pressure of colorbar, defaults to 100\n :type pMax: int, optional\n\n :param cmap: colormap name, defaults to 'Spectral_r'\n :type cmap: str, optional\n\n :param cbxShift: colorbar x shift, defaults to 0.32\n :type cbxShift: float, optional\n\n :param cbyShift: colorbar y shift, defaults to 0.08\n :type cbyShift: float, optional\n\n :param cbWidth: colorbar width, defaults to 0.4\n :type cbWidth: float, optional\n\n :param fontsize: fontsize of legend and colorbar, defaults to 10\n :type fontsize: int, optional\n\n :param cbTitle: colorbar title, defaults to 'Pressure [bar]'\n :type cbTitle: str, optional\n\n :return: tuple (fig, ax)\n :rtype:\n - fig: matplotlib figure\n - ax: matplotlib axis\n \"\"\"\n\n fig, ax = plt.subplots(figsize=figsize)\n cmap = mpl.cm.get_cmap(cmap)\n\n if gdf_regions is not None:\n gdf_regions.plot(ax=ax, facecolor='lightgrey', edgecolor='lightgrey')\n diamMin = gdf_pipes[gdf_pipes[diamColumn] > 0][diamColumn].min()\n for i, row in gdf_pipes.iterrows():\n lw = row[diamColumn]/diamMin*line_scaling\n if pressureLevels is not None:\n p = (pressureLevels[row[nodesColumn][0]] + pressureLevels[row[nodesColumn][1]])/2\n color = cmap((p-pMin)/(pMax-pMin))\n else:\n color='k'\n if (row[nbPipesColumn] == 1):\n gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw, capstyle='round')\n else:\n gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw*3, capstyle='round')\n gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color='white', linewidth=lw)\n ax.axis('off') \n\n lines = []\n for diam in sorted(gdf_pipes[diamColumn].unique()):\n line = plt.Line2D(range(1), range(1), linewidth=diam/diamMin*line_scaling, color='k', marker='_',\n label=\"{:>1.5}\".format(str(diam)) + ' m')\n lines.append(line)\n\n leg = ax.legend(handles=lines, prop={'size': fontsize}, loc=6, bbox_to_anchor=(1,0.5), title='Diameters')\n leg.get_frame().set_edgecolor('white')\n\n\n if pressureLevels is not None:\n sm1 = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=pMin, vmax=pMax))\n sm1._A = []\n cax = fig.add_axes([cbxShift, cbyShift, cbWidth, 0.03])\n cb1 = fig.colorbar(sm1, cax=cax, pad=0.05, aspect=7, fraction=0.07, orientation='horizontal')\n cax.tick_params(labelsize=fontsize)\n 
cax.set_xlabel(cbTitle, size=fontsize)\n cb1.ax.xaxis.set_label_position('top') \n\n plt.show()\n\n return fig, ax\n"
] | [
[
"numpy.sqrt",
"pandas.Series",
"scipy.optimize.fsolve",
"matplotlib.pyplot.Normalize",
"pandas.DataFrame",
"numpy.floor",
"matplotlib.pyplot.subplots",
"numpy.exp",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.show",
"numpy.log10",
"pandas.concat",
"numpy.round"
]
] |
ydai94/tdqn | [
"83c66263cb47016414dbe47ad3b252bb9e681ca8"
] | [
"drrn/drrn.py"
] | [
"import pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom os.path import join as pjoin\nfrom memory import ReplayMemory, Transition, State\nfrom model import DRRN\nfrom util import *\nimport logger\nimport sentencepiece as spm\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass DRRN_Agent:\n def __init__(self, args):\n self.gamma = args.gamma\n self.batch_size = args.batch_size\n self.sp = spm.SentencePieceProcessor()\n self.sp.Load(args.spm_path)\n self.network = DRRN(len(self.sp), args.embedding_dim, args.hidden_dim).to(device)\n self.memory = ReplayMemory(args.memory_size)\n self.save_path = args.output_dir\n self.clip = args.clip\n self.optimizer = torch.optim.Adam(self.network.parameters(),\n lr=args.learning_rate)\n\n\n def observe(self, state, act, rew, next_state, next_acts, done):\n self.memory.push(state, act, rew, next_state, next_acts, done)\n\n\n def build_state(self, obs, infos):\n \"\"\" Returns a state representation built from various info sources. \"\"\"\n obs_ids = [self.sp.EncodeAsIds(o) for o in obs]\n look_ids = [self.sp.EncodeAsIds(info['look']) for info in infos]\n inv_ids = [self.sp.EncodeAsIds(info['inv']) for info in infos]\n return [State(ob, lk, inv) for ob, lk, inv in zip(obs_ids, look_ids, inv_ids)]\n\n\n def encode(self, obs_list):\n \"\"\" Encode a list of observations \"\"\"\n return [self.sp.EncodeAsIds(o) for o in obs_list]\n\n\n def act(self, states, poss_acts, sample=True):\n \"\"\" Returns a string action from poss_acts. \"\"\"\n idxs, values = self.network.act(states, poss_acts, sample)\n act_ids = [poss_acts[batch][idx] for batch, idx in enumerate(idxs)]\n return act_ids, idxs, values\n\n\n def update(self):\n if len(self.memory) < self.batch_size:\n return\n\n transitions = self.memory.sample(self.batch_size)\n batch = Transition(*zip(*transitions))\n\n # Compute Q(s', a') for all a'\n # TODO: Use a target network???\n next_qvals = self.network(batch.next_state, batch.next_acts)\n # Take the max over next q-values\n next_qvals = torch.tensor([vals.max() for vals in next_qvals], device=device)\n # Zero all the next_qvals that are done\n next_qvals = next_qvals * (1-torch.tensor(batch.done, dtype=torch.float, device=device))\n targets = torch.tensor(batch.reward, dtype=torch.float, device=device) + self.gamma * next_qvals\n\n # Next compute Q(s, a)\n # Nest each action in a list - so that it becomes the only admissible cmd\n nested_acts = tuple([[a] for a in batch.act])\n qvals = self.network(batch.state, nested_acts)\n # Combine the qvals: Maybe just do a greedy max for generality\n qvals = torch.cat(qvals)\n\n # Compute Huber loss\n loss = F.smooth_l1_loss(qvals, targets.detach())\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.network.parameters(), self.clip)\n self.optimizer.step()\n return loss.item()\n\n\n def load(self):\n try:\n self.memory = pickle.load(open(pjoin(self.save_path, 'memory.pkl'), 'rb'))\n self.network = torch.load(pjoin(self.save_path, 'model.pt'))\n except Exception as e:\n print(\"Error saving model.\")\n logging.error(traceback.format_exc())\n\n\n def save(self):\n try:\n pickle.dump(self.memory, open(pjoin(self.save_path, 'memory.pkl'), 'wb'))\n torch.save(self.network, pjoin(self.save_path, 'model.pt'))\n except Exception as e:\n print(\"Error saving model.\")\n logging.error(traceback.format_exc())\n"
] | [
[
"torch.cuda.is_available",
"torch.cat",
"torch.tensor"
]
] |
dustalov/mnogoznal | [
"bacea1576d31e0d2ad5456159a57950899a116f6"
] | [
"mnogoznal/wsd.py"
] | [
"import abc\nimport csv\nfrom collections import namedtuple, defaultdict, OrderedDict, Counter\n\nimport numpy as np\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.metrics.pairwise import cosine_similarity as sim\nfrom sklearn.pipeline import Pipeline\n\nSTOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'}\n\nSynset = namedtuple('Synset', 'id synonyms hypernyms bag')\n\n\nclass Inventory(object):\n \"\"\"Sense inventory representation and loader.\"\"\"\n\n synsets = {}\n index = defaultdict(list)\n\n def __init__(self, inventory_path):\n \"\"\"\n During the construction, BaseWSD parses the given sense inventory file.\n \"\"\"\n\n def field_to_bag(field):\n return {word: freq for record in field.split(', ')\n for word, freq in (self.lexeme(record),)\n if record}\n\n with open(inventory_path, 'r', encoding='utf-8', newline='') as f:\n reader = csv.reader(f, delimiter='\\t', quoting=csv.QUOTE_NONE)\n\n for row in reader:\n id = row[0]\n\n synonyms = field_to_bag(row[2])\n hypernyms = field_to_bag(row[4])\n\n self.synsets[id] = Synset(\n id=id,\n synonyms=synonyms,\n hypernyms=hypernyms,\n bag={**synonyms, **hypernyms}\n )\n\n for word in self.synsets[id].bag:\n self.index[word].append(id)\n\n def lexeme(self, record):\n \"\"\"\n Parse the sense representations like 'word#sid:freq'.\n Actually, we do not care about the sid field because\n we use synset identifiers instead.\n \"\"\"\n if '#' in record:\n word, tail = record.split('#', 1)\n else:\n word, tail = record, None\n\n if tail:\n if ':' in tail:\n sid, tail = tail.split(':', 1)\n else:\n sid, tail = tail, None\n\n if tail:\n freq = float(tail)\n else:\n freq = 1\n\n return word, freq\n\n\nSpan = namedtuple('Span', 'token pos lemma index')\n\n\nclass BaseWSD(object):\n \"\"\"\n Base class for word sense disambiguation routines. Should not be used.\n Descendant classes must implement the disambiguate_word() method.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, inventory):\n self.inventory = inventory\n\n def lemmatize(self, sentence):\n \"\"\"\n This method transforms the given sentence into the dict that\n maps the word indices to their lemmas. It also excludes those\n words which part of speech is in the stop list.\n \"\"\"\n return {i: lemma for i, (_, lemma, pos) in enumerate(sentence)\n if pos not in STOP_POS}\n\n @abc.abstractmethod\n def disambiguate_word(self, sentence, index):\n \"\"\"\n Return word sense identifier for the given word in the sentence.\n \"\"\"\n if not sentence or not isinstance(sentence, list):\n raise ValueError('sentence should be a list')\n\n if not isinstance(index, int) or index < 0 or index >= len(sentence):\n raise ValueError('index should be in [0...%d]' % len(sentence))\n\n def disambiguate(self, sentence):\n \"\"\"\n Return word sense identifiers corresponding to the words\n in the given sentence.\n \"\"\"\n result = OrderedDict()\n\n for index, span in enumerate(sentence):\n # here, span is (token, pos, lemma), but we also need index\n span = Span(*span, index)\n\n result[span] = self.disambiguate_word(sentence, index)\n\n return result\n\n\nclass OneBaseline(BaseWSD):\n \"\"\"\n A simple baseline that treats every word as monosemeous. 
Not thread-safe.\n \"\"\"\n\n counter = {}\n\n def __init__(self):\n super().__init__(None)\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n word, _, _ = sentence[index]\n\n if word not in self.counter:\n self.counter[word] = len(self.counter)\n\n return str(self.counter[word])\n\n\nclass SingletonsBaseline(BaseWSD):\n \"\"\"\n A simple baseline that puts every instance into a different cluster. Not thread-safe.\n \"\"\"\n\n counter = 0\n\n def __init__(self):\n super().__init__(None)\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n self.counter += 1\n\n return str(self.counter)\n\n\nclass SparseWSD(BaseWSD):\n \"\"\"\n A simple sparse word sense disambiguation.\n \"\"\"\n\n sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())])\n\n def __init__(self, inventory):\n super().__init__(inventory)\n self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()])\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n lemmas = self.lemmatize(sentence)\n\n if index not in lemmas:\n return\n\n svector = self.sparse.transform(Counter(lemmas.values())) # sentence vector\n\n def search(query):\n \"\"\"\n Map synset identifiers to the cosine similarity value.\n This function calls the function query(id) that retrieves\n the corresponding dict of words.\n \"\"\"\n return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0)\n for id in self.inventory.index[lemmas[index]]})\n\n candidates = search(lambda id: self.inventory.synsets[id].synonyms)\n\n # give the hypernyms a chance if nothing is found\n if not candidates:\n candidates = search(lambda id: self.inventory.synsets[id].bag)\n\n if not candidates:\n return\n\n for id, _ in candidates.most_common(1):\n return id\n\n\nclass DenseWSD(BaseWSD):\n \"\"\"\n A word sense disambiguation approach that is based on SenseGram.\n \"\"\"\n\n class densedict(dict):\n \"\"\"\n A handy dict that transforms a synset into its dense representation.\n \"\"\"\n\n def __init__(self, synsets, sensegram):\n self.synsets = synsets\n self.sensegram = sensegram\n\n def __missing__(self, id):\n value = self[id] = self.sensegram(self.synsets[id].bag.keys())\n return value\n\n def __init__(self, inventory, wv):\n super().__init__(inventory)\n self.wv = wv\n self.dense = self.densedict(self.inventory.synsets, self.sensegram)\n\n def sensegram(self, words):\n \"\"\"\n This is a simple implementation of SenseGram.\n It just averages the embeddings corresponding to the given words.\n \"\"\"\n vectors = self.words_vec(set(words))\n\n if not vectors:\n return\n\n return np.mean(np.vstack(tuple(vectors.values())), axis=0).reshape(1, -1)\n\n def words_vec(self, words, use_norm=False):\n \"\"\"\n Return a dict that maps the given words to their embeddings.\n \"\"\"\n if callable(getattr(self.wv, 'words_vec', None)):\n return self.wv.words_vec(words, use_norm)\n\n return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv}\n\n def disambiguate_word(self, sentence, index):\n super().disambiguate_word(sentence, index)\n\n lemmas = self.lemmatize(sentence)\n\n if index not in lemmas:\n return\n\n svector = self.sensegram(lemmas.values()) # sentence vector\n\n if svector is None:\n return\n\n # map synset identifiers to the cosine similarity value\n candidates = Counter({id: sim(svector, self.dense[id]).item(0)\n for id in self.inventory.index[lemmas[index]]\n if 
self.dense[id] is not None})\n\n if not candidates:\n return\n\n for id, _ in candidates.most_common(1):\n return id\n\n\nclass LeskWSD(BaseWSD):\n \"\"\"\n A word sense disambiguation approach that is based on Lesk method. \n \"\"\"\n\n def __init__(self, inventory):\n super().__init__(inventory)\n\n def disambiguate_word(self, sentence, word_index):\n super().disambiguate_word(sentence, word_index)\n\n lemmas = self.lemmatize(sentence)\n\n if word_index not in lemmas:\n return\n\n mentions_dict = dict()\n for synset_number in self.inventory.index[lemmas[word_index]]:\n mentions_dict[synset_number] = 0\n for context_word in lemmas.values():\n if context_word != lemmas[word_index]:\n if context_word in self.inventory.synsets[synset_number].synonyms:\n mentions_dict[synset_number] = mentions_dict[synset_number] + 1\n elif context_word in self.inventory.synsets[synset_number].hypernyms:\n mentions_dict[synset_number] = mentions_dict[synset_number] + \\\n self.inventory.synsets[synset_number].hypernyms[context_word]\n\n if len(mentions_dict) > 0:\n return max(mentions_dict, key=mentions_dict.get)\n else:\n return\n"
] | [
[
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.feature_extraction.DictVectorizer"
]
] |
HabibMrad/uncertainty | [
"1646a9b07d1179045dd0375149250d5ac7501004"
] | [
"project/systems/ecgresnet_ensemble_auxout.py"
] | [
"import sys\nimport os\nimport torch\nimport pandas as pd\nimport datetime\nfrom argparse import ArgumentParser\nimport numpy as np\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, random_split\nfrom icecream import ic\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.metrics import functional as FM\n\nfrom network.ecgresnet_auxout import ECGResNet_AuxOut\nfrom utils.helpers import create_results_directory\nfrom utils.focalloss_weights import FocalLoss\n\nclass ECGResNetEnsemble_AuxOutSystem(pl.LightningModule):\n \"\"\"\n This class implements the ECGResNet with ensemble and auxiliary output in PyTorch Lightning.\n It can estimate the epistemic and aleatoric uncertainty of its predictions.\n \"\"\"\n\n def __init__(self, in_channels, n_grps, N, \n num_classes, dropout, first_width, stride, \n dilation, learning_rate, ensemble_size, n_logit_samples, loss_weights=None, \n **kwargs):\n \"\"\"\n Initializes the ECGResNetEnsemble_AuxOutSystem\n\n Args:\n in_channels: number of channels of input\n n_grps: number of ResNet groups\n N: number of blocks per groups\n num_classes: number of classes of the classification problem\n dropout: probability of an argument to get zeroed in the dropout layer\n first_width: width of the first input\n stride: tuple with stride value per block per group\n dilation: spacing between the kernel points of the convolutional layers\n learning_rate: the learning rate of the model\n ensemble_size: the number of models that make up the ensemble\n n_logit_samples: number of logit samples of the auxiliary output\n loss_weights: array of weights for the loss term\n \"\"\"\n\n super().__init__()\n self.save_hyperparameters()\n self.learning_rate = learning_rate\n self.num_classes = num_classes\n self.ensemble_size = ensemble_size\n self.n_logit_samples = n_logit_samples\n\n self.IDs = torch.empty(0).type(torch.LongTensor)\n self.predicted_labels = torch.empty(0).type(torch.LongTensor)\n self.correct_predictions = torch.empty(0).type(torch.BoolTensor)\n self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)\n self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor)\n self.total_uncertainty = torch.empty(0).type(torch.FloatTensor)\n\n self.models = []\n self.optimizers = []\n for i in range(self.ensemble_size):\n self.models.append(ECGResNet_AuxOut(in_channels, \n n_grps, N, num_classes, \n dropout, first_width, \n stride, dilation)\n )\n\n if loss_weights is not None:\n weights = torch.tensor(loss_weights, dtype = torch.float)\n else:\n weights = loss_weights\n\n self.loss = FocalLoss(gamma=1, weights = weights)\n\n def forward(self, x, model_idx):\n \"\"\"Performs a forward through a single ensemble member.\n\n Args:\n x (tensor): Input data.\n model_idx (int): Index of the ensemble member.\n\n Returns:\n output1: Output at the auxiliary point of the ensemble member\n output2: Output at the end of the ensemble member\n output2_log_var: The log variance of the ensemble_member\n \"\"\"\n\n output1, output2_mean, output2_log_var = self.models[model_idx](x)\n \n return output1, output2_mean, output2_log_var\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n \"\"\"Performs a training step for all ensemble members.\n\n Args:\n batch (dict): Output of the dataloader.\n batch_idx (int): Index no. 
of this batch.\n\n Returns:\n tensor: Total loss for this step.\n \"\"\"\n data, target = batch['waveform'], batch['label']\n\n losses = []\n for model_idx in range(self.ensemble_size):\n # Make prediction\n output1, output2_mean, output2_log_var = self(data, model_idx)\n\n # Sample from logits, returning a vector x_i\n x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)\n\n train_loss1 = self.loss(output1, target)\n train_loss2 = self.loss(x_i, target)\n total_train_loss = (0.3 * train_loss1) + train_loss2\n\n # Update weights for each model using individual optimizers\n self.manual_backward(total_train_loss, self.optimizers[model_idx])\n self.optimizers[model_idx].step()\n self.optimizers[model_idx].zero_grad()\n losses.append(total_train_loss.item())\n\n self.log('model_{}_train_loss'.format(model_idx), total_train_loss)\n\n average_train_loss = np.mean(losses)\n self.log('average_train_loss', average_train_loss)\n\n return {'loss': average_train_loss}\n\n def validation_step(self, batch, batch_idx):\n prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n\n data, target = batch['waveform'], batch['label']\n\n # Predict for each model\n for model_idx in range(self.ensemble_size):\n # Make prediction\n _, output2_mean, output2_log_var = self(data, model_idx)\n\n # Sample from logits, returning avector x_i\n x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)\n\n prediction_individual[:, model_idx] = x_i\n \n # Calculate mean over predictions from individual ensemble members\n prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)\n \n val_loss = self.loss(prediction_ensemble_mean, target)\n acc = FM.accuracy(prediction_ensemble_mean, target)\n\n # loss is tensor. 
The Checkpoint Callback is monitoring 'checkpoint_on'\n metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}\n self.log('val_acc', acc.item())\n self.log('val_loss', val_loss.item())\n return metrics\n\n def test_step(self, batch, batch_idx, save_to_csv=False):\n\n prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)\n data, target = batch['waveform'], batch['label']\n\n # Predict for each model\n for model_idx, model in enumerate(self.models):\n\n # Make prediction\n _, output2_mean, output2_log_var = self(data, model_idx)\n\n # Sample from logits, returning a vector x_i\n x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)\n\n prediction_individual[:, model_idx] = x_i.data\n\n # Take exponent to get the variance\n output2_var = output2_log_var.exp()\n aleatoric_var[:, model_idx] = output2_var.data\n \n # Calculate mean and variance over predictions from individual ensemble members\n prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)\n prediction_ensemble_var = torch.var(prediction_individual, dim=1)\n\n # Get the average aleatoric uncertainty for each prediction\n prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1)\n\n # Select the predicted labels\n predicted_labels = prediction_ensemble_mean.argmax(dim=1)\n\n test_loss = self.loss(prediction_ensemble_mean, target)\n acc = FM.accuracy(prediction_ensemble_mean, target)\n\n # Get the epistemic variance of the predicted labels by selecting the variance of\n # the labels with highest average Softmax value\n predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()\n\n # Get the aleatoric variance of the predicted labels by selecting the variance of\n # the labels with highest average Softmax value\n predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()\n\n total_var = predicted_labels_var + predicted_labels_aleatoric_var\n \n # Log and save metrics\n self.log('test_acc', acc.item())\n self.log('test_loss', test_loss.item())\n\n self.IDs = torch.cat((self.IDs, batch['id']), 0)\n self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)\n self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)\n self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0)\n self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0)\n self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)\n\n return {'test_loss': test_loss.item(), 'test_acc': acc.item(), 'test_loss': test_loss.item()}\n\n def configure_optimizers(self):\n \"\"\"\n Initialize an optimizer for each model in the ensemble\n \"\"\"\n for i in range(self.ensemble_size):\n self.optimizers.append(optim.Adam(self.models[i].parameters(), lr=self.learning_rate))\n \n return self.optimizers\n\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--model_name', type=str, default='ensemble_none')\n parser.add_argument('--ensemble_size', type=int, default=5)\n parser.add_argument('--ensembling_method', type=bool, default=True)\n 
parser.add_argument('--n_logit_samples', type=int, default=100)\n return parser\n\n def save_results(self):\n \"\"\"\n Combine results into single dataframe and save to disk as .csv file\n \"\"\"\n results = pd.concat([\n pd.DataFrame(self.IDs.numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)\n"
] | [
[
"torch.empty",
"torch.var",
"torch.tensor",
"numpy.mean",
"torch.cat",
"torch.mean"
]
] |
jacobhjkim/ray | [
"936cb5929c455102d5638ff5d59c80c4ae94770f"
] | [
"python/ray/tune/tests/test_function_api.py"
] | [
"import json\nimport os\nimport sys\nimport shutil\nimport tempfile\nimport unittest\n\nimport ray\nimport ray.cloudpickle as cloudpickle\nfrom ray.rllib import _register_all\n\nfrom ray import tune\nfrom ray.tune.logger import NoopLogger\nfrom ray.tune.utils.trainable import TrainableUtil\nfrom ray.tune.function_runner import with_parameters, wrap_function, \\\n FuncCheckpointUtil\nfrom ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION\n\n\ndef creator_generator(logdir):\n def logger_creator(config):\n return NoopLogger(config, logdir)\n\n return logger_creator\n\n\nclass FuncCheckpointUtilTest(unittest.TestCase):\n def setUp(self):\n self.logdir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.logdir)\n\n def testEmptyCheckpoint(self):\n checkpoint_dir = FuncCheckpointUtil.mk_null_checkpoint_dir(self.logdir)\n assert FuncCheckpointUtil.is_null_checkpoint(checkpoint_dir)\n\n def testTempCheckpointDir(self):\n checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)\n assert FuncCheckpointUtil.is_temp_checkpoint_dir(checkpoint_dir)\n\n def testConvertTempToPermanent(self):\n checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)\n new_checkpoint_dir = FuncCheckpointUtil.create_perm_checkpoint(\n checkpoint_dir, self.logdir, step=4)\n assert new_checkpoint_dir == TrainableUtil.find_checkpoint_dir(\n new_checkpoint_dir)\n assert os.path.exists(new_checkpoint_dir)\n assert not FuncCheckpointUtil.is_temp_checkpoint_dir(\n new_checkpoint_dir)\n\n tmp_checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(\n self.logdir)\n assert tmp_checkpoint_dir != new_checkpoint_dir\n\n\nclass FunctionCheckpointingTest(unittest.TestCase):\n def setUp(self):\n self.logdir = tempfile.mkdtemp()\n self.logger_creator = creator_generator(self.logdir)\n\n def tearDown(self):\n shutil.rmtree(self.logdir)\n\n def testCheckpointReuse(self):\n \"\"\"Test that repeated save/restore never reuses same checkpoint dir.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n count = sum(\"checkpoint-\" in path\n for path in os.listdir(checkpoint_dir))\n assert count == 1, os.listdir(checkpoint_dir)\n\n for step in range(20):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"a\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore(checkpoint)\n for i in range(2):\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n assert result[TRAINING_ITERATION] == 10\n\n def testCheckpointReuseObject(self):\n \"\"\"Test that repeated save/restore never reuses same checkpoint dir.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n count = sum(\"checkpoint-\" in path\n for path in os.listdir(checkpoint_dir))\n assert count == 1, os.listdir(checkpoint_dir)\n\n for step in range(20):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"a\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore_from_object(checkpoint)\n for i in range(2):\n result = new_trainable.train()\n checkpoint = 
new_trainable.save_to_object()\n new_trainable.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 10)\n\n def testCheckpointReuseObjectWithoutTraining(self):\n \"\"\"Test that repeated save/restore never reuses same checkpoint dir.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n count = sum(\"checkpoint-\" in path\n for path in os.listdir(checkpoint_dir))\n assert count == 1, os.listdir(checkpoint_dir)\n\n for step in range(20):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"a\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n new_trainable = wrapped(logger_creator=self.logger_creator)\n for i in range(2):\n result = new_trainable.train()\n checkpoint = new_trainable.save_to_object()\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore_from_object(checkpoint)\n new_trainable2.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore_from_object(checkpoint)\n result = new_trainable2.train()\n new_trainable2.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 3)\n\n def testReuseNullCheckpoint(self):\n def train(config, checkpoint_dir=None):\n assert not checkpoint_dir\n for step in range(10):\n tune.report(test=step)\n\n # Create checkpoint\n wrapped = wrap_function(train)\n checkpoint = None\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n\n # Use the checkpoint a couple of times\n for i in range(3):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.restore(checkpoint)\n new_trainable.stop()\n\n # Make sure the result is still good\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.restore(checkpoint)\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 1)\n\n def testMultipleNullCheckpoints(self):\n def train(config, checkpoint_dir=None):\n assert not checkpoint_dir\n for step in range(10):\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore(checkpoint)\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n self.assertTrue(result[TRAINING_ITERATION] == 1)\n\n def testMultipleNullMemoryCheckpoints(self):\n def train(config, checkpoint_dir=None):\n assert not checkpoint_dir\n for step in range(10):\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n checkpoint = None\n for i in range(5):\n new_trainable = wrapped(logger_creator=self.logger_creator)\n if checkpoint:\n new_trainable.restore_from_object(checkpoint)\n result = new_trainable.train()\n checkpoint = new_trainable.save_to_object()\n new_trainable.stop()\n assert result[TRAINING_ITERATION] == 1\n\n def testFunctionNoCheckpointing(self):\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n assert os.path.exists(checkpoint_dir)\n for step in range(10):\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n\n new_trainable = wrapped(logger_creator=self.logger_creator)\n result = new_trainable.train()\n checkpoint = new_trainable.save()\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n 
new_trainable2.restore(checkpoint)\n result = new_trainable2.train()\n self.assertEquals(result[TRAINING_ITERATION], 1)\n checkpoint = new_trainable2.save()\n new_trainable2.stop()\n\n def testFunctionRecurringSave(self):\n \"\"\"This tests that save and restore are commutative.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n assert os.path.exists(checkpoint_dir)\n for step in range(10):\n if step % 3 == 0:\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n with open(path, \"w\") as f:\n f.write(json.dumps({\"step\": step}))\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.train()\n checkpoint_obj = new_trainable.save_to_object()\n new_trainable.restore_from_object(checkpoint_obj)\n checkpoint = new_trainable.save()\n\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore(checkpoint)\n new_trainable2.train()\n new_trainable2.stop()\n\n def testFunctionImmediateSave(self):\n \"\"\"This tests that save and restore are commutative.\"\"\"\n\n def train(config, checkpoint_dir=None):\n if checkpoint_dir:\n assert os.path.exists(checkpoint_dir)\n for step in range(10):\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n print(checkpoint_dir)\n path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(step))\n open(path, \"w\").close()\n tune.report(test=step)\n\n wrapped = wrap_function(train)\n new_trainable = wrapped(logger_creator=self.logger_creator)\n new_trainable.train()\n new_trainable.train()\n checkpoint_obj = new_trainable.save_to_object()\n new_trainable.stop()\n\n new_trainable2 = wrapped(logger_creator=self.logger_creator)\n new_trainable2.restore_from_object(checkpoint_obj)\n checkpoint_obj = new_trainable2.save_to_object()\n new_trainable2.train()\n result = new_trainable2.train()\n assert sum(\"tmp\" in path for path in os.listdir(self.logdir)) == 1\n new_trainable2.stop()\n assert sum(\"tmp\" in path for path in os.listdir(self.logdir)) == 0\n assert result[TRAINING_ITERATION] == 4\n\n\nclass FunctionApiTest(unittest.TestCase):\n def setUp(self):\n ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024)\n\n def tearDown(self):\n ray.shutdown()\n _register_all() # re-register the evicted objects\n\n def testCheckpointError(self):\n def train(config, checkpoint_dir=False):\n pass\n\n with self.assertRaises(ValueError):\n tune.run(train, checkpoint_freq=1)\n with self.assertRaises(ValueError):\n tune.run(train, checkpoint_at_end=True)\n\n def testCheckpointFunctionAtEnd(self):\n def train(config, checkpoint_dir=False):\n for i in range(10):\n tune.report(test=i)\n with tune.checkpoint_dir(step=10) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"hello\")\n\n [trial] = tune.run(train).trials\n assert os.path.exists(os.path.join(trial.checkpoint.value, \"ckpt.log\"))\n\n def testCheckpointFunctionAtEndContext(self):\n def train(config, checkpoint_dir=False):\n for i in range(10):\n tune.report(test=i)\n with tune.checkpoint_dir(step=10) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"hello\")\n\n [trial] = tune.run(train).trials\n assert os.path.exists(os.path.join(trial.checkpoint.value, \"ckpt.log\"))\n\n def 
testVariousCheckpointFunctionAtEnd(self):\n def train(config, checkpoint_dir=False):\n for i in range(10):\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"hello\")\n tune.report(test=i)\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log2\")\n with open(checkpoint_path, \"w\") as f:\n f.write(\"goodbye\")\n\n [trial] = tune.run(train, keep_checkpoints_num=3).trials\n assert os.path.exists(\n os.path.join(trial.checkpoint.value, \"ckpt.log2\"))\n\n def testReuseCheckpoint(self):\n def train(config, checkpoint_dir=None):\n itr = 0\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"ckpt.log\"), \"r\") as f:\n itr = int(f.read()) + 1\n\n for i in range(itr, config[\"max_iter\"]):\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(str(i))\n tune.report(test=i, training_iteration=i)\n\n [trial] = tune.run(\n train,\n config={\n \"max_iter\": 5\n },\n ).trials\n last_ckpt = trial.checkpoint.value\n assert os.path.exists(os.path.join(trial.checkpoint.value, \"ckpt.log\"))\n analysis = tune.run(train, config={\"max_iter\": 10}, restore=last_ckpt)\n trial_dfs = list(analysis.trial_dataframes.values())\n assert len(trial_dfs[0][\"training_iteration\"]) == 5\n\n def testRetry(self):\n def train(config, checkpoint_dir=None):\n restored = bool(checkpoint_dir)\n itr = 0\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"ckpt.log\"), \"r\") as f:\n itr = int(f.read()) + 1\n\n for i in range(itr, 10):\n if i == 5 and not restored:\n raise Exception(\"try to fail me\")\n with tune.checkpoint_dir(step=i) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(str(i))\n tune.report(test=i, training_iteration=i)\n\n analysis = tune.run(train, max_failures=3)\n last_ckpt = analysis.trials[0].checkpoint.value\n assert os.path.exists(os.path.join(last_ckpt, \"ckpt.log\"))\n trial_dfs = list(analysis.trial_dataframes.values())\n assert len(trial_dfs[0][\"training_iteration\"]) == 10\n\n def testEnabled(self):\n def train(config, checkpoint_dir=None):\n is_active = tune.is_session_enabled()\n if is_active:\n tune.report(active=is_active)\n return is_active\n\n assert train({}) is False\n analysis = tune.run(train)\n t = analysis.trials[0]\n assert t.last_result[\"active\"]\n\n def testBlankCheckpoint(self):\n def train(config, checkpoint_dir=None):\n restored = bool(checkpoint_dir)\n itr = 0\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"ckpt.log\"), \"r\") as f:\n itr = int(f.read()) + 1\n\n for i in range(itr, 10):\n if i == 5 and not restored:\n raise Exception(\"try to fail me\")\n with tune.checkpoint_dir(step=itr) as checkpoint_dir:\n checkpoint_path = os.path.join(checkpoint_dir, \"ckpt.log\")\n with open(checkpoint_path, \"w\") as f:\n f.write(str(i))\n tune.report(test=i, training_iteration=i)\n\n analysis = tune.run(train, max_failures=3)\n trial_dfs = list(analysis.trial_dataframes.values())\n assert len(trial_dfs[0][\"training_iteration\"]) == 10\n\n def testWithParameters(self):\n class Data:\n def __init__(self):\n self.data = [0] * 500_000\n\n data = Data()\n data.data[100] = 1\n\n def train(config, data=None):\n data.data[101] = 2 # Changes are local\n 
tune.report(metric=len(data.data), hundred=data.data[100])\n\n trial_1, trial_2 = tune.run(\n with_parameters(train, data=data), num_samples=2).trials\n\n self.assertEquals(data.data[101], 0)\n self.assertEquals(trial_1.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_1.last_result[\"hundred\"], 1)\n self.assertEquals(trial_2.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_2.last_result[\"hundred\"], 1)\n self.assertTrue(str(trial_1).startswith(\"train_\"))\n\n # With checkpoint dir parameter\n def train(config, checkpoint_dir=\"DIR\", data=None):\n data.data[101] = 2 # Changes are local\n tune.report(metric=len(data.data), cp=checkpoint_dir)\n\n trial_1, trial_2 = tune.run(\n with_parameters(train, data=data), num_samples=2).trials\n\n self.assertEquals(data.data[101], 0)\n self.assertEquals(trial_1.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_1.last_result[\"cp\"], \"DIR\")\n self.assertEquals(trial_2.last_result[\"metric\"], 500_000)\n self.assertEquals(trial_2.last_result[\"cp\"], \"DIR\")\n self.assertTrue(str(trial_1).startswith(\"train_\"))\n\n def testWithParameters2(self):\n class Data:\n def __init__(self):\n import numpy as np\n self.data = np.random.rand((2 * 1024 * 1024))\n\n def train(config, data=None):\n tune.report(metric=len(data.data))\n\n trainable = tune.with_parameters(train, data=Data())\n dumped = cloudpickle.dumps(trainable)\n assert sys.getsizeof(dumped) < 100 * 1024\n\n def testReturnAnonymous(self):\n def train(config):\n return config[\"a\"]\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4)\n self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8)\n\n def testReturnSpecific(self):\n def train(config):\n return {\"m\": config[\"a\"]}\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[\"m\"], 4)\n self.assertEquals(trial_2.last_result[\"m\"], 8)\n\n def testYieldAnonymous(self):\n def train(config):\n for i in range(10):\n yield config[\"a\"] + i\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4 + 9)\n self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8 + 9)\n\n def testYieldSpecific(self):\n def train(config):\n for i in range(10):\n yield {\"m\": config[\"a\"] + i}\n\n trial_1, trial_2 = tune.run(\n train, config={\n \"a\": tune.grid_search([4, 8])\n }).trials\n\n self.assertEquals(trial_1.last_result[\"m\"], 4 + 9)\n self.assertEquals(trial_2.last_result[\"m\"], 8 + 9)\n"
] | [
[
"numpy.random.rand"
]
] |
takuto0831/Competition-utils | [
"c738e199c6a771a0c58b9cd237660bb76b4be4fb"
] | [
"pyscript/torch/utils.py"
] | [
"import os\nimport random\nimport subprocess\nimport numpy as np\nimport torch\nimport time\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n XLA = True\nexcept ModuleNotFoundError:\n XLA = False\n\n\ndef freeze_module(module):\n for i, param in enumerate(module.parameters()):\n param.requires_grad = False\n\n\ndef fit_state_dict(state_dict, model):\n '''\n Ignore size mismatch when loading state_dict\n '''\n for name, param in model.named_parameters():\n new_param = state_dict[name]\n if new_param.size() != param.size():\n print(f'Size mismatch in {name}: {new_param.shape} -> {param.shape}')\n state_dict.pop(name)\n\n\ndef get_device(arg):\n if isinstance(arg, torch.device) or \\\n (XLA and isinstance(arg, xm.xla_device)):\n device = arg\n elif arg is None or isinstance(arg, (list, tuple)):\n if XLA:\n device = xm.xla_device()\n else:\n device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu')\n elif isinstance(arg, str):\n if arg == 'xla' and XLA:\n device = xm.xla_device()\n else:\n device = torch.device(arg)\n \n if isinstance(arg, (list, tuple)):\n if isinstance(arg[0], int):\n device_ids = list(arg)\n elif isinstance(arg[0], str) and arg[0].isnumeric():\n device_ids = [ int(a) for a in arg ]\n else:\n raise ValueError(f'Invalid device: {arg}')\n else:\n if device.type == 'cuda':\n assert torch.cuda.is_available()\n if device.index is None:\n device_count = torch.cuda.device_count()\n if device_count > 1:\n device_ids = list(range(device_count))\n else:\n device_ids = [0]\n else:\n device_ids = [device.index]\n else:\n device_ids = [device.index]\n \n return device, device_ids\n\n\ndef seed_everything(random_state=0, deterministic=False):\n random.seed(random_state)\n os.environ['PYTHONHASHSEED'] = str(random_state)\n np.random.seed(random_state)\n torch.manual_seed(random_state)\n torch.cuda.manual_seed(random_state)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else:\n torch.backends.cudnn.deterministic = False\n\n\ndef get_gpu_memory():\n \"\"\"\n Code borrowed from: \n https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/4\n\n Get the current gpu usage.\n\n Returns\n -------\n usage: dict\n Keys are device ids as integers.\n Values are memory usage as integers in MB.\n \"\"\"\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map\n\n\ndef get_time(time_format='%H:%M:%S'):\n return time.strftime(time_format, time.localtime())\n"
] | [
[
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.random.seed",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.device"
]
] |
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs | [
"57195ccab62d23dcbcac1a317f8a9811a9fd6cb5"
] | [
"models/GNN/GIN.py"
] | [
"from dgl import BatchedDGLGraph\nfrom dgl.nn.pytorch.conv import GINConv\nfrom torch import nn\n\nfrom models.GNN.GNNModelBase import GNNModelBase\nfrom models.utils import TypeConditionalLinear\n\n\nclass GIN(GNNModelBase):\n \"\"\"\n Graph Isomorphism Network as described in https://arxiv.org/pdf/1810.00826.pdf\n \"\"\"\n\n def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):\n super().__init__(**kwargs)\n self.layers = nn.ModuleList()\n for _ in range(self.n_layers):\n apply_func_layers = sum(\n [[nn.Linear(self.hidden_dim, self.hidden_dim),\n self.get_act(),\n self.get_norm(self.hidden_dim),\n nn.Dropout(self.p_dropout)] for _ in\n range(n_apply_func_layers)],\n [])\n apply_func = nn.Sequential(*apply_func_layers)\n self.layers.append(GINConv(apply_func=apply_func,\n aggregator_type=aggregator_type,\n init_eps=init_eps,\n learn_eps=learn_eps))\n\n def gnn_forward(self, g: BatchedDGLGraph):\n feats = g.ndata['h']\n for layer in self.layers:\n feats = layer(graph=g, feat=feats)\n readout = self.readout(g, feats)\n out = self.fcout(readout)\n return out\n\n\nclass RelationalGIN(GNNModelBase):\n \"\"\"\n Version of GIN that passes edge-type-conditional messages\n \"\"\"\n\n def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):\n super().__init__(**kwargs)\n self.n_relations = 2 * len(\n self.db_info['edge_type_to_int']) - 1 # there are negative edge types for the reverse edges\n self.layers = nn.ModuleList()\n for _ in range(self.n_layers):\n apply_func_layers = sum(\n [[nn.Linear(self.hidden_dim, self.hidden_dim),\n self.get_act(),\n self.get_norm(self.hidden_dim),\n nn.Dropout(self.p_dropout)] for _ in\n range(n_apply_func_layers)],\n [])\n apply_func = nn.Sequential(*apply_func_layers)\n self.layers.append(RelationalGINConv(apply_func=apply_func,\n activation=self.get_act(),\n aggregator_type=aggregator_type,\n hidden_dim=self.hidden_dim,\n init_eps=init_eps,\n learn_eps=learn_eps,\n num_rels=self.n_relations))\n\n def gnn_forward(self, g: BatchedDGLGraph):\n feats = g.ndata['h']\n etypes = g.edata['edge_types'] + self.n_relations // 2\n for layer in self.layers:\n feats = layer(graph=g, feat=feats, etypes=etypes)\n readout = self.readout(g, feats)\n out = self.fcout(readout)\n return out\n\n\nclass RelationalGINConv(GINConv):\n def __init__(self, apply_func, activation, aggregator_type, hidden_dim, init_eps=0, learn_eps=False, num_rels=0):\n super().__init__(apply_func, aggregator_type, init_eps, learn_eps)\n self.num_rels = num_rels\n self.act = activation\n self.edge_message_layer = TypeConditionalLinear(hidden_dim, hidden_dim, num_rels)\n\n def message_func(self, edges):\n msg = edges.src['h']\n msg = self.edge_message_layer(msg, edges.data['type'])\n msg = self.act(msg)\n return {'msg': msg}\n\n def forward(self, graph, feat, etypes):\n graph = graph.local_var()\n graph.ndata['h'] = feat\n graph.edata['type'] = etypes\n graph.update_all(self.message_func, self._reducer('msg', 'neigh'))\n rst = (1 + self.eps) * feat + graph.ndata['neigh']\n if self.apply_func is not None:\n rst = self.apply_func(rst)\n return rst\n\n\nclass ERGIN(RelationalGIN):\n \"\"\"\n GIN using different linear mappings for each node and edge type\n \"\"\"\n\n def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):\n super().__init__(n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs)\n self.n_node_types = len(self.db_info['node_type_to_int'])\n self.act = self.get_act()\n self.layers = 
nn.ModuleList()\n self.apply_func_blocks = nn.ModuleList()\n for _ in range(self.n_layers):\n self.layers.append(RelationalGINConv(apply_func=None,\n activation=self.get_act(),\n aggregator_type=aggregator_type,\n hidden_dim=self.hidden_dim,\n init_eps=init_eps,\n learn_eps=learn_eps,\n num_rels=self.n_relations))\n self.apply_func_blocks.append(\n nn.ModuleList([nn.ModuleDict({'tcl': TypeConditionalLinear(self.hidden_dim,\n self.hidden_dim,\n self.n_node_types),\n 'act': self.get_act(),\n 'norm': self.get_norm(self.hidden_dim),\n 'do': nn.Dropout(self.p_dropout)\n })\n for _ in range(n_apply_func_layers)])\n )\n\n def gnn_forward(self, g: BatchedDGLGraph):\n feats = g.ndata['h']\n ntypes = g.ndata['node_types']\n etypes = g.edata['edge_types'] + self.n_relations // 2\n for layer, apply_func_blocks in zip(self.layers, self.apply_func_blocks):\n feats = layer(graph=g, feat=feats, etypes=etypes)\n for block in apply_func_blocks:\n feats = block['tcl'](feats, ntypes)\n feats = block['act'](feats)\n feats = block['norm'](feats)\n feats = block['do'](feats)\n readout = self.readout(g, feats)\n out = self.fcout(readout)\n return out\n"
] | [
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.Dropout",
"torch.nn.Sequential"
]
] |
jlvdb/the-wizz | [
"21e88888472d2598a0db861aef31076078628b8e"
] | [
"pdf_maker.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"This code is the main access point for the majority of users of The-wiZZ. It\ntakes an input subselection of a survey catalog, a The-wiZZ HDF5 data file, and\nmatches the two together to create a resultant clustering redshift estimate\nthat can then be turned into a redshift PDF. This code also takes care of any\nweighting of the objects with unknown redshift, redshift binning, bootstrapping\nerrors, and output. See input_flags.py for a list of options or use --help from\nthe command line.\n\"\"\"\n\nimport numpy as np\n\nfrom the_wizz import core_utils\nfrom the_wizz import pdf_maker_utils\nfrom the_wizz import input_flags\n\n\nif __name__ == \"__main__\":\n print(\"\")\n print(\"The-wiZZ has begun conjuring: running pdf maker...\")\n # First we parse the command line for arguments as usual. See\n # input_flags.py for a full list of input arguments.\n args = input_flags.parse_input_pdf_args()\n input_flags.print_args(args)\n # Load the file containing all matched pairs of spectroscopic and\n # photometric objects.\n print(\"Loading unknown data...\")\n unknown_data = core_utils.file_checker_loader(args.unknown_sample_file)\n # Now we figure out what kind of redshift binning we would like to have.\n # This will be one of the largest impacts on the signal to noise of the\n # measurement. Some rules of thumb are:\n # The narrower bins are in redshift the better. You are measuring a\n # correlation, the narrower the bin size in comoving distance the more\n # correlated things will be and thus increase the amplitude. Aka use\n # Groth/Pebbles[sic] scaling to your advantage.\n # For a spectroscopic sample that is selected for a specific redshift\n # range with few galaxies outside that range (eg DEEP2), adaptive binning\n # is recommended. This will keep a equal number spectra per redshift bin.\n # A good rule is to try to have about 100 spectra per redshift bin for max\n # signal to noise.\n # Linear binning is provided as a curtesy and is not nesassarly\n # recommended. It will not give the best signal to noise compared to\n # adaptive and has the same draw backs as adaptive is that the bias could\n # be changing oddly from bin to bin. It is recommended that the user try\n # adaptive and comoving spaced bins for the best results. Comoving returns\n # bins that are of equal comoving distance from the line of sight. We also\n # provide binning in equal ln(1 + z). This is for people who want a\n # comoving like binning but without the dependece on cosmology. It also\n # has the convienent property of giving errors that can be more easlily\n # compared the usual simga/(1 + z) error.\n print(\"Creating bins...\")\n if args.z_binning_type[0] == 'linear':\n z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n elif args.z_binning_type[0] == 'adaptive':\n z_bin_edge_array = pdf_maker_utils._create_adaptive_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins,\n pdf_maker.reference_redshift_array)\n elif args.z_binning_type[0] == 'comoving':\n z_bin_edge_array = pdf_maker_utils._create_comoving_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n elif args.z_binning_type[0] == 'logspace':\n z_bin_edge_array = pdf_maker_utils._create_logspace_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n elif args.z_binning_type[0] == 'file':\n z_bin_edge_array = np.loadtxt(args.z_binning_type[1])[:-1]\n else:\n print(\"Requested binning name invalid. 
Valid types are:\")\n print(\"\\tlinear: linear binning in redshift\")\n print(\"\\tadaptive: constant reference objects per redshift bin\")\n print(\"\\tcomoving: linear binning in comoving distance\")\n print(\"\\tfile: file providing the bin edges\")\n print(\"Returning linear binning...\")\n z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(\n args.z_min, args.z_max, args.z_n_bins)\n # This is where the heavy lifting happens. We create our PDF maker object\n # which will hold the pair file for use, calculate the over density per\n # redshift bin, and also store intermediary results for later use.\n # Before we can estimate the PDF, we must mask for the objects we want\n # to estimate the redshit of. These objects can be color selected,\n # photo-z selected, or any other object selection you would like. The code\n # line below turns the array of indices in the hdf5 pair file, into a\n # single density estimate around the reference object.\n print(\"Starting indices matcher...\")\n pdf_maker = pdf_maker_utils.collapse_ids_to_single_estimate(\n args.input_pair_hdf5_file, args.pair_scale_name, unknown_data, args)\n # Before we calculated the pdfs, we want to know what the over densities\n # are in each of the regions calculated on the area we consider.\n print(\"Calculating region densities...\")\n pdf_maker.compute_region_densities(z_bin_edge_array, args.z_max)\n if args.output_region_pickle_file is not None:\n pdf_maker.write_region_densities(args.output_region_pickle_file, args)\n # Now that we've \"collapsed\" the estimate around the reference object we\n # need to bin up the results in redshift and create our final PDF.\n print(\"Calculating pdf...\")\n if args.bootstrap_samples is None:\n pdf_maker.compute_pdf_bootstrap(args.n_bootstrap)\n else:\n bootstrap_region_array = np.loadtxt(args.bootstrap_samples,\n dtype=np.int_)\n pdf_maker._compute_pdf_bootstrap(bootstrap_region_array)\n # Write individual bootstraps to file.\n if args.output_bootstraps_file is not None:\n pdf_maker.write_bootstrap_samples_to_ascii(args.output_bootstraps_file,\n args)\n # Now that we have the results. We just need to write them to file and we\n # are done.\n print(\"Writing...\")\n output_file = core_utils.create_ascii_file(args.output_pdf_file_name,\n args)\n pdf_maker.write_pdf_to_ascii(output_file)\n output_file.close()\n print(\"Done!\")\n"
] | [
[
"numpy.loadtxt"
]
] |
Agyey/fsdl-text-recognizer-2021-labs | [
"4bd85042ab9f6decd78849bb655c197cc13ffc11"
] | [
"lab4/text_recognizer/models/line_cnn.py"
] | [
"from typing import Any, Dict\nimport argparse\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nCONV_DIM = 64\nFC_DIM = 128\nWINDOW_WIDTH = 28\nWINDOW_STRIDE = 28\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU.\n \"\"\"\n\n def __init__(self, input_channels: int, output_channels: int, kernel_size: int = 3, stride: int = 1) -> None:\n super().__init__()\n self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=1)\n self.relu = nn.ReLU()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n of dimensions (B, C, H, W)\n\n Returns\n -------\n torch.Tensor\n of dimensions (B, C, H, W)\n \"\"\"\n c = self.conv(x)\n r = self.relu(c)\n return r\n\n\nclass LineCNN(nn.Module):\n \"\"\"\n Model that uses a simple CNN to process an image of a line of characters with a window, outputting a sequence of logits.\n \"\"\"\n\n def __init__(\n self,\n data_config: Dict[str, Any],\n args: argparse.Namespace = None,\n ) -> None:\n super().__init__()\n self.data_config = data_config\n self.args = vars(args) if args is not None else {}\n self.num_classes = len(data_config[\"mapping\"])\n self.output_length = data_config[\"output_dims\"][0]\n self.limit_output_length = self.args.get(\"limit_output_length\", False)\n\n _C, H, _W = data_config[\"input_dims\"]\n conv_dim = self.args.get(\"conv_dim\", CONV_DIM)\n fc_dim = self.args.get(\"fc_dim\", FC_DIM)\n self.WW = self.args.get(\"window_width\", WINDOW_WIDTH)\n self.WS = self.args.get(\"window_stride\", WINDOW_STRIDE)\n\n # Input is (1, H, W)\n self.conv1 = ConvBlock(1, conv_dim)\n self.conv2 = ConvBlock(conv_dim, conv_dim)\n self.conv3 = ConvBlock(conv_dim, conv_dim, stride=2)\n # Conv math! 
https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\n # OW = torch.floor((W // 2 - WW // 2) + 1)\n self.conv4 = ConvBlock(conv_dim, fc_dim, kernel_size=(H // 2, self.WW // 2), stride=(H // 2, self.WS // 2))\n self.dropout = nn.Dropout(0.25)\n self.fc1 = nn.Linear(fc_dim, fc_dim)\n self.fc2 = nn.Linear(fc_dim, self.num_classes)\n\n self._init_weights()\n\n def _init_weights(self):\n \"\"\"\n A better weight initialization scheme than PyTorch default.\n\n See https://github.com/pytorch/pytorch/issues/18182\n \"\"\"\n for m in self.modules():\n if type(m) in {\n nn.Conv2d,\n nn.Conv3d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d,\n nn.Linear,\n }:\n nn.init.kaiming_normal_(m.weight.data, a=0, mode=\"fan_out\", nonlinearity=\"relu\")\n if m.bias is not None:\n _fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Parameters\n ----------\n x\n (B, 1, H, W) input image\n\n Returns\n -------\n torch.Tensor\n (B, C, S) logits, where S is the length of the sequence and C is the number of classes\n S can be computed from W and self.window_width\n C is self.num_classes\n \"\"\"\n _B, _C, _H, W = x.shape\n x = self.conv1(x) # -> (B, CONV_DIM, H, W)\n x = self.conv2(x) # -> (B, CONV_DIM, H, W)\n x = self.conv3(x) # -> (B, CONV_DIM, H//2, W//2)\n OW = math.floor((W // 2 + 2 - self.WW // 2) / (self.WS // 2) + 1)\n x = self.conv4(x) # -> (B, FC_DIM, 1, OW)\n assert x.shape[-1] == OW\n x = x.squeeze().permute(0, 2, 1) # -> (B, OW, FC_DIM)\n x = F.relu(self.fc1(x)) # -> (B, OW, FC_DIM)\n x = self.dropout(x)\n x = self.fc2(x) # -> (B, OW, self.C)\n x = x.permute(0, 2, 1) # -> (B, self.C, OW)\n if self.limit_output_length:\n x = x[:, :, : self.output_length]\n return x\n\n @staticmethod\n def add_to_argparse(parser):\n parser.add_argument(\"--conv_dim\", type=int, default=CONV_DIM)\n parser.add_argument(\"--fc_dim\", type=int, default=FC_DIM)\n parser.add_argument(\n \"--window_width\",\n type=int,\n default=WINDOW_WIDTH,\n help=\"Width of the window that will slide over the input image.\",\n )\n parser.add_argument(\n \"--window_stride\",\n type=int,\n default=WINDOW_STRIDE,\n help=\"Stride of the window that will slide over the input image.\",\n )\n parser.add_argument(\"--limit_output_length\", action=\"store_true\", default=False)\n return parser\n"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.nn.Linear",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.init.normal_",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.nn.Dropout"
]
] |
addschile/qtps | [
"3220af82d409526463dc4fe9e4ea869d655c0bd8"
] | [
"data/compute_rates.py"
] | [
"import numpy as np\nfrom sys import argv\n\ntobs = int(argv[1])\np0 = np.zeros(10)\np2 = np.zeros(10)\np1 = np.zeros(10)\nZab = np.zeros(10)\nrate = np.zeros(10)\n\nfor i in range(10):\n da = np.loadtxt('tobs%d/reweighted_hist_%d.dat'%(tobs,i))\n p0[i] = np.exp(-da[-2,1])\n p2[i] = np.exp(-da[-1,1])\n p1[i] = np.exp(-da[-3,1])\nZab = p1/(p0+p2)\n\nf = open('tobs%d/path_partition_function_%d.dat'%(tobs,tobs),'w')\nfor i in range(10):\n f.write('%d %.16f\\n'%(i,Zab[i]))\n\nZab_avg = np.sum(Zab[:])/10.\nfor i in range(10):\n Zab[i] -= Zab_avg\nZab *= Zab\nstd_err = np.sqrt(np.sum(Zab[:])/10.)\nf.write('%.16f %.16f\\n'%(Zab_avg,std_err))\nf.close()\n"
] | [
[
"numpy.sum",
"numpy.exp",
"numpy.loadtxt",
"numpy.zeros"
]
] |
TillBeemelmanns/OpenPCDet | [
"b7553c879d0ba36477931efe07a55adbc39823b9"
] | [
"tools/test.py"
] | [
"import os\nimport torch\nfrom tensorboardX import SummaryWriter\nimport time\nimport glob\nimport re\nimport datetime\nimport argparse\nfrom pathlib import Path\nimport torch.distributed as dist\nfrom pcdet.datasets import build_dataloader\nfrom pcdet.models import build_network\nfrom pcdet.utils import common_utils\nfrom pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\nfrom eval_utils import eval_utils\n\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='arg parser')\n parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')\n\n parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')\n parser.add_argument('--epochs', type=int, default=80, required=False, help='Number of epochs to train for')\n parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')\n parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')\n parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')\n parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')\n parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')\n parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')\n parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')\n parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,\n help='set extra config keys if needed')\n\n parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')\n parser.add_argument('--start_epoch', type=int, default=0, help='')\n parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')\n parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')\n parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')\n parser.add_argument('--save_to_file', action='store_true', default=False, help='')\n\n args = parser.parse_args()\n\n cfg_from_yaml_file(args.cfg_file, cfg)\n cfg.TAG = Path(args.cfg_file).stem\n cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs, cfg)\n\n return args, cfg\n\n\ndef eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):\n # load checkpoint\n model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)\n model.cuda()\n\n # start evaluation\n eval_utils.eval_one_epoch(\n cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,\n result_dir=eval_output_dir, save_to_file=args.save_to_file\n )\n\n\ndef get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):\n ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))\n ckpt_list.sort(key=os.path.getmtime)\n evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()]\n\n for cur_ckpt in ckpt_list:\n num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)\n if num_list.__len__() == 0:\n continue\n\n epoch_id = num_list[-1]\n if 'optim' in epoch_id:\n continue\n if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:\n return epoch_id, cur_ckpt\n 
return -1, None\n\n\ndef repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):\n # evaluated ckpt record\n ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])\n with open(ckpt_record_file, 'a'):\n pass\n\n # tensorboard log\n if cfg.LOCAL_RANK == 0:\n tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))\n total_time = 0\n first_eval = True\n\n while True:\n # check whether there is checkpoint which is not evaluated\n cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)\n if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:\n wait_second = 30\n if cfg.LOCAL_RANK == 0:\n print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \\r'\n % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)\n time.sleep(wait_second)\n total_time += 30\n if total_time > args.max_waiting_mins * 60 and (first_eval is False):\n break\n continue\n\n total_time = 0\n first_eval = False\n\n model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)\n model.cuda()\n\n # start evaluation\n cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']\n tb_dict = eval_utils.eval_one_epoch(\n cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,\n result_dir=cur_result_dir, save_to_file=args.save_to_file\n )\n\n if cfg.LOCAL_RANK == 0:\n for key, val in tb_dict.items():\n tb_log.add_scalar(key, val, cur_epoch_id)\n\n # record this epoch which has been evaluated\n with open(ckpt_record_file, 'a') as f:\n print('%s' % cur_epoch_id, file=f)\n logger.info('Epoch %s has been evaluated' % cur_epoch_id)\n\n\ndef main():\n args, cfg = parse_config()\n if args.launcher == 'none':\n dist_test = False\n else:\n args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(\n args.batch_size, args.tcp_port, args.local_rank, backend='nccl'\n )\n dist_test = True\n\n output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag\n output_dir.mkdir(parents=True, exist_ok=True)\n\n eval_output_dir = output_dir / 'eval'\n\n if not args.eval_all:\n num_list = re.findall(r'\\d+', args.ckpt) if args.ckpt is not None else []\n epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'\n eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']\n else:\n eval_output_dir = eval_output_dir / 'eval_all_default'\n\n if args.eval_tag is not None:\n eval_output_dir = eval_output_dir / args.eval_tag\n\n eval_output_dir.mkdir(parents=True, exist_ok=True)\n log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))\n logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)\n\n # log to file\n logger.info('**********************Start logging**********************')\n gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'\n logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)\n\n if dist_test:\n total_gpus = dist.get_world_size()\n logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))\n for key, val in vars(args).items():\n logger.info('{:16} {}'.format(key, val))\n log_config_to_file(cfg, logger=logger)\n\n ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'\n\n test_set, test_loader, sampler = build_dataloader(\n 
dataset_cfg=cfg.DATA_CONFIG,\n class_names=cfg.CLASS_NAMES,\n batch_size=args.batch_size,\n dist=dist_test, workers=args.workers, logger=logger, training=False\n )\n\n model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)\n with torch.no_grad():\n if args.eval_all:\n repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)\n else:\n eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.no_grad",
"torch.distributed.get_world_size"
]
] |
anthowen/duplify | [
"846d01c1b21230937fdf0281b0cf8c0b08a8c24e",
"9444dce96954c546333d5aecc92a06c3bfd19aa5"
] | [
"env/lib/python3.6/site-packages/pandas/core/panel.py",
"env/lib/python3.6/site-packages/scipy/optimize/tests/test__spectral.py"
] | [
"\"\"\"\nContains data structures designed for manipulating panel (3-dimensional) data\n\"\"\"\n# pylint: disable=E1103,W0231,W0212,W0621\nfrom __future__ import division\n\nimport warnings\n\nimport numpy as np\n\nfrom pandas.types.cast import (_infer_dtype_from_scalar,\n _possibly_cast_item)\nfrom pandas.types.common import (is_integer, is_list_like,\n is_string_like, is_scalar)\nfrom pandas.types.missing import notnull\n\nimport pandas.computation.expressions as expressions\nimport pandas.core.common as com\nimport pandas.core.ops as ops\nimport pandas.core.missing as missing\nfrom pandas import compat\nfrom pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.core.common import PandasError, _try_sort, _default_index\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.index import (Index, MultiIndex, _ensure_index,\n _get_combined_index)\nfrom pandas.formats.printing import pprint_thing\nfrom pandas.core.indexing import maybe_droplevels\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays,\n create_block_manager_from_blocks)\nfrom pandas.core.ops import _op_descriptions\nfrom pandas.core.series import Series\nfrom pandas.tools.util import cartesian_product\nfrom pandas.util.decorators import (deprecate, Appender)\n\n_shared_doc_kwargs = dict(\n axes='items, major_axis, minor_axis',\n klass=\"Panel\",\n axes_single_arg=\"{0, 1, 2, 'items', 'major_axis', 'minor_axis'}\")\n_shared_doc_kwargs['args_transpose'] = (\"three positional arguments: each one\"\n \"of\\n%s\" %\n _shared_doc_kwargs['axes_single_arg'])\n\n\ndef _ensure_like_indices(time, panels):\n \"\"\"\n Makes sure that time and panels are conformable\n \"\"\"\n n_time = len(time)\n n_panel = len(panels)\n u_panels = np.unique(panels) # this sorts!\n u_time = np.unique(time)\n if len(u_time) == n_time:\n time = np.tile(u_time, len(u_panels))\n if len(u_panels) == n_panel:\n panels = np.repeat(u_panels, len(u_time))\n return time, panels\n\n\ndef panel_index(time, panels, names=None):\n \"\"\"\n Returns a multi-index suitable for a panel-like DataFrame\n\n Parameters\n ----------\n time : array-like\n Time index, does not have to repeat\n panels : array-like\n Panel index, does not have to repeat\n names : list, optional\n List containing the names of the indices\n\n Returns\n -------\n multi_index : MultiIndex\n Time index is the first level, the panels are the second level.\n\n Examples\n --------\n >>> years = range(1960,1963)\n >>> panels = ['A', 'B', 'C']\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),\n (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),\n (1962, 'C')], dtype=object)\n\n or\n\n >>> import numpy as np\n >>> years = np.repeat(range(1960,1963), 3)\n >>> panels = np.tile(['A', 'B', 'C'], 3)\n >>> panel_idx = panel_index(years, panels)\n >>> panel_idx\n MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),\n (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),\n (1962, 'C')], dtype=object)\n \"\"\"\n if names is None:\n names = ['time', 'panel']\n time, panels = _ensure_like_indices(time, panels)\n return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)\n\n\nclass Panel(NDFrame):\n \"\"\"\n Represents wide format panel data, stored as 3-dimensional array\n\n Parameters\n ----------\n data : ndarray (items x major x minor), or dict of 
DataFrames\n items : Index or array-like\n axis=0\n major_axis : Index or array-like\n axis=1\n minor_axis : Index or array-like\n axis=2\n dtype : dtype, default None\n Data type to force, otherwise infer\n copy : boolean, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input\n \"\"\"\n\n @property\n def _constructor(self):\n return type(self)\n\n _constructor_sliced = DataFrame\n\n def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,\n copy=False, dtype=None):\n self._init_data(data=data, items=items, major_axis=major_axis,\n minor_axis=minor_axis, copy=copy, dtype=dtype)\n\n def _init_data(self, data, copy, dtype, **kwargs):\n \"\"\"\n Generate ND initialization; axes are passed\n as required objects to __init__\n \"\"\"\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]\n\n if kwargs:\n raise TypeError('_init_data() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n axes = None\n if isinstance(data, BlockManager):\n if any(x is not None for x in passed_axes):\n axes = [x if x is not None else y\n for x, y in zip(passed_axes, data.axes)]\n mgr = data\n elif isinstance(data, dict):\n mgr = self._init_dict(data, passed_axes, dtype=dtype)\n copy = False\n dtype = None\n elif isinstance(data, (np.ndarray, list)):\n mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)\n copy = False\n dtype = None\n elif is_scalar(data) and all(x is not None for x in passed_axes):\n if dtype is None:\n dtype, data = _infer_dtype_from_scalar(data)\n values = np.empty([len(x) for x in passed_axes], dtype=dtype)\n values.fill(data)\n mgr = self._init_matrix(values, passed_axes, dtype=dtype,\n copy=False)\n copy = False\n else: # pragma: no cover\n raise PandasError('Panel constructor not properly called!')\n\n NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)\n\n def _init_dict(self, data, axes, dtype=None):\n haxis = axes.pop(self._info_axis_number)\n\n # prefilter if haxis passed\n if haxis is not None:\n haxis = _ensure_index(haxis)\n data = OrderedDict((k, v)\n for k, v in compat.iteritems(data)\n if k in haxis)\n else:\n ks = list(data.keys())\n if not isinstance(data, OrderedDict):\n ks = _try_sort(ks)\n haxis = Index(ks)\n\n for k, v in compat.iteritems(data):\n if isinstance(v, dict):\n data[k] = self._constructor_sliced(v)\n\n # extract axis for remaining axes & create the slicemap\n raxes = [self._extract_axis(self, data, axis=i) if a is None else a\n for i, a in enumerate(axes)]\n raxes_sm = self._extract_axes_for_slice(self, raxes)\n\n # shallow copy\n arrays = []\n haxis_shape = [len(a) for a in raxes]\n for h in haxis:\n v = values = data.get(h)\n if v is None:\n values = np.empty(haxis_shape, dtype=dtype)\n values.fill(np.nan)\n elif isinstance(v, self._constructor_sliced):\n d = raxes_sm.copy()\n d['copy'] = False\n v = v.reindex(**d)\n if dtype is not None:\n v = v.astype(dtype)\n values = v.values\n arrays.append(values)\n\n return self._init_arrays(arrays, haxis, [haxis] + raxes)\n\n def _init_arrays(self, arrays, arr_names, axes):\n return create_block_manager_from_arrays(arrays, arr_names, axes)\n\n @classmethod\n def from_dict(cls, data, intersect=False, orient='items', dtype=None):\n \"\"\"\n Construct Panel from dict of DataFrame objects\n\n Parameters\n ----------\n data : dict\n {field : DataFrame}\n intersect : boolean\n Intersect indexes of input DataFrames\n orient : 
{'items', 'minor'}, default 'items'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the items of the result panel, pass 'items'\n (default). Otherwise if the columns of the values of the passed\n DataFrame objects should be the items (which in the case of\n mixed-dtype data you should do), instead pass 'minor'\n dtype : dtype, default None\n Data type to force, otherwise infer\n\n Returns\n -------\n Panel\n \"\"\"\n orient = orient.lower()\n if orient == 'minor':\n new_data = OrderedDefaultdict(dict)\n for col, df in compat.iteritems(data):\n for item, s in compat.iteritems(df):\n new_data[item][col] = s\n data = new_data\n elif orient != 'items': # pragma: no cover\n raise ValueError('Orientation must be one of {items, minor}.')\n\n d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)\n ks = list(d['data'].keys())\n if not isinstance(d['data'], OrderedDict):\n ks = list(sorted(ks))\n d[cls._info_axis_name] = Index(ks)\n return cls(**d)\n\n def __getitem__(self, key):\n key = com._apply_if_callable(key, self)\n\n if isinstance(self._info_axis, MultiIndex):\n return self._getitem_multilevel(key)\n if not (is_list_like(key) or isinstance(key, slice)):\n return super(Panel, self).__getitem__(key)\n return self.ix[key]\n\n def _getitem_multilevel(self, key):\n info = self._info_axis\n loc = info.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_index = info[loc]\n result_index = maybe_droplevels(new_index, key)\n slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]\n new_values = self.values[slices]\n\n d = self._construct_axes_dict(self._AXIS_ORDERS[1:])\n d[self._info_axis_name] = result_index\n result = self._constructor(new_values, **d)\n return result\n else:\n return self._get_item_cache(key)\n\n def _init_matrix(self, data, axes, dtype=None, copy=False):\n values = self._prep_ndarray(self, data, copy=copy)\n\n if dtype is not None:\n try:\n values = values.astype(dtype)\n except Exception:\n raise ValueError('failed to cast to %s' % dtype)\n\n shape = values.shape\n fixed_axes = []\n for i, ax in enumerate(axes):\n if ax is None:\n ax = _default_index(shape[i])\n else:\n ax = _ensure_index(ax)\n fixed_axes.append(ax)\n\n return create_block_manager_from_blocks([values], fixed_axes)\n\n # ----------------------------------------------------------------------\n # Comparison methods\n\n def _compare_constructor(self, other, func):\n if not self._indexed_same(other):\n raise Exception('Can only compare identically-labeled '\n 'same type objects')\n\n new_data = {}\n for col in self._info_axis:\n new_data[col] = func(self[col], other[col])\n\n d = self._construct_axes_dict(copy=False)\n return self._constructor(data=new_data, **d)\n\n # ----------------------------------------------------------------------\n # Magic methods\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular Panel\n\n Invoked by unicode(df) in py2 only.\n Yields a Unicode String in both py2/py3.\n \"\"\"\n\n class_name = str(self.__class__)\n\n shape = self.shape\n dims = u('Dimensions: %s') % ' x '.join(\n [\"%d (%s)\" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])\n\n def axis_pretty(a):\n v = getattr(self, a)\n if len(v) > 0:\n return u('%s axis: %s to %s') % (a.capitalize(),\n pprint_thing(v[0]),\n pprint_thing(v[-1]))\n else:\n return u('%s axis: None') % a.capitalize()\n\n output = '\\n'.join(\n [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])\n return output\n\n def 
_get_plane_axes_index(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes indexes\n \"\"\"\n axis_name = self._get_axis_name(axis)\n\n if axis_name == 'major_axis':\n index = 'minor_axis'\n columns = 'items'\n if axis_name == 'minor_axis':\n index = 'major_axis'\n columns = 'items'\n elif axis_name == 'items':\n index = 'major_axis'\n columns = 'minor_axis'\n\n return index, columns\n\n def _get_plane_axes(self, axis):\n \"\"\"\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes\n \"\"\"\n return [self._get_axis(axi)\n for axi in self._get_plane_axes_index(axis)]\n\n fromDict = from_dict\n\n def to_sparse(self, *args, **kwargs):\n \"\"\"\n NOT IMPLEMENTED: do not call this method, as sparsifying is not\n supported for Panel objects and will raise an error.\n\n Convert to SparsePanel\n \"\"\"\n raise NotImplementedError(\"sparsifying is not supported \"\n \"for Panel objects\")\n\n def to_excel(self, path, na_rep='', engine=None, **kwargs):\n \"\"\"\n Write each DataFrame in Panel to a separate excel sheet\n\n Parameters\n ----------\n path : string or ExcelWriter object\n File path or existing ExcelWriter\n na_rep : string, default ''\n Missing data representation\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n\n Other Parameters\n ----------------\n float_format : string, default None\n Format string for floating point numbers\n cols : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. 
A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : upper left cell row to dump data frame\n startcol : upper left cell column to dump data frame\n\n Notes\n -----\n Keyword arguments (and na_rep) are passed to the ``to_excel`` method\n for each DataFrame written.\n \"\"\"\n from pandas.io.excel import ExcelWriter\n\n if isinstance(path, compat.string_types):\n writer = ExcelWriter(path, engine=engine)\n else:\n writer = path\n kwargs['na_rep'] = na_rep\n\n for item, df in self.iteritems():\n name = str(item)\n df.to_excel(writer, name, **kwargs)\n writer.save()\n\n def as_matrix(self):\n self._consolidate_inplace()\n return self._data.as_matrix()\n\n # ----------------------------------------------------------------------\n # Getting and setting elements\n\n def get_value(self, *args, **kwargs):\n \"\"\"\n Quickly retrieve single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n nargs = len(args)\n nreq = self._AXIS_LEN\n\n # require an arg for each axis\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis, you gave'\n ' {0} args, but {1} are required'.format(nargs,\n nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('get_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n return lower.get_value(*args[1:], takeable=takeable)\n\n def set_value(self, *args, **kwargs):\n \"\"\"\n Quickly set single value at (item, major, minor) location\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n value : scalar\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n panel : Panel\n If label combo is contained, will be reference to calling Panel,\n otherwise a new object\n \"\"\"\n # require an arg for each axis and the value\n nargs = len(args)\n nreq = self._AXIS_LEN + 1\n\n if nargs != nreq:\n raise TypeError('There must be an argument for each axis plus the '\n 'value provided, you gave {0} args, but {1} are '\n 'required'.format(nargs, nreq))\n takeable = kwargs.pop('takeable', None)\n\n if kwargs:\n raise TypeError('set_value() got an unexpected keyword '\n 'argument \"{0}\"'.format(list(kwargs.keys())[0]))\n\n try:\n if takeable is True:\n lower = self._iget_item_cache(args[0])\n else:\n lower = self._get_item_cache(args[0])\n\n lower.set_value(*args[1:], takeable=takeable)\n return self\n except KeyError:\n axes = self._expand_axes(args)\n d = self._construct_axes_dict_from(self, axes, copy=False)\n result = self.reindex(**d)\n args = list(args)\n likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])\n made_bigger = not np.array_equal(axes[0], self._info_axis)\n # how to make this logic simpler?\n if made_bigger:\n _possibly_cast_item(result, args[0], likely_dtype)\n\n return result.set_value(*args)\n\n def _box_item_values(self, key, values):\n if self.ndim == values.ndim:\n result = self._constructor(values)\n\n # a dup selection will yield a full ndim\n if result._get_axis(0).is_unique:\n result = result[key]\n\n return result\n\n d = 
self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])\n return self._constructor_sliced(values, **d)\n\n def __setitem__(self, key, value):\n key = com._apply_if_callable(key, self)\n shape = tuple(self.shape)\n if isinstance(value, self._constructor_sliced):\n value = value.reindex(\n **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))\n mat = value.values\n elif isinstance(value, np.ndarray):\n if value.shape != shape[1:]:\n raise ValueError('shape of value must be {0}, shape of given '\n 'object was {1}'.format(\n shape[1:], tuple(map(int, value.shape))))\n mat = np.asarray(value)\n elif is_scalar(value):\n dtype, value = _infer_dtype_from_scalar(value)\n mat = np.empty(shape[1:], dtype=dtype)\n mat.fill(value)\n else:\n raise TypeError('Cannot set item of type: %s' % str(type(value)))\n\n mat = mat.reshape(tuple([1]) + shape[1:])\n NDFrame._set_item(self, key, mat)\n\n def _unpickle_panel_compat(self, state): # pragma: no cover\n \"Unpickle the panel\"\n _unpickle = com._unpickle_array\n vals, items, major, minor = state\n\n items = _unpickle(items)\n major = _unpickle(major)\n minor = _unpickle(minor)\n values = _unpickle(vals)\n wp = Panel(values, items, major, minor)\n self._data = wp._data\n\n def conform(self, frame, axis='items'):\n \"\"\"\n Conform input DataFrame to align with chosen axis pair.\n\n Parameters\n ----------\n frame : DataFrame\n axis : {'items', 'major', 'minor'}\n\n Axis the input corresponds to. E.g., if axis='major', then\n the frame's columns would be items, and the index would be\n values of the minor axis\n\n Returns\n -------\n DataFrame\n \"\"\"\n axes = self._get_plane_axes(axis)\n return frame.reindex(**self._extract_axes_for_slice(self, axes))\n\n def head(self, n=5):\n raise NotImplementedError\n\n def tail(self, n=5):\n raise NotImplementedError\n\n def round(self, decimals=0, *args, **kwargs):\n \"\"\"\n Round each value in Panel to a specified number of decimal places.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n decimals : int\n Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of\n positions to the left of the decimal point.\n\n Returns\n -------\n Panel object\n\n See Also\n --------\n numpy.around\n \"\"\"\n nv.validate_round(args, kwargs)\n\n if is_integer(decimals):\n result = np.apply_along_axis(np.round, 0, self.values)\n return self._wrap_result(result, axis=0)\n raise TypeError(\"decimals must be an integer\")\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\" don't allow a multi reindex on Panel or above ndim \"\"\"\n return False\n\n def align(self, other, **kwargs):\n raise NotImplementedError\n\n def dropna(self, axis=0, how='any', inplace=False):\n \"\"\"\n Drop 2D from panel, holding passed axis constant\n\n Parameters\n ----------\n axis : int, default 0\n Axis to hold constant. E.g. axis=1 will drop major_axis entries\n having a certain amount of NA data\n how : {'all', 'any'}, default 'any'\n 'any': one or more values are NA in the DataFrame along the\n axis. 
For 'all' they all must be.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : Panel\n \"\"\"\n axis = self._get_axis_number(axis)\n\n values = self.values\n mask = notnull(values)\n\n for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):\n mask = mask.sum(ax)\n\n per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])\n\n if how == 'all':\n cond = mask > 0\n else:\n cond = mask == per_slice\n\n new_ax = self._get_axis(axis)[cond]\n result = self.reindex_axis(new_ax, axis=axis)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _combine(self, other, func, axis=0):\n if isinstance(other, Panel):\n return self._combine_panel(other, func)\n elif isinstance(other, DataFrame):\n return self._combine_frame(other, func, axis=axis)\n elif is_scalar(other):\n return self._combine_const(other, func)\n else:\n raise NotImplementedError(\"%s is not supported in combine \"\n \"operation with %s\" %\n (str(type(other)), str(type(self))))\n\n def _combine_const(self, other, func):\n with np.errstate(all='ignore'):\n new_values = func(self.values, other)\n d = self._construct_axes_dict()\n return self._constructor(new_values, **d)\n\n def _combine_frame(self, other, func, axis=0):\n index, columns = self._get_plane_axes(axis)\n axis = self._get_axis_number(axis)\n\n other = other.reindex(index=index, columns=columns)\n\n with np.errstate(all='ignore'):\n if axis == 0:\n new_values = func(self.values, other.values)\n elif axis == 1:\n new_values = func(self.values.swapaxes(0, 1), other.values.T)\n new_values = new_values.swapaxes(0, 1)\n elif axis == 2:\n new_values = func(self.values.swapaxes(0, 2), other.values)\n new_values = new_values.swapaxes(0, 2)\n\n return self._constructor(new_values, self.items, self.major_axis,\n self.minor_axis)\n\n def _combine_panel(self, other, func):\n items = self.items.union(other.items)\n major = self.major_axis.union(other.major_axis)\n minor = self.minor_axis.union(other.minor_axis)\n\n # could check that everything's the same size, but forget it\n this = self.reindex(items=items, major=major, minor=minor)\n other = other.reindex(items=items, major=major, minor=minor)\n\n with np.errstate(all='ignore'):\n result_values = func(this.values, other.values)\n\n return self._constructor(result_values, items, major, minor)\n\n def major_xs(self, key):\n \"\"\"\n Return slice of panel along major axis\n\n Parameters\n ----------\n key : object\n Major axis label\n\n Returns\n -------\n y : DataFrame\n index -> minor axis, columns -> items\n\n Notes\n -----\n major_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels and is a superset of major_xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n return self.xs(key, axis=self._AXIS_LEN - 2)\n\n def minor_xs(self, key):\n \"\"\"\n Return slice of panel along minor axis\n\n Parameters\n ----------\n key : object\n Minor axis label\n\n Returns\n -------\n y : DataFrame\n index -> major axis, columns -> items\n\n Notes\n -----\n minor_xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels and is a superset of minor_xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n return self.xs(key, axis=self._AXIS_LEN - 1)\n\n def xs(self, key, axis=1):\n \"\"\"\n Return slice of panel along selected axis\n\n Parameters\n 
----------\n key : object\n Label\n axis : {'items', 'major', 'minor}, default 1/'major'\n\n Returns\n -------\n y : ndim(self)-1\n\n Notes\n -----\n xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels and is a superset of xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n\n \"\"\"\n axis = self._get_axis_number(axis)\n if axis == 0:\n return self[key]\n\n self._consolidate_inplace()\n axis_number = self._get_axis_number(axis)\n new_data = self._data.xs(key, axis=axis_number, copy=False)\n result = self._construct_return_type(new_data)\n copy = new_data.is_mixed_type\n result._set_is_copy(self, copy=copy)\n return result\n\n _xs = xs\n\n def _ixs(self, i, axis=0):\n \"\"\"\n i : int, slice, or sequence of integers\n axis : int\n \"\"\"\n\n ax = self._get_axis(axis)\n key = ax[i]\n\n # xs cannot handle a non-scalar key, so just reindex here\n # if we have a multi-index and a single tuple, then its a reduction\n # (GH 7516)\n if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):\n if is_list_like(key):\n indexer = {self._get_axis_name(axis): key}\n return self.reindex(**indexer)\n\n # a reduction\n if axis == 0:\n values = self._data.iget(i)\n return self._box_item_values(key, values)\n\n # xs by position\n self._consolidate_inplace()\n new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)\n return self._construct_return_type(new_data)\n\n def groupby(self, function, axis='major'):\n \"\"\"\n Group data on given axis, returning GroupBy object\n\n Parameters\n ----------\n function : callable\n Mapping function for chosen access\n axis : {'major', 'minor', 'items'}, default 'major'\n\n Returns\n -------\n grouped : PanelGroupBy\n \"\"\"\n from pandas.core.groupby import PanelGroupBy\n axis = self._get_axis_number(axis)\n return PanelGroupBy(self, function, axis=axis)\n\n def to_frame(self, filter_observations=True):\n \"\"\"\n Transform wide format into long (stacked) format as DataFrame whose\n columns are the Panel's items and whose index is a MultiIndex formed\n of the Panel's major and minor axes.\n\n Parameters\n ----------\n filter_observations : boolean, default True\n Drop (major, minor) pairs without a complete set of observations\n across all the items\n\n Returns\n -------\n y : DataFrame\n \"\"\"\n _, N, K = self.shape\n\n if filter_observations:\n # shaped like the return DataFrame\n mask = notnull(self.values).all(axis=0)\n # size = mask.sum()\n selector = mask.ravel()\n else:\n # size = N * K\n selector = slice(None, None)\n\n data = {}\n for item in self.items:\n data[item] = self[item].values.ravel()[selector]\n\n def construct_multi_parts(idx, n_repeat, n_shuffle=1):\n axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)\n labels = [x[selector] for x in axis_idx.labels]\n levels = axis_idx.levels\n names = axis_idx.names\n return labels, levels, names\n\n def construct_index_parts(idx, major=True):\n levels = [idx]\n if major:\n labels = [np.arange(N).repeat(K)[selector]]\n names = idx.name or 'major'\n else:\n labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]\n labels = [labels.ravel()[selector]]\n names = idx.name or 'minor'\n names = [names]\n return labels, levels, names\n\n if isinstance(self.major_axis, MultiIndex):\n major_labels, major_levels, major_names = construct_multi_parts(\n self.major_axis, n_repeat=K)\n else:\n major_labels, major_levels, major_names = construct_index_parts(\n self.major_axis)\n\n if isinstance(self.minor_axis, 
MultiIndex):\n minor_labels, minor_levels, minor_names = construct_multi_parts(\n self.minor_axis, n_repeat=N, n_shuffle=K)\n else:\n minor_labels, minor_levels, minor_names = construct_index_parts(\n self.minor_axis, major=False)\n\n levels = major_levels + minor_levels\n labels = major_labels + minor_labels\n names = major_names + minor_names\n\n index = MultiIndex(levels=levels, labels=labels, names=names,\n verify_integrity=False)\n\n return DataFrame(data, index=index, columns=self.items)\n\n to_long = deprecate('to_long', to_frame)\n toLong = deprecate('toLong', to_frame)\n\n def apply(self, func, axis='major', **kwargs):\n \"\"\"\n Applies function along axis (or axes) of the Panel\n\n Parameters\n ----------\n func : function\n Function to apply to each combination of 'other' axes\n e.g. if axis = 'items', the combination of major_axis/minor_axis\n will each be passed as a Series; if axis = ('items', 'major'),\n DataFrames of items & major axis will be passed\n axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two\n axes\n Additional keyword arguments will be passed as keywords to the function\n\n Examples\n --------\n\n Returns a Panel with the square root of each element\n\n >>> p = pd.Panel(np.random.rand(4,3,2))\n >>> p.apply(np.sqrt)\n\n Equivalent to p.sum(1), returning a DataFrame\n\n >>> p.apply(lambda x: x.sum(), axis=1)\n\n Equivalent to previous:\n\n >>> p.apply(lambda x: x.sum(), axis='minor')\n\n Return the shapes of each DataFrame over axis 2 (i.e the shapes of\n items x major), as a Series\n\n >>> p.apply(lambda x: x.shape, axis=(0,1))\n\n Returns\n -------\n result : Panel, DataFrame, or Series\n \"\"\"\n\n if kwargs and not isinstance(func, np.ufunc):\n f = lambda x: func(x, **kwargs)\n else:\n f = func\n\n # 2d-slabs\n if isinstance(axis, (tuple, list)) and len(axis) == 2:\n return self._apply_2d(f, axis=axis)\n\n axis = self._get_axis_number(axis)\n\n # try ufunc like\n if isinstance(f, np.ufunc):\n try:\n with np.errstate(all='ignore'):\n result = np.apply_along_axis(func, axis, self.values)\n return self._wrap_result(result, axis=axis)\n except (AttributeError):\n pass\n\n # 1d\n return self._apply_1d(f, axis=axis)\n\n def _apply_1d(self, func, axis):\n\n axis_name = self._get_axis_name(axis)\n ndim = self.ndim\n values = self.values\n\n # iter thru the axes\n slice_axis = self._get_axis(axis)\n slice_indexer = [0] * (ndim - 1)\n indexer = np.zeros(ndim, 'O')\n indlist = list(range(ndim))\n indlist.remove(axis)\n indexer[axis] = slice(None, None)\n indexer.put(indlist, slice_indexer)\n planes = [self._get_axis(axi) for axi in indlist]\n shape = np.array(self.shape).take(indlist)\n\n # all the iteration points\n points = cartesian_product(planes)\n\n results = []\n for i in range(np.prod(shape)):\n\n # construct the object\n pts = tuple([p[i] for p in points])\n indexer.put(indlist, slice_indexer)\n\n obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)\n result = func(obj)\n\n results.append(result)\n\n # increment the indexer\n slice_indexer[-1] += 1\n n = -1\n while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):\n slice_indexer[n - 1] += 1\n slice_indexer[n] = 0\n n -= 1\n\n # empty object\n if not len(results):\n return self._constructor(**self._construct_axes_dict())\n\n # same ndim as current\n if isinstance(results[0], Series):\n arr = np.vstack([r.values for r in results])\n arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))\n tranp = np.array([axis] + indlist).argsort()\n arr = 
arr.transpose(tuple(list(tranp)))\n return self._constructor(arr, **self._construct_axes_dict())\n\n # ndim-1 shape\n results = np.array(results).reshape(shape)\n if results.ndim == 2 and axis_name != self._info_axis_name:\n results = results.T\n planes = planes[::-1]\n return self._construct_return_type(results, planes)\n\n def _apply_2d(self, func, axis):\n \"\"\" handle 2-d slices, equiv to iterating over the other axis \"\"\"\n\n ndim = self.ndim\n axis = [self._get_axis_number(a) for a in axis]\n\n # construct slabs, in 2-d this is a DataFrame result\n indexer_axis = list(range(ndim))\n for a in axis:\n indexer_axis.remove(a)\n indexer_axis = indexer_axis[0]\n\n slicer = [slice(None, None)] * ndim\n ax = self._get_axis(indexer_axis)\n\n results = []\n for i, e in enumerate(ax):\n slicer[indexer_axis] = i\n sliced = self.iloc[tuple(slicer)]\n\n obj = func(sliced)\n results.append((e, obj))\n\n return self._construct_return_type(dict(results))\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n if numeric_only:\n raise NotImplementedError('Panel.{0} does not implement '\n 'numeric_only.'.format(name))\n\n axis_name = self._get_axis_name(axis)\n axis_number = self._get_axis_number(axis_name)\n f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)\n\n with np.errstate(all='ignore'):\n result = f(self.values)\n\n axes = self._get_plane_axes(axis_name)\n if result.ndim == 2 and axis_name != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n def _construct_return_type(self, result, axes=None):\n \"\"\" return the type for the ndim of the result \"\"\"\n ndim = getattr(result, 'ndim', None)\n\n # need to assume they are the same\n if ndim is None:\n if isinstance(result, dict):\n ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)\n\n # have a dict, so top-level is +1 dim\n if ndim != 0:\n ndim += 1\n\n # scalar\n if ndim == 0:\n return Series(result)\n\n # same as self\n elif self.ndim == ndim:\n # return the construction dictionary for these axes\n if axes is None:\n return self._constructor(result)\n return self._constructor(result, **self._construct_axes_dict())\n\n # sliced\n elif self.ndim == ndim + 1:\n if axes is None:\n return self._constructor_sliced(result)\n return self._constructor_sliced(\n result, **self._extract_axes_for_slice(self, axes))\n\n raise PandasError('invalid _construct_return_type [self->%s] '\n '[result->%s]' % (self, result))\n\n def _wrap_result(self, result, axis):\n axis = self._get_axis_name(axis)\n axes = self._get_plane_axes(axis)\n if result.ndim == 2 and axis != self._info_axis_name:\n result = result.T\n\n return self._construct_return_type(result, axes)\n\n @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None else\n kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None else\n kwargs.pop('minor', None))\n return super(Panel, self).reindex(items=items, major_axis=major_axis,\n minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):\n major_axis = (major_axis if major_axis is not None else\n kwargs.pop('major', None))\n minor_axis = (minor_axis if minor_axis is not None else\n kwargs.pop('minor', None))\n return super(Panel, self).rename(items=items, major_axis=major_axis,\n 
minor_axis=minor_axis, **kwargs)\n\n @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)\n def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,\n limit=None, fill_value=np.nan):\n return super(Panel, self).reindex_axis(labels=labels, axis=axis,\n method=method, level=level,\n copy=copy, limit=limit,\n fill_value=fill_value)\n\n @Appender(_shared_docs['transpose'] % _shared_doc_kwargs)\n def transpose(self, *args, **kwargs):\n # check if a list of axes was passed in instead as a\n # single *args element\n if (len(args) == 1 and hasattr(args[0], '__iter__') and\n not is_string_like(args[0])):\n axes = args[0]\n else:\n axes = args\n\n if 'axes' in kwargs and axes:\n raise TypeError(\"transpose() got multiple values for \"\n \"keyword argument 'axes'\")\n elif not axes:\n axes = kwargs.pop('axes', ())\n\n return super(Panel, self).transpose(*axes, **kwargs)\n\n @Appender(_shared_docs['fillna'] % _shared_doc_kwargs)\n def fillna(self, value=None, method=None, axis=None, inplace=False,\n limit=None, downcast=None, **kwargs):\n return super(Panel, self).fillna(value=value, method=method, axis=axis,\n inplace=inplace, limit=limit,\n downcast=downcast, **kwargs)\n\n def count(self, axis='major'):\n \"\"\"\n Return number of observations over requested axis.\n\n Parameters\n ----------\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n count : DataFrame\n \"\"\"\n i = self._get_axis_number(axis)\n\n values = self.values\n mask = np.isfinite(values)\n result = mask.sum(axis=i, dtype='int64')\n\n return self._wrap_result(result, axis)\n\n def shift(self, periods=1, freq=None, axis='major'):\n \"\"\"\n Shift index by desired number of periods with an optional time freq.\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original. This is different\n from the behavior of DataFrame.shift()\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, optional\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n shifted : Panel\n \"\"\"\n if freq:\n return self.tshift(periods, freq, axis=axis)\n\n return super(Panel, self).slice_shift(periods, axis=axis)\n\n def tshift(self, periods=1, freq=None, axis='major'):\n return super(Panel, self).tshift(periods, freq, axis)\n\n def join(self, other, how='left', lsuffix='', rsuffix=''):\n \"\"\"\n Join items with other Panel either on major and minor axes column\n\n Parameters\n ----------\n other : Panel or list of Panels\n Index should be similar to one of the columns in this one\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. 
Default: 'left'\n for joining on index, None otherwise\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n\n Returns\n -------\n joined : Panel\n \"\"\"\n from pandas.tools.merge import concat\n\n if isinstance(other, Panel):\n join_major, join_minor = self._get_join_index(other, how)\n this = self.reindex(major=join_major, minor=join_minor)\n other = other.reindex(major=join_major, minor=join_minor)\n merged_data = this._data.merge(other._data, lsuffix, rsuffix)\n return self._constructor(merged_data)\n else:\n if lsuffix or rsuffix:\n raise ValueError('Suffixes not supported when passing '\n 'multiple panels')\n\n if how == 'left':\n how = 'outer'\n join_axes = [self.major_axis, self.minor_axis]\n elif how == 'right':\n raise ValueError('Right join not supported with multiple '\n 'panels')\n else:\n join_axes = None\n\n return concat([self] + list(other), axis=0, join=how,\n join_axes=join_axes, verify_integrity=True)\n\n def update(self, other, join='left', overwrite=True, filter_func=None,\n raise_conflict=False):\n \"\"\"\n Modify Panel in place using non-NA values from passed\n Panel, or object coercible to Panel. Aligns on items\n\n Parameters\n ----------\n other : Panel, or object coercible to Panel\n join : How to join individual DataFrames\n {'left', 'right', 'outer', 'inner'}, default 'left'\n overwrite : boolean, default True\n If True then overwrite values for common keys in the calling panel\n filter_func : callable(1d-array) -> 1d-array<boolean>, default None\n Can choose to replace values other than NA. 
Return True for values\n that should be updated\n raise_conflict : bool\n If True, will raise an error if a DataFrame and other both\n contain data in the same place.\n \"\"\"\n\n if not isinstance(other, self._constructor):\n other = self._constructor(other)\n\n axis_name = self._info_axis_name\n axis_values = self._info_axis\n other = other.reindex(**{axis_name: axis_values})\n\n for frame in axis_values:\n self[frame].update(other[frame], join, overwrite, filter_func,\n raise_conflict)\n\n def _get_join_index(self, other, how):\n if how == 'left':\n join_major, join_minor = self.major_axis, self.minor_axis\n elif how == 'right':\n join_major, join_minor = other.major_axis, other.minor_axis\n elif how == 'inner':\n join_major = self.major_axis.intersection(other.major_axis)\n join_minor = self.minor_axis.intersection(other.minor_axis)\n elif how == 'outer':\n join_major = self.major_axis.union(other.major_axis)\n join_minor = self.minor_axis.union(other.minor_axis)\n return join_major, join_minor\n\n # miscellaneous data creation\n @staticmethod\n def _extract_axes(self, data, axes, **kwargs):\n \"\"\" return a list of the axis indicies \"\"\"\n return [self._extract_axis(self, data, axis=i, **kwargs)\n for i, a in enumerate(axes)]\n\n @staticmethod\n def _extract_axes_for_slice(self, axes):\n \"\"\" return the slice dictionary for these axes \"\"\"\n return dict([(self._AXIS_SLICEMAP[i], a)\n for i, a in zip(\n self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],\n axes)])\n\n @staticmethod\n def _prep_ndarray(self, values, copy=True):\n if not isinstance(values, np.ndarray):\n values = np.asarray(values)\n # NumPy strings are a pain, convert to object\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype=object, copy=True)\n else:\n if copy:\n values = values.copy()\n if values.ndim != self._AXIS_LEN:\n raise ValueError(\"The number of dimensions required is {0}, \"\n \"but the number of dimensions of the \"\n \"ndarray given was {1}\".format(self._AXIS_LEN,\n values.ndim))\n return values\n\n @staticmethod\n def _homogenize_dict(self, frames, intersect=True, dtype=None):\n \"\"\"\n Conform set of _constructor_sliced-like objects to either\n an intersection of indices / columns or a union.\n\n Parameters\n ----------\n frames : dict\n intersect : boolean, default True\n\n Returns\n -------\n dict of aligned results & indicies\n \"\"\"\n\n result = dict()\n # caller differs dict/ODict, presered type\n if isinstance(frames, OrderedDict):\n result = OrderedDict()\n\n adj_frames = OrderedDict()\n for k, v in compat.iteritems(frames):\n if isinstance(v, dict):\n adj_frames[k] = self._constructor_sliced(v)\n else:\n adj_frames[k] = v\n\n axes = self._AXIS_ORDERS[1:]\n axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(\n self, adj_frames, axes, intersect=intersect))])\n\n reindex_dict = dict(\n [(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])\n reindex_dict['copy'] = False\n for key, frame in compat.iteritems(adj_frames):\n if frame is not None:\n result[key] = frame.reindex(**reindex_dict)\n else:\n result[key] = None\n\n axes_dict['data'] = result\n axes_dict['dtype'] = dtype\n return axes_dict\n\n @staticmethod\n def _extract_axis(self, data, axis=0, intersect=False):\n\n index = None\n if len(data) == 0:\n index = Index([])\n elif len(data) > 0:\n raw_lengths = []\n indexes = []\n\n have_raw_arrays = False\n have_frames = False\n\n for v in data.values():\n if isinstance(v, self._constructor_sliced):\n have_frames = True\n 
indexes.append(v._get_axis(axis))\n elif v is not None:\n have_raw_arrays = True\n raw_lengths.append(v.shape[axis])\n\n if have_frames:\n index = _get_combined_index(indexes, intersect=intersect)\n\n if have_raw_arrays:\n lengths = list(set(raw_lengths))\n if len(lengths) > 1:\n raise ValueError('ndarrays must match shape on axis %d' % axis)\n\n if have_frames:\n if lengths[0] != len(index):\n raise AssertionError('Length of data and index must match')\n else:\n index = Index(np.arange(lengths[0]))\n\n if index is None:\n index = Index([])\n\n return _ensure_index(index)\n\n @classmethod\n def _add_aggregate_operations(cls, use_numexpr=True):\n \"\"\" add the operations to the cls; evaluate the doc strings again \"\"\"\n\n # doc strings substitors\n _agg_doc = \"\"\"\nWrapper method for %%s\n\nParameters\n----------\nother : %s or %s\"\"\" % (cls._constructor_sliced.__name__, cls.__name__) + \"\"\"\naxis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\n Axis to broadcast over\n\nReturns\n-------\n\"\"\" + cls.__name__ + \"\\n\"\n\n def _panel_arith_method(op, name, str_rep=None, default_axis=None,\n fill_zeros=None, **eval_kwargs):\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True,\n **eval_kwargs)\n except TypeError:\n result = op(x, y)\n\n # handles discrepancy between numpy and numexpr on division/mod\n # by 0 though, given that these are generally (always?)\n # non-scalars, I'm not sure whether it's worth it at the moment\n result = missing.fill_zeros(result, x, y, name, fill_zeros)\n return result\n\n if name in _op_descriptions:\n op_name = name.replace('__', '')\n op_desc = _op_descriptions[op_name]\n if op_desc['reversed']:\n equiv = 'other ' + op_desc['op'] + ' panel'\n else:\n equiv = 'panel ' + op_desc['op'] + ' other'\n\n _op_doc = \"\"\"\n %%s of series and other, element-wise (binary operator `%%s`).\n Equivalent to ``%%s``.\n\n Parameters\n ----------\n other : %s or %s\"\"\" % (cls._constructor_sliced.__name__,\n cls.__name__) + \"\"\"\n axis : {\"\"\" + ', '.join(cls._AXIS_ORDERS) + \"}\" + \"\"\"\n Axis to broadcast over\n\n Returns\n -------\n \"\"\" + cls.__name__ + \"\"\"\n\n See also\n --------\n \"\"\" + cls.__name__ + \".%s\\n\"\n doc = _op_doc % (op_desc['desc'], op_name, equiv,\n op_desc['reverse'])\n else:\n doc = _agg_doc % name\n\n @Appender(doc)\n def f(self, other, axis=0):\n return self._combine(other, na_op, axis=axis)\n\n f.__name__ = name\n return f\n\n # add `div`, `mul`, `pow`, etc..\n ops.add_flex_arithmetic_methods(\n cls, _panel_arith_method, use_numexpr=use_numexpr,\n flex_comp_method=ops._comp_method_PANEL)\n\n\nPanel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,\n stat_axis=1, aliases={'major': 'major_axis',\n 'minor': 'minor_axis'},\n slicers={'major_axis': 'index',\n 'minor_axis': 'columns'})\n\nops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)\nPanel._add_aggregate_operations()\nPanel._add_numeric_operations()\n\n\n# legacy\nclass WidePanel(Panel):\n def __init__(self, *args, **kwargs):\n # deprecation, #10892\n warnings.warn(\"WidePanel is deprecated. Please use Panel\",\n FutureWarning, stacklevel=2)\n\n super(WidePanel, self).__init__(*args, **kwargs)\n\n\nclass LongPanel(DataFrame):\n def __init__(self, *args, **kwargs):\n # deprecation, #10892\n warnings.warn(\"LongPanel is deprecated. Please use DataFrame\",\n FutureWarning, stacklevel=2)\n\n super(LongPanel, self).__init__(*args, **kwargs)\n",
"from __future__ import division, absolute_import, print_function\n\nimport itertools\n\nimport numpy as np\nfrom numpy import exp\nfrom numpy.testing import assert_, assert_equal\n\nfrom scipy.optimize import root\n\n\ndef test_performance():\n # Compare performance results to those listed in\n # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]\n # and\n # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].\n # and those produced by dfsane.f from M. Raydan's website.\n #\n # Where the results disagree, the largest limits are taken.\n\n e_a = 1e-5\n e_r = 1e-4\n\n table_1 = [\n dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),\n dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),\n dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),\n dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),\n # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors\n dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3\n dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?\n dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?\n dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?\n dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),\n dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12\n ]\n\n # Check also scaling invariance\n for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],\n ['cruz', 'cheng']):\n for problem in table_1:\n n = problem['n']\n func = lambda x, n: yscale*problem['F'](x/xscale, n)\n args = (n,)\n x0 = problem['x0'](n) * xscale\n\n fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))\n\n sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)\n sigma_0 = xscale/yscale\n\n with np.errstate(over='ignore'):\n sol = root(func, x0, args=args,\n options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,\n sigma_0=sigma_0, sigma_eps=sigma_eps,\n line_search=line_search),\n method='DF-SANE')\n\n err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),\n fatol, sol.success, sol.nit, sol.nfev])\n assert_(sol.success, err_msg)\n assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval\n assert_(sol.nit <= problem['nit'], err_msg)\n assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)\n\n\ndef test_complex():\n def func(z):\n return z**2 - 1 + 2j\n x0 = 2.0j\n\n ftol = 1e-4\n sol = root(func, x0, tol=ftol, method='DF-SANE')\n\n assert_(sol.success)\n\n f0 = np.linalg.norm(func(x0))\n fx = np.linalg.norm(func(sol.x))\n assert_(fx <= ftol*f0)\n\n\ndef test_linear_definite():\n # The DF-SANE paper proves convergence for \"strongly isolated\"\n # solutions.\n #\n # For linear systems F(x) = A x - b = 0, with A positive or\n # negative definite, the solution is strongly isolated.\n\n def check_solvability(A, b, line_search='cruz'):\n func = lambda x: A.dot(x) - b\n xp = np.linalg.solve(A, b)\n eps = np.linalg.norm(func(xp)) * 1e3\n sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),\n method='DF-SANE')\n assert_(sol.success)\n assert_(np.linalg.norm(func(sol.x)) <= eps)\n\n n = 90\n\n # Test linear pos.def. 
system\n np.random.seed(1234)\n A = np.arange(n*n).reshape(n, n)\n A = A + n*n * np.diag(1 + np.arange(n))\n assert_(np.linalg.eigvals(A).min() > 0)\n b = np.arange(n) * 1.0\n check_solvability(A, b, 'cruz')\n check_solvability(A, b, 'cheng')\n\n # Test linear neg.def. system\n check_solvability(-A, b, 'cruz')\n check_solvability(-A, b, 'cheng')\n\n\ndef test_shape():\n def f(x, arg):\n return x - arg\n\n for dt in [float, complex]:\n x = np.zeros([2,2])\n arg = np.ones([2,2], dtype=dt)\n\n sol = root(f, x, args=(arg,), method='DF-SANE')\n assert_(sol.success)\n assert_equal(sol.x.shape, x.shape)\n\n\n# Some of the test functions and initial guesses listed in\n# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]\n\ndef F_1(x, n):\n g = np.zeros([n])\n i = np.arange(2, n+1)\n g[0] = exp(x[0] - 1) - 1\n g[1:] = i*(exp(x[1:] - 1) - x[1:])\n return g\n\ndef x0_1(n):\n x0 = np.empty([n])\n x0.fill(n/(n-1))\n return x0\n\ndef F_2(x, n):\n g = np.zeros([n])\n i = np.arange(2, n+1)\n g[0] = exp(x[0]) - 1\n g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)\n return g\n\ndef x0_2(n):\n x0 = np.empty([n])\n x0.fill(1/n**2)\n return x0\n\ndef F_4(x, n):\n assert_equal(n % 3, 0)\n g = np.zeros([n])\n # Note: the first line is typoed in some of the references;\n # correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]\n g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8\n g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16\n g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3\n return g\n\ndef x0_4(n):\n assert_equal(n % 3, 0)\n x0 = np.array([-1, 1/2, -1] * (n//3))\n return x0\n\ndef F_6(x, n):\n c = 0.9\n mu = (np.arange(1, n+1) - 0.5)/n\n return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))\n\ndef x0_6(n):\n return np.ones([n])\n\ndef F_7(x, n):\n assert_equal(n % 3, 0)\n\n def phi(t):\n v = 0.5*t - 2\n v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]\n v[t >= 2] = (0.5*t + 2)[t >= 2]\n return v\n g = np.zeros([n])\n g[::3] = 1e4 * x[1::3]**2 - 1\n g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001\n g[2::3] = phi(x[2::3])\n return g\n\ndef x0_7(n):\n assert_equal(n % 3, 0)\n return np.array([1e-3, 18, 1] * (n//3))\n\ndef F_9(x, n):\n g = np.zeros([n])\n i = np.arange(2, n)\n g[0] = x[0]**3/3 + x[1]**2/2\n g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2\n g[-1] = -x[-1]**2/2 + n*x[-1]**3/3\n return g\n\ndef x0_9(n):\n return np.ones([n])\n\ndef F_10(x, n):\n return np.log(1 + x) - x/n\n\ndef x0_10(n):\n return np.ones([n])\n"
] | [
[
"pandas.core.groupby.PanelGroupBy",
"pandas.types.common.is_integer",
"pandas.compat.iteritems",
"pandas.core.generic.NDFrame._set_item",
"pandas.compat.map",
"pandas.core.common._default_index",
"pandas.core.internals.create_block_manager_from_arrays",
"numpy.asarray",
"pandas.core.missing.fill_zeros",
"pandas.types.cast._possibly_cast_item",
"pandas.compat.zip",
"pandas.types.missing.notnull",
"numpy.apply_along_axis",
"numpy.isfinite",
"numpy.vstack",
"pandas.core.index.MultiIndex.from_arrays",
"pandas.compat.numpy.function.validate_round",
"pandas.compat.itervalues",
"pandas.core.common.PandasError",
"pandas.compat.OrderedDefaultdict",
"pandas.util.decorators.Appender",
"pandas.types.common.is_list_like",
"pandas.core.index.MultiIndex",
"pandas.types.common.is_scalar",
"pandas.core.generic.NDFrame.__init__",
"numpy.unique",
"pandas.core.index._ensure_index",
"pandas.core.ops.add_special_arithmetic_methods",
"numpy.zeros",
"pandas.core.common._try_sort",
"pandas.types.cast._infer_dtype_from_scalar",
"pandas.core.indexing.maybe_droplevels",
"pandas.util.decorators.deprecate",
"numpy.arange",
"pandas.formats.printing.pprint_thing",
"numpy.prod",
"pandas.types.common.is_string_like",
"pandas.compat.u",
"pandas.core.ops.add_flex_arithmetic_methods",
"numpy.empty",
"pandas.core.common._apply_if_callable",
"pandas.compat.OrderedDict",
"pandas.core.series.Series",
"pandas.computation.expressions.evaluate",
"pandas.core.index.Index",
"numpy.errstate",
"pandas.core.index._get_combined_index",
"pandas.compat.range",
"pandas.tools.util.cartesian_product",
"numpy.array_equal",
"numpy.array",
"pandas.core.frame.DataFrame",
"pandas.core.internals.create_block_manager_from_blocks",
"pandas.io.excel.ExcelWriter"
],
[
"numpy.sqrt",
"numpy.ones",
"numpy.linalg.solve",
"numpy.empty",
"numpy.zeros",
"numpy.testing.assert_equal",
"scipy.optimize.root",
"numpy.random.seed",
"numpy.exp",
"numpy.errstate",
"numpy.arange",
"numpy.linalg.eigvals",
"numpy.log",
"numpy.array",
"numpy.testing.assert_"
]
] |
eric-czech/FaST-LMM | [
"497ac732f0cb25e328282cff42045afb70a99076"
] | [
"fastlmm/inference/fastlmm_predictor.py"
] | [
"from __future__ import print_function #Python 2 & 3 compatibility\nfrom __future__ import absolute_import\nimport numpy as np\nimport logging\nimport unittest\nimport os\nimport scipy.linalg as LA\nimport time\n\nfrom pysnptools.snpreader import Bed,Pheno\nfrom pysnptools.snpreader import SnpData,SnpReader\nfrom pysnptools.kernelreader import KernelNpz\nfrom pysnptools.kernelreader import SnpKernel\nfrom pysnptools.kernelreader import KernelReader\nfrom pysnptools.kernelreader import Identity as KernelIdentity\nimport pysnptools.util as pstutil\nfrom pysnptools.standardizer import DiagKtoN,UnitTrained\nfrom pysnptools.standardizer import Unit\nfrom pysnptools.util import intersect_apply\nfrom pysnptools.standardizer import Standardizer\nfrom fastlmm.inference.lmm import LMM\nfrom pysnptools.standardizer import Identity as StandardizerIdentity\nfrom scipy.stats import multivariate_normal\nfrom fastlmm.util.pickle_io import load, save\nfrom pysnptools.pstreader import PstReader\nfrom six.moves import range\n\nclass _SnpWholeTest(KernelReader):\n '''\n Warning: Assumes that if train and test contains the same iid, they have the same value.\n '''\n def __init__(self,train,test,standardizer,block_size,iid0=None):\n self.train = train\n self.test = test\n self.standardizer = standardizer\n assert standardizer.is_constant, \"Expect standardizer to be constant\"\n self.block_size = block_size\n if iid0 is not None:\n _row = iid0\n\n @property\n def row(self):\n if not hasattr(self,'_row'):\n assert np.array_equal(self.train.sid,self.test.sid), \"Expect train and test to have same sid in same order\"\n train_set = set(tuple(item) for item in self.train.iid)\n test_unique = [item2 for item2 in (tuple(item) for item in self.test.iid) if item2 not in train_set]\n self._row = np.r_[self.train.iid,np.array(test_unique,dtype='str').reshape(-1,2)]\n return self._row\n\n\n @property\n def col(self):\n return self.test.iid\n\n def __getitem__(self, iid_indexer_and_snp_indexer):\n if isinstance(iid_indexer_and_snp_indexer,tuple):\n iid0_indexer, iid1_indexer = iid_indexer_and_snp_indexer\n else:\n iid0_indexer = iid_indexer_and_snp_indexer\n iid1_indexer = iid0_indexer\n\n row_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.row_count, iid0_indexer)\n col_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.col_count, iid1_indexer)\n\n if row_index_or_none is None:\n row_index_or_none = list(range(self.row_count))\n\n assert not isinstance(row_index_or_none,str), \"row_index_or_none should not be a string\"\n iid = self.row[row_index_or_none]\n\n if col_index_or_none is None or np.array_equal(col_index_or_none,list(range(self.col_count))):\n test = self.test\n else:\n test = self.test[col_index_or_none]\n \n try: #case 1: asking for train x test\n train = self.train[self.train.iid_to_index(iid),:]\n is_ok = True\n except:\n is_ok = False\n if is_ok:\n return _SnpTrainTest(train=train,test=test,standardizer=self.standardizer,block_size=self.block_size)\n\n #case 2: asking for train x test\n if np.array_equal(test.iid,iid):\n return SnpKernel(test,standardizer=self.standardizer,block_size=self.block_size)\n\n #case 3: Just re-reordering the iids\n if len(row_index_or_none) == self.row_count and (col_index_or_none is None or len(col_index_or_none) == self.col_count):\n result = _SnpWholeTest(train=self.train,test=test,standardizer=self.standardizer,block_size=self.block_size,iid0=iid)\n return result\n\n \n raise Exception(\"When reading from a _SnpWholeTest, can only ask to 
reorder iids or to access from train x test or test x test\")\n\n\n #!!! does it make sense to read from disk in to parts?\n def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):\n result = self[row_index_or_none,col_index_or_none]._read(row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok)\n return result\n\n def __repr__(self):\n s = \"_SnpWholeTest(train={0},test={1},standardizer={2}\".format(self.train,self.test,self.standardizer)\n if self.block_size is not None:\n s += \",block_size={0}\".format(self.block_size)\n s += \")\"\n return s\n\n def copyinputs(self, copier):\n #Doesn't need run_once\n copier.input(self.train)\n copier.input(self.test)\n copier.input(self.standardizer)\n\nclass _SnpTrainTest(KernelReader):\n def __init__(self,train,test,standardizer,block_size):\n self.train = train\n self.test = test\n self.standardizer = standardizer\n assert standardizer.is_constant, \"Expect standardizer to be constant\"\n self.block_size = block_size\n if np.array_equal(train.iid,test.iid):\n self._col = train.iid\n else:\n self._col = test.iid\n\n @property\n def row(self):\n return self.train.iid\n\n @property\n def col(self):\n return self._col\n\n def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):\n assert self.train.sid_count == self.test.sid_count, \"real assert\"\n #case 1: asking for all of train x test\n if (row_index_or_none is None or np.array_equal(row_index_or_none,np.arange(self.row_count))\n and col_index_or_none is None or np.array_equal(col_index_or_none,np.arange(self.col_count))):\n\n #Do all-at-once (not in blocks) if 1. No block size is given or 2. The #ofSNPs < Min(block_size,iid_count) #similar code elsewhere\n if self.block_size is None or (self.train.sid_count <= self.block_size or self.train.sid_count <= self.train.iid_count+self.test.iid_count):\n train_snps = self.train.read(dtype=dtype).standardize(self.standardizer)\n test_snps = self.test.read(dtype=dtype).standardize(self.standardizer)\n if order == 'F': #numpy's 'dot' always returns 'C' order\n k_val = test_snps.val.dot(train_snps.val.T).T\n else:\n k_val = train_snps.val.dot(test_snps.val.T)\n return k_val\n else: #Do in blocks\n #Set the default order to 'C' because with kernels any order is fine and the Python .dot method likes 'C' best.\n if order=='A':\n order = 'C'\n k_val = np.zeros([self.train.iid_count,self.test.iid_count],dtype=dtype,order=order)\n ct = 0\n ts = time.time()\n\n for start in range(0, self.train.sid_count, self.block_size):\n ct += self.block_size\n train_snps = self.train[:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)\n test_snps = self.test [:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)\n if order == 'F': #numpy's 'dot' always returns 'C' order\n k_val += test_snps.val.dot(train_snps.val.T).T\n else:\n k_val += train_snps.val.dot(test_snps.val.T)\n if ct % self.block_size==0:\n diff = time.time()-ts\n if diff > 1: logging.info(\"read %s SNPs in %.2f seconds\" % (ct, diff))\n return k_val\n else:\n raise Exception(\"_SnpTrainTest currently only has code for reading all of train x test\")\n\n\n def __repr__(self):\n s = \"_SnpTrainTest(train={0},test={1},standardizer={2}\".format(self.train,self.test,self.standardizer)\n if self.block_size is not None:\n s += \",block_size={0}\".format(self.block_size)\n s += \")\"\n return s\n\n def copyinputs(self, copier):\n #Doesn't need run_once\n 
copier.input(self.train)\n copier.input(self.test)\n copier.input(self.standardizer)\n\ndef _snps_fixup(snp_input, iid_if_none=None,count_A1=None):\n from pysnptools.snpreader import _snps_fixup as pst_snps_fixup\n return pst_snps_fixup(snp_input,iid_if_none,count_A1)\n\ndef _pheno_fixup(pheno_input, iid_if_none=None, missing ='NaN',count_A1=None):\n\n try:\n ret = Pheno(pheno_input, iid_if_none, missing=missing)\n ret.iid #doing this just to force file load\n return ret\n except:\n return _snps_fixup(pheno_input, iid_if_none=iid_if_none,count_A1=count_A1)\n\ndef _kernel_fixup(input, iid_if_none, standardizer, test=None, test_iid_if_none=None, block_size=None, train_snps=None, count_A1=None):\n if test is not None and input is None:\n input = test\n test = None\n\n if isinstance(input, str) and input.endswith(\".npz\"):\n return KernelNpz(input)\n\n if isinstance(input, str):\n input = Bed(input, count_A1=count_A1) #Note that we don't return here. Processing continues\n if isinstance(test, str):\n test = Bed(test, count_A1=count_A1) #Note that we don't return here. Processing continues\n\n if isinstance(input,SnpReader):\n if test is not None:\n return _SnpWholeTest(train=train_snps,test=test,standardizer=standardizer,block_size=block_size)\n else:\n return SnpKernel(input,standardizer=standardizer, block_size=block_size)\n \n \n if input is None:\n return KernelIdentity(iid=iid_if_none,test=test_iid_if_none)\n\n return input\n\n\nclass FastLMM(object):\n '''\n A predictor, somewhat in the style of scikit-learn, for learning and predicting with linear mixed models.\n\n **Constructor:**\n :Parameters: * **GB_goal** (int) -- gigabytes of memory the run should use, optional. If not given, will read the test_snps in blocks the same size as the kernel, which is memory efficient with little overhead on computation time.\n * **force_full_rank** (bool) -- Even if kernels are defined with fewer SNPs than IIDs, create an explicit iid_count x iid_count kernel. Cannot be True if force_low_rank is True.\n * **force_low_rank** (bool) -- Even if kernels are defined with fewer IIDs than SNPs, create a low-rank iid_count x sid_count kernel. Cannot be True if force_full_rank is True.\n * **snp_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to SNP data. Choices include :class:`Standardizer.Unit` (Default. Makes values for each SNP have mean zero and standard deviation 1.0, then fills missing with zero) and :class:`Standardizer.Identity` (Do nothing)\n * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. Fills missing with zero) and :class:`Standardizer.Identity` (do nothing)\n * **kernel_standardizer** (:class:`KernelStandardizer`) -- The PySnpTools kernel standardizer to be apply to the kernels. Some choices include :class:`KernelStandardizer.DiagKToN` (Default. 
Make the diagonal sum to iid_count) and :class:`KernelStandardizer.Identity` (Do nothing)\n\n :Example:\n\n >>> from __future__ import print_function #Python 2 & 3 compatibility\n >>> import numpy as np\n >>> import logging\n >>> from pysnptools.snpreader import Bed, Pheno\n >>> from fastlmm.inference import FastLMM\n >>> logging.basicConfig(level=logging.INFO)\n >>> snpreader = Bed('../feature_selection/examples/toydata.bed',count_A1=False)\n >>> cov_fn = \"../feature_selection/examples/toydata.cov\"\n >>> pheno_fn = \"../feature_selection/examples/toydata.phe\"\n >>> train_idx = np.r_[10:snpreader.iid_count] # iids 10 and on\n >>> test_idx = np.r_[0:10] # the first 10 iids\n >>> fastlmm = FastLMM(GB_goal=2)\n >>> #We give it phenotype and covariate information for extra examples, but it reorders and intersects the examples, so only training examples are used. \n >>> _ = fastlmm.fit(K0_train=snpreader[train_idx,:],X=cov_fn,y=pheno_fn) \n >>> mean, covariance = fastlmm.predict(K0_whole_test=snpreader[test_idx,:],X=cov_fn,count_A1=False)\n >>> print(list(mean.iid[0]), round(mean.val[0,0],7), round(covariance.val[0,0],7))\n ['per0', 'per0'] 0.1791958 0.8995209\n >>> nll = fastlmm.score(K0_whole_test=snpreader[test_idx,:],X=cov_fn,y=pheno_fn,count_A1=False)\n >>> print(round(nll,7))\n 13.4623234\n\n\n '''\n\n def __init__(self, GB_goal=None, force_full_rank=False, force_low_rank=False, snp_standardizer=Unit(), covariate_standardizer=Unit(), kernel_standardizer=DiagKtoN()):\n self.GB_goal = GB_goal\n self.force_full_rank = force_full_rank\n self.force_low_rank = force_low_rank\n self.snp_standardizer = snp_standardizer\n self.covariate_standardizer = covariate_standardizer\n self.kernel_standardizer = kernel_standardizer\n self.is_fitted = False\n\n #!!!update doc to explain h2raw w.r.t h2\n def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2raw=None, mixing=None,count_A1=None):#!!!is this h2 or h2corr????\n \"\"\"\n Method for training a :class:`FastLMM` predictor. 
If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected.\n\n :param X: training covariate information, optional: \n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__\n (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param y: training phenotype:\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ \n (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param K0_train: A similarity matrix or SNPs from which to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__.\n If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.\n If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K0_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or\n `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param K1_train: A second similarity matrix or SNPs from which to construct such a second similarity matrix. (Also, see 'mixing').\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.\n If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or\n `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param h2raw: A parameter to LMM learning that tells how much weight to give the K's vs. the identity matrix, optional \n If not given will search for best value.\n If mixing is unspecified, then h2 must also be unspecified.\n :type h2raw: number\n\n :param mixing: Weight between 0.0 (inclusive, default) and 1.0 (inclusive) given to K1_train relative to K0_train.\n If you give no mixing number and a K1_train is given, the best weight will be learned.\n :type mixing: number\n\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\n :type count_A1: bool\n\n :rtype: self, the fitted FastLMM predictor\n \"\"\"\n self.is_fitted = True\n # should this have a cache file like 'single_snp'?\n #!!!later what happens if missing values in pheno_train?\n #!!!later add code so that X, y, etc can be array-like objects without iid information. 
In that case, make up iid info\n\n assert y is not None, \"y must be given\"\n\n y = _pheno_fixup(y,count_A1=count_A1)\n assert y.sid_count == 1, \"Expect y to be just one variable\"\n X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1)\n\n K0_train = _kernel_fixup(K0_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)\n K1_train = _kernel_fixup(K1_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)\n\n K0_train, K1_train, X, y = intersect_apply([K0_train, K1_train, X, y],intersect_before_standardize=True) #!!! test this on both K's as None\n from fastlmm.association.single_snp import _set_block_size\n K0_train, K1_train, block_size = _set_block_size(K0_train, K1_train, mixing, self.GB_goal, self.force_full_rank, self.force_low_rank)\n\n X = X.read()\n # If possible, unit standardize train and test together. If that is not possible, unit standardize only train and later apply\n # the same linear transformation to test. Unit standardization is necessary for FastLMM to work correctly.\n #!!!later is the calculation of the training data's stats done twice???\n X, covar_unit_trained = X.standardize(self.covariate_standardizer,block_size=block_size,return_trained=True) #This also fills missing with the mean\n\n # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset\n X = SnpData(iid=X.iid,\n sid=self._new_snp_name(X),\n val=np.c_[X.val,np.ones((X.iid_count,1))],\n name =\"covariate_train w/ 1's\")\n\n y0 = y.read().val #!!!later would view_ok=True,order='A' be ok because this code already did a fresh read to look for any missing values \n\n from fastlmm.association.single_snp import _Mixer #!!!move _combine_the_best_way to another file (e.g. this one)\n K_train, h2raw, mixer = _Mixer.combine_the_best_way(K0_train,K1_train,X.val,y0,mixing,h2raw,force_full_rank=self.force_full_rank,force_low_rank=self.force_low_rank,kernel_standardizer=self.kernel_standardizer,block_size=block_size)\n\n # do final prediction using lmm.py\n lmm = LMM()\n\n #Special case: The K kernel is defined implicitly with SNP data\n if mixer.do_g:\n assert isinstance(K_train.standardizer,StandardizerIdentity), \"Expect Identity standardizer\"\n G_train = K_train.snpreader\n lmm.setG(G0=K_train.snpreader.val)\n else:\n lmm.setK(K0=K_train.val)\n\n lmm.setX(X.val)\n lmm.sety(y0[:,0])\n\n # Find the best h2 and also on covariates (not given from new model)\n if h2raw is None:\n res = lmm.findH2() #!!!why is REML true in the return???\n else:\n res = lmm.nLLeval(h2=h2raw)\n\n\n #We compute sigma2 instead of using res['sigma2'] because res['sigma2'] is only the pure noise.\n full_sigma2 = float(sum((np.dot(X.val,res['beta']).reshape(-1,1)-y0)**2))/y.iid_count #!!! this is non REML. 
Is that right?\n\n ###### all references to 'fastlmm_model' should be here so that we don't forget any\n self.block_size = block_size\n self.beta = res['beta']\n self.h2raw = res['h2']\n self.sigma2 = full_sigma2\n self.U = lmm.U\n self.S = lmm.S\n self.K = lmm.K\n self.G = lmm.G\n self.y = lmm.y\n self.Uy = lmm.Uy\n self.X = lmm.X\n self.UX = lmm.UX\n self.mixer = mixer\n self.covar_unit_trained = covar_unit_trained\n self.K_train_iid = K_train.iid\n self.covar_sid = X.sid\n self.pheno_sid = y.sid\n self.G0_train = K0_train.snpreader if isinstance(K0_train,SnpKernel) else None #!!!later expensive?\n self.G1_train = K1_train.snpreader if isinstance(K1_train,SnpKernel) else None #!!!later expensive?\n return self\n\n @staticmethod\n def _new_snp_name(snpreader):\n new_snp = \"always1\"\n while True:\n if not new_snp in snpreader.sid:\n return np.r_[snpreader.sid,[new_snp]]\n new_snp += \"_\"\n \n\n def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, return_per_iid=False, count_A1=None):\n \"\"\"\n Method for calculating the negative log likelihood of testing examples.\n If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.\n\n :param X: testing covariate information, optional: \n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param y: testing phenotype:\n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. 
If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.\n :type iid_if_none: an ndarray of two strings\n\n :param return_mse_too: If true, will also return the mean squared error.\n :type return_mse_too: bool\n\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\n :type count_A1: bool\n\n :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1\n alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.\n :type count_A1: bool\n\n :rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error.\n \"\"\"\n mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1)\n y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1)\n mean, covar, y = intersect_apply([mean0, covar0, y])\n mean = mean.read(order='A',view_ok=True).val\n covar = covar.read(order='A',view_ok=True).val\n y_actual = y.read().val\n if not return_per_iid:\n var = multivariate_normal(mean=mean.reshape(-1), cov=covar)\n nll = -np.log(var.pdf(y_actual.reshape(-1)))\n if not return_mse_too:\n return nll\n else:\n mse = ((y_actual-mean)**2).sum()\n return nll, mse\n else:\n if not return_mse_too:\n result = SnpData(iid=y.iid,sid=['nLL'],val=np.empty((y.iid_count,1)),name=\"nLL\")\n for iid_index in range(y.iid_count):\n var = multivariate_normal(mean=mean[iid_index], cov=covar[iid_index,iid_index])\n nll = -np.log(var.pdf(y_actual[iid_index]))\n result.val[iid_index,0] = nll\n return result\n else:\n raise Exception(\"need code for mse_too\") \n\n\n def _extract_fixup(kernel):\n assert kernel.iid0_count >= kernel.iid1_count, \"Expect iid0 to be at least as long as iid1\"\n\n\n def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None, count_A1=None):\n \"\"\"\n Method for predicting from a fitted :class:`FastLMM` predictor.\n If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.\n\n :param X: testing covariate information, optional: \n If you give a string, it should be the file name of a PLINK phenotype-formatted file.\n :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.\n\n :param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. 
If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,\n the test SNPs needed to construct such a similarity matrix.\n Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.\n Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.\n :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__\n\n :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.\n :type iid_if_none: an ndarray of two strings\n\n :rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance\n \"\"\"\n\n assert self.is_fitted, \"Can only predict after predictor has been fitted\"\n #assert K0_whole_test is not None, \"K0_whole_test must be given\"\n #!!!later is it too wasteful to keep both G0_train, G1_train, and lmm.G when storing to disk?\n #!!!later all _kernel_fixup's should use block_size input\n\n K0_whole_test_b = _kernel_fixup(K0_whole_test, train_snps=self.G0_train, iid_if_none=iid_if_none, standardizer=self.mixer.snp_trained0, test=K0_whole_test, test_iid_if_none=None, block_size=self.block_size,count_A1=count_A1)\n K1_whole_test = _kernel_fixup(K1_whole_test, train_snps=self.G1_train, iid_if_none=K0_whole_test_b.iid0, standardizer=self.mixer.snp_trained1, test=K1_whole_test, test_iid_if_none=K0_whole_test_b.iid1, block_size=self.block_size,count_A1=count_A1)\n X = _pheno_fixup(X,iid_if_none=K0_whole_test_b.iid1,count_A1=count_A1)\n K0_whole_test_c, K1_whole_test, X = intersect_apply([K0_whole_test_b, K1_whole_test, X],intersect_before_standardize=True,is_test=True)\n X = X.read().standardize(self.covar_unit_trained)\n # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset\n X = SnpData(iid=X.iid,\n sid=self._new_snp_name(X),\n val=np.c_[X.read().val,np.ones((X.iid_count,1))])\n assert np.array_equal(X.sid,self.covar_sid), \"Expect covar sids to be the same in train and test.\"\n\n train_idx0 = K0_whole_test_c.iid0_to_index(self.K_train_iid)\n K0_train_test = K0_whole_test_c[train_idx0,:]\n train_idx1 = K1_whole_test.iid0_to_index(self.K_train_iid)\n K1_train_test = K1_whole_test[train_idx1,:]\n test_idx0 = K0_whole_test_c.iid0_to_index(K0_whole_test_c.iid1)\n K0_test_test = K0_whole_test_c[test_idx0,:]\n if K0_test_test.iid0 is not K0_test_test.iid1:\n raise Exception(\"real assert\")\n test_idx1 = K1_whole_test.iid0_to_index(K0_whole_test_c.iid1)\n K1_test_test = K1_whole_test[test_idx1,:]\n\n if self.mixer.do_g:\n ###################################################\n # low rank from Rasmussen eq 2.9 + noise term added to covar\n ###################################################\n Gstar = self.mixer.g_mix(K0_train_test,K1_train_test)\n varg = self.h2raw * self.sigma2\n vare = 
(1.-self.h2raw) * self.sigma2\n Ainv = LA.inv((1./vare) * np.dot(self.G.T,self.G) + (1./varg)*np.eye(self.G.shape[1]))\n testAinv = np.dot(Gstar.test.val, Ainv)\n pheno_predicted = np.dot(X.val,self.beta) + (1./vare) * np.dot(np.dot(testAinv,self.G.T),self.y-np.dot(self.X,self.beta))\n pheno_predicted = pheno_predicted.reshape(-1,1)\n covar = np.dot(testAinv,Gstar.test.val.T) + vare * np.eye(Gstar.test.val.shape[0])\n\n else:\n lmm = LMM()\n lmm.U = self.U\n lmm.S = self.S\n lmm.G = self.G\n lmm.y = self.y\n lmm.Uy = self.Uy\n lmm.X = self.X\n lmm.UX = self.UX\n\n Kstar = self.mixer.k_mix(K0_train_test,K1_train_test) #!!!later do we need/want reads here? how about view_OK?\n lmm.setTestData(Xstar=X.val, K0star=Kstar.val.T)\n\n Kstar_star = self.mixer.k_mix(K0_test_test,K1_test_test) #!!!later do we need/want reads here?how about view_OK?\n pheno_predicted, covar = lmm.predict_mean_and_variance(beta=self.beta, h2=self.h2raw,sigma2=self.sigma2, Kstar_star=Kstar_star.val)\n\n #pheno_predicted = lmm.predictMean(beta=self.beta, h2=self.h2,scale=self.sigma2).reshape(-1,1)\n ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name=\"lmm Prediction\")\n\n from pysnptools.kernelreader import KernelData\n ret1 = KernelData(iid=K0_test_test.iid,val=covar)\n return ret0, ret1\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n\n import doctest\n doctest.testmod()\n"
] | [
[
"numpy.eye",
"numpy.ones",
"numpy.empty",
"numpy.zeros",
"numpy.arange",
"numpy.array_equal",
"numpy.array",
"numpy.dot",
"scipy.stats.multivariate_normal"
]
] |
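For orientation, the row above bundles the FastLMM predictor class with the numpy/scipy calls it relies on. A minimal usage sketch, closely following the doctest embedded in the class docstring in that row; the Bed/cov/phe file paths below are placeholders, not part of the dataset row:

import numpy as np
from pysnptools.snpreader import Bed
from fastlmm.inference import FastLMM

# Placeholder PLINK-formatted inputs; substitute real .bed/.cov/.phe files.
snpreader = Bed('toydata.bed', count_A1=False)
cov_fn = 'toydata.cov'
pheno_fn = 'toydata.phe'

train_idx = np.r_[10:snpreader.iid_count]   # all but the first 10 iids for training
test_idx = np.r_[0:10]                      # the first 10 iids for testing

fastlmm = FastLMM(GB_goal=2)
fastlmm.fit(K0_train=snpreader[train_idx, :], X=cov_fn, y=pheno_fn)

# predict() returns a SnpData of means and a KernelData of covariances;
# score() returns the negative log likelihood of the test phenotypes.
mean, covariance = fastlmm.predict(K0_whole_test=snpreader[test_idx, :], X=cov_fn, count_A1=False)
nll = fastlmm.score(K0_whole_test=snpreader[test_idx, :], X=cov_fn, y=pheno_fn, count_A1=False)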
jnettels/reegis | [
"fe50c124aa041b9faa494611cba6b833675115e4",
"fe50c124aa041b9faa494611cba6b833675115e4"
] | [
"reegis/mobility.py",
"reegis/entsoe.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Calculate the mobility demand.\n\nSPDX-FileCopyrightText: 2016-2019 Uwe Krien <krien@uni-bremen.de>\n\nSPDX-License-Identifier: MIT\n\"\"\"\n__copyright__ = \"Uwe Krien <krien@uni-bremen.de>\"\n__license__ = \"MIT\"\n\n\nimport os\nimport pandas as pd\nfrom collections import namedtuple\n\nfrom reegis import geometries, config as cfg, tools, energy_balance\n\n\ndef format_kba_table(filename, sheet):\n \"\"\"\n Clean the layout of the table.\n\n The tables are made for human readability and not for automatic processing.\n Lines with subtotals and format-strings of the column names are removed.\n A valid MultiIndex is created to make it easier to filter the table by the\n index.\n\n Parameters\n ----------\n filename : str\n Path and name of the excel file.\n sheet : str\n Name of the sheet of the excel table.\n\n Returns\n -------\n pandas.DataFrame\n\n \"\"\"\n\n # Read table\n df = pd.read_excel(filename, sheet, skiprows=7, header=[0, 1])\n\n # Drop empty column\n df = df.drop([(\"Unnamed: 0_level_0\", \"Unnamed: 0_level_1\")], axis=1)\n\n idx1 = df.columns[0]\n idx2 = df.columns[1]\n idx3 = df.columns[2]\n\n # Remove lines with subtotal\n df.loc[(df[idx1] == \"SONSTIGE\"), idx2] = \"SONSTIGE\"\n df.loc[(df[idx1] == \"SONSTIGE\"), idx3] = \"00000 SONSTIGE\"\n df = df.drop(df.loc[df[idx3].isnull()].index)\n df[df.columns[[0, 1, 2]]] = df[df.columns[[0, 1, 2]]].fillna(\n method=\"ffill\"\n )\n\n # Add column with name of subregion and remove name from index\n df[df.columns[2]] = df[df.columns[2]].str[:5]\n\n # set MultiIndex\n df.set_index(list(df.columns[[0, 1, 2]]), inplace=True)\n df.index = df.index.set_names([\"state\", \"region\", \"subregion\"])\n\n # Remove format-strings from column names\n level1 = (\n df.columns.get_level_values(1)\n .str.replace(\"\\n\", \" \")\n .str.replace(\"- \", \"\")\n .str.replace(\":\", \"\")\n )\n level0 = (\n df.columns.get_level_values(0)\n .str.replace(\"\\n\", \" \")\n .str.replace(\"- \", \"\")\n .str.replace(\":\", \"\")\n )\n df.columns = pd.MultiIndex.from_arrays([level0, level1])\n\n return df\n\n\ndef get_kba_table():\n \"\"\"\n Get the \"kfz\" table for all vehicles and the \"pkw\" table for more\n statistics about passenger cars.\n\n Returns\n -------\n namedtuple\n\n Examples\n --------\n >>> table = get_kba_table()\n >>> kfz = table.kfz\n >>> print(type(kfz))\n <class 'pandas.core.frame.DataFrame'>\n \"\"\"\n kba_table = namedtuple(\"kba_table\", \"kfz pkw\")\n kba_filename = os.path.join(\n cfg.get(\"paths\", \"general\"), cfg.get(\"mobility\", \"table_kba\")\n )\n\n # Download table if it does not exit\n if not os.path.isfile(kba_filename):\n tools.download_file(kba_filename, cfg.get(\"mobility\", \"url_kba\"))\n\n return kba_table(\n kfz=format_kba_table(kba_filename, \"Kfz_u_Kfz_Anh\"),\n pkw=format_kba_table(kba_filename, \"Pkw\"),\n )\n\n\ndef get_mileage_table():\n \"\"\"\n Download mileage table from the KBA (Kraftfahrtbundesamt) and store it\n locally.\n \"\"\"\n url = (\n \"https://www.kba.de/SharedDocs/Publikationen/DE/Statistik/\"\n \"Kraftverkehr/VK/2018/vk_2018_xlsx.xlsx?__blob=publicationFile&v=22\"\n )\n\n mileage_filename = os.path.join(\n cfg.get(\"paths\", \"general\"), \"mileage_table_kba.xlsx\"\n )\n\n # Download table if it does not exit\n if not os.path.isfile(mileage_filename):\n tools.download_file(mileage_filename, url)\n return mileage_filename\n\n\ndef get_sheet_from_mileage_table(sheet):\n \"\"\"Load given sheet from the mileage file.\"\"\"\n fn = get_mileage_table()\n df = 
pd.read_excel(\n fn, sheet, skiprows=7, index_col=[0, 1, 2], skipfooter=9\n )\n df.index = df.index.droplevel(0).set_names([\"\", \"\"])\n\n return df.drop(\n df.loc[pd.IndexSlice[slice(None), \"Insgesamt\"], slice(None)].index\n )\n\n\ndef get_mileage_by_type_and_fuel(year=2018):\n \"\"\"\n Get mileage by type and fuel from mileage table and other sources.\n\n See mobility.ini file for more information.\n \"\"\"\n # get km per year and type\n total = (\n get_sheet_from_mileage_table(\"VK 1.1\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n passenger = (\n get_sheet_from_mileage_table(\"VK 1.7\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n small_trucks = (\n get_sheet_from_mileage_table(\"VK 1.17\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n medium_trucks = (\n get_sheet_from_mileage_table(\"VK 1.20\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n )\n big_trucks_diesel = (\n get_sheet_from_mileage_table(\"VK 1.23\")\n .loc[\"Jahresfahrleistung in 1.000 km\", str(year)]\n .mul(1000)\n .sum()\n )\n df = pd.DataFrame(index=total.index, columns=[\"diesel\", \"petrol\", \"other\"])\n\n vt_dict = cfg.get_dict(\"vehicle_types_dictionary\")\n df.rename(vt_dict, axis=0, inplace=True)\n total.rename(vt_dict, axis=0, inplace=True)\n\n dc = cfg.get_dict(\"fuel_dictionary\")\n\n # add km by fuel for passenger cars\n df.loc[\"passenger car\"] = passenger.rename(dc, axis=0)\n\n # add km by fuel for small trucks (<= 3.5 tons)\n df.loc[\"small truck (max. 3.5 tons)\"] = small_trucks.rename(dc, axis=0)\n\n # add km by fuel for medium trucks (3.5 < weight <= 7.5 tons)\n df.loc[\"medium truck (3.5 to 7.5 tons)\"] = medium_trucks.rename(dc, axis=0)\n\n # add km by fuel for big trucks (> 7.5 tons)\n # assuming that non-diesel engines are 50% petrol and 50% other\n n = \"big truck (over 7.5 tons)\"\n df.loc[n, \"diesel\"] = big_trucks_diesel\n df.loc[n, [\"petrol\", \"other\"]] = (total[n] - big_trucks_diesel) / 2\n\n fuel_share = pd.DataFrame(\n cfg.get_dict_list(\"fuel share\"), index=[\"diesel\", \"petrol\", \"other\"]\n ).astype(float)\n\n for col in fuel_share.columns:\n df.loc[col] = fuel_share[col].mul(total[col])\n\n return df\n\n\ndef create_grouped_table_kfz():\n \"\"\"Group the kfz-table by main groups.\"\"\"\n df = get_kba_table().kfz\n df.index = df.index.droplevel([0, 1])\n df.columns = [\" \".join(col).strip() for col in df.columns]\n kfz_dict = cfg.get_dict(\"KFZ\")\n for col in df.columns:\n df[col] = pd.to_numeric(df[col].replace(\"-\", \"\"))\n df = df.groupby(by=kfz_dict, axis=1).sum()\n df[\"traction engine, general\"] = (\n df[\"traction engine\"] - df[\"traction engine, agriculture and forestry\"]\n )\n df.drop(\"traction engine\", axis=1, inplace=True)\n df.drop(\"ignore\", axis=1, inplace=True)\n return df\n\n\ndef create_grouped_table_pkw():\n \"\"\"\n Extract fuel groups of passenger cars\n\n Examples\n --------\n >>> pkw = create_grouped_table_pkw()\n >>> pkw['petrol'].sum()\n 31031021.0\n >>> pkw['diesel'].sum()\n 15153364.0\n \"\"\"\n df = get_kba_table().pkw\n df.index = df.index.droplevel([0, 1])\n df = df[\"Nach Kraftstoffarten\"]\n df = df.groupby(by=cfg.get_dict(\"PKW\"), axis=1).sum()\n df.drop(\"ignore\", axis=1, inplace=True)\n return df\n\n\ndef get_admin_by_region(region):\n \"\"\"\n Allocate admin keys to the given regions.\n\n Parameters\n ----------\n region : geopandas.GeoDataFrame\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n fn = 
os.path.join(cfg.get(\"paths\", \"geometry\"), \"vg1000_geodata.geojson\")\n vg = geometries.load(fullname=fn)\n vg.set_index(\"RS\", inplace=True)\n\n reg2vg = geometries.spatial_join_with_buffer(\n vg.representative_point(), region, \"fs\", limit=0\n )\n\n return pd.DataFrame(reg2vg.drop(\"geometry\", axis=1))\n\n\ndef get_grouped_kfz_by_region(region):\n \"\"\"\n Get the main vehicle groups by region.\n\n Parameters\n ----------\n region : geopandas.GeoDataFrame\n\n Returns\n -------\n pd.DataFrame\n\n Examples\n --------\n >>> fs = geometries.get_federal_states_polygon()\n >>> total = get_grouped_kfz_by_region(fs).sum()\n >>> int(total[\"passenger car\"])\n 47095784\n >>> int(total[\"lorry, > 7500\"])\n 295826\n \"\"\"\n df = create_grouped_table_kfz()\n reg2vg = get_admin_by_region(region)\n df2reg = df.merge(reg2vg, left_index=True, right_index=True, how=\"left\")\n df2reg[\"fs\"] = df2reg[\"fs\"].fillna(\"unknown\")\n return df2reg.groupby(\"fs\").sum()\n\n\ndef get_traffic_fuel_energy(year):\n \"\"\"\n\n Parameters\n ----------\n year : int\n\n Returns\n -------\n\n Examples\n --------\n >>> fuel_energy = get_traffic_fuel_energy(2017)\n >>> int(fuel_energy[\"Ottokraftstoffe\"])\n 719580\n >>> fuel_share = fuel_energy.div(fuel_energy.sum()) * 100\n >>> round(fuel_share[\"Dieselkraftstoffe\"], 1)\n 62.7\n \"\"\"\n fuel_energy = energy_balance.get_de_balance(year).loc[\"Straßenverkehr\"]\n fuel_energy = fuel_energy[fuel_energy != 0]\n fuel_energy.drop(\n [\"primär (gesamt)\", \"sekundär (gesamt)\", \"Row\", \"gesamt\"], inplace=True\n )\n return fuel_energy\n\n\ndef calculate_mobility_energy_use(year):\n \"\"\"\n\n Parameters\n ----------\n year\n\n Returns\n -------\n\n Examples\n --------\n >>> mobility_balance = get_traffic_fuel_energy(2017)\n >>> energy_use = calculate_mobility_energy_use(2017)\n >>> p = \"Petrol usage [TJ]\"\n >>> d = \"Diesel usage [TJ]\"\n >>> o = \"Overall fuel usage [TJ]\"\n >>> print(p, \"(energy balance):\", int(mobility_balance[\"Ottokraftstoffe\"]))\n Petrol usage [TJ] (energy balance): 719580\n >>> print(p, \"(calculated):\", int(energy_use[\"petrol\"].sum()))\n Petrol usage [TJ] (calculated): 803603\n >>> print(d, \"(energy balance):\",\n ... int(mobility_balance[\"Dieselkraftstoffe\"]))\n Diesel usage [TJ] (energy balance): 1425424\n >>> print(d, \"(calculated):\", int(energy_use[\"diesel\"].sum()))\n Diesel usage [TJ] (calculated): 1636199\n >>> print(o, \"(energy balance):\", int(mobility_balance.sum()))\n Overall fuel usage [TJ] (energy balance): 2275143\n >>> print(o, \"(calculated):\", int(energy_use.sum().sum()))\n Overall fuel usage [TJ] (calculated): 2439803\n \"\"\"\n # fetch table of mileage by fuel and vehicle type\n mileage = get_mileage_by_type_and_fuel(year)\n\n # fetch table of specific demand by fuel and vehicle type (from 2011)\n spec_demand = (\n pd.DataFrame(\n cfg.get_dict_list(\"fuel consumption\"),\n index=[\"diesel\", \"petrol\", \"other\"],\n )\n .astype(float)\n .transpose()\n )\n\n # fetch the energy content of the different fuel types\n energy_content = pd.Series(cfg.get_dict(\"energy_per_liter\"))[\n [\"diesel\", \"petrol\", \"other\"]\n ]\n\n return mileage.mul(spec_demand).mul(energy_content) / 10 ** 6\n\n\nif __name__ == \"__main__\":\n pass\n",
"# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\n\"\"\" Download and prepare entsoe load profile from opsd data portal.\n\nSPDX-FileCopyrightText: 2016-2019 Uwe Krien <krien@uni-bremen.de>\n\nSPDX-License-Identifier: MIT\n\"\"\"\n__copyright__ = \"Uwe Krien <krien@uni-bremen.de>\"\n__license__ = \"MIT\"\n\n\n# Python libraries\nimport os\nimport logging\nimport datetime\nfrom collections import namedtuple\n\n# internal modules\nfrom reegis import config as cfg\n\n# External packages\nimport pandas as pd\nimport requests\nimport pytz\nimport dateutil\n\n\ndef read_original_timeseries_file(orig_csv_file=None, overwrite=False):\n \"\"\"Read timeseries file if it exists. Otherwise download it from opsd.\n \"\"\"\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n\n if orig_csv_file is None:\n orig_csv_file = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"original_file\")\n ).format(version=version)\n readme = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"readme_file\")\n ).format(version=version)\n json = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"json_file\")\n ).format(version=version)\n\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n\n if not os.path.isfile(orig_csv_file) or overwrite:\n req = requests.get(\n cfg.get(\"entsoe\", \"timeseries_data\").format(version=version)\n )\n\n if not overwrite:\n logging.warning(\"File not found. Try to download it from server.\")\n else:\n logging.warning(\n \"Will download file from server and overwrite\" \"existing ones\"\n )\n logging.warning(\"Check URL if download does not work.\")\n with open(orig_csv_file, \"wb\") as fout:\n fout.write(req.content)\n logging.warning(\n \"Downloaded from {0} and copied to '{1}'.\".format(\n cfg.get(\"entsoe\", \"timeseries_data\").format(version=version),\n orig_csv_file,\n )\n )\n req = requests.get(\n cfg.get(\"entsoe\", \"timeseries_readme\").format(version=version)\n )\n with open(readme, \"wb\") as fout:\n fout.write(req.content)\n req = requests.get(\n cfg.get(\"entsoe\", \"timeseries_json\").format(version=version)\n )\n with open(json, \"wb\") as fout:\n fout.write(req.content)\n logging.debug(\"Reading file: {0}\".format(orig_csv_file))\n orig = pd.read_csv(orig_csv_file, index_col=[0], parse_dates=True,\n date_parser=lambda col: pd.to_datetime(col, utc=True))\n orig = orig.tz_convert(\"Europe/Berlin\")\n return orig\n\n\ndef prepare_de_file(filename=None, overwrite=False):\n \"\"\"Convert demand file. 
CET index and Germany's load only.\"\"\"\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n if filename is None:\n filename = os.path.join(\n cfg.get(\"paths\", \"entsoe\"),\n cfg.get(\"entsoe\", \"de_file\").format(version=version),\n )\n if not os.path.isfile(filename) or overwrite:\n ts = read_original_timeseries_file(overwrite=overwrite)\n for col in ts.columns:\n if \"DE\" not in col:\n ts.drop(col, 1, inplace=True)\n\n ts.to_csv(filename)\n\n\ndef split_timeseries_file(filename=None, overwrite=False):\n \"\"\"Split table into load and renewables.\"\"\"\n entsoe_ts = namedtuple(\"entsoe\", [\"load\", \"renewables\"])\n logging.info(\"Splitting time series.\")\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n path_pattern = os.path.join(cfg.get(\"paths\", \"entsoe\"), \"{0}\")\n if filename is None:\n filename = path_pattern.format(\n cfg.get(\"entsoe\", \"de_file\").format(version=version)\n )\n\n if not os.path.isfile(filename) or overwrite:\n prepare_de_file(filename, overwrite)\n\n de_ts = pd.read_csv(\n filename,\n index_col=\"utc_timestamp\",\n parse_dates=True,\n date_parser=lambda col: pd.to_datetime(col, utc=True),\n )\n de_ts.index = de_ts.index.tz_convert(\"Europe/Berlin\")\n berlin = pytz.timezone(\"Europe/Berlin\")\n end_date = berlin.localize(datetime.datetime(2015, 1, 1, 0, 0, 0))\n\n de_ts.loc[de_ts.index < end_date, \"DE_load_\"] = de_ts.loc[\n de_ts.index < end_date, \"DE_load_actual_entsoe_power_statistics\"\n ]\n de_ts.loc[de_ts.index >= end_date, \"DE_load_\"] = de_ts.loc[\n de_ts.index >= end_date, \"DE_load_actual_entsoe_transparency\"\n ]\n\n load = pd.DataFrame(\n de_ts[pd.notnull(de_ts[\"DE_load_\"])][\"DE_load_\"], columns=[\"DE_load_\"]\n )\n\n re_columns = [\n \"DE_solar_capacity\",\n \"DE_solar_generation_actual\",\n \"DE_solar_profile\",\n \"DE_wind_capacity\",\n \"DE_wind_generation_actual\",\n \"DE_wind_profile\",\n \"DE_wind_offshore_capacity\",\n \"DE_wind_offshore_generation_actual\",\n \"DE_wind_offshore_profile\",\n \"DE_wind_onshore_capacity\",\n \"DE_wind_onshore_generation_actual\",\n \"DE_wind_onshore_profile\",\n ]\n re_subset = [\n \"DE_solar_capacity\",\n \"DE_solar_generation_actual\",\n \"DE_solar_profile\",\n \"DE_wind_capacity\",\n \"DE_wind_generation_actual\",\n \"DE_wind_profile\",\n ]\n\n renewables = de_ts.dropna(subset=re_subset, how=\"any\")[re_columns]\n\n return entsoe_ts(load=load, renewables=renewables)\n\n\ndef get_entsoe_load(year):\n \"\"\"\n\n Parameters\n ----------\n year\n\n Returns\n -------\n\n Examples\n --------\n >>> entsoe=get_entsoe_load(2015)\n >>> int(entsoe.sum())\n 477923089\n \"\"\"\n filename = os.path.join(\n cfg.get(\"paths\", \"entsoe\"), cfg.get(\"entsoe\", \"load_file\")\n )\n if not os.path.isfile(filename):\n load = split_timeseries_file().load\n load.to_hdf(filename, \"entsoe\")\n\n # Read entsoe time series for the given year\n f = datetime.datetime(year, 1, 1, 0)\n t = datetime.datetime(year, 12, 31, 23)\n f = f.astimezone(pytz.timezone(\"Europe/Berlin\"))\n t = t.astimezone(pytz.timezone(\"Europe/Berlin\"))\n logging.info(\"Read entsoe load series from {0} to {1}\".format(f, t))\n df = pd.DataFrame(pd.read_hdf(filename, \"entsoe\"))\n return df.loc[f:t]\n\n\ndef get_entsoe_renewable_data():\n \"\"\"\n\n Returns\n -------\n\n Examples\n --------\n >>> re=get_entsoe_renewable_data()\n >>> int(re['DE_solar_generation_actual'].sum())\n 237214558\n \"\"\"\n version = cfg.get(\"entsoe\", \"timeseries_version\")\n path_pattern = os.path.join(cfg.get(\"paths\", \"entsoe\"), \"{0}\")\n fn = 
path_pattern.format(\n cfg.get(\"entsoe\", \"renewables_file_csv\").format(version=version)\n )\n if not os.path.isfile(fn):\n renewables = split_timeseries_file().renewables\n renewables.to_csv(fn)\n re = pd.read_csv(fn, index_col=[0], parse_dates=True)\n return re\n\n\nif __name__ == \"__main__\":\n pass\n"
] | [
[
"pandas.read_excel",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays"
],
[
"pandas.read_csv",
"pandas.notnull",
"pandas.to_datetime",
"pandas.read_hdf"
]
] |
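The reegis row above pairs a mobility module with an ENTSO-E loader; the energy-use step at its core (calculate_mobility_energy_use) is plain element-wise pandas arithmetic. A standalone sketch of that step with invented numbers, since the real tables come from KBA downloads and the config files and are not reproduced here:

import pandas as pd

# Hypothetical annual mileage [km] by vehicle type and fuel
mileage = pd.DataFrame(
    {"diesel": [2.0e11, 5.0e10], "petrol": [4.0e11, 1.0e10]},
    index=["passenger car", "small truck (max. 3.5 tons)"],
)
# Hypothetical specific consumption [l/km] and energy content [MJ/l]
spec_demand = pd.DataFrame(
    {"diesel": [0.07, 0.09], "petrol": [0.08, 0.10]}, index=mileage.index
)
energy_content = pd.Series({"diesel": 35.8, "petrol": 32.4})

# Same shape of computation as in the row above:
# km * l/km * MJ/l / 1e6  ->  TJ per vehicle type and fuel
energy_use_tj = mileage.mul(spec_demand).mul(energy_content) / 10 ** 6
print(energy_use_tj)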
MoriZSJ/GVB | [
"9b954660ef377ead81c8e631c4a0f4a17075b2ea"
] | [
"CDAN-GD/pre_process.py"
] | [
"import numpy as np\nfrom torchvision import transforms\nimport os\nfrom PIL import Image, ImageOps\nimport numbers\nimport torch\n\n\nclass ResizeImage():\n def __init__(self, size):\n if isinstance(size, int):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n th, tw = self.size\n return img.resize((th, tw))\n\n\nclass RandomSizedCrop(object):\n \"\"\"Crop the given PIL.Image to random size and aspect ratio.\n A crop of random size of (0.08 to 1.0) of the original size and a random\n aspect ratio of 3/4 to 4/3 of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n Args:\n size: size of the smaller edge\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, img):\n h_off = random.randint(0, img.shape[1]-self.size)\n w_off = random.randint(0, img.shape[2]-self.size)\n img = img[:, h_off:h_off+self.size, w_off:w_off+self.size]\n return img\n\n\nclass Normalize(object):\n \"\"\"Normalize an tensor image with mean and standard deviation.\n Given mean: (R, G, B),\n will normalize each channel of the torch.*Tensor, i.e.\n channel = channel - mean\n Args:\n mean (sequence): Sequence of means for R, G, B channels respecitvely.\n \"\"\"\n\n def __init__(self, mean=None, meanfile=None):\n if mean:\n self.mean = mean\n else:\n arr = np.load(meanfile)\n self.mean = torch.from_numpy(arr.astype('float32')/255.0)[[2, 1, 0], :, :]\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n # TODO: make efficient\n for t, m in zip(tensor, self.mean):\n t.sub_(m)\n return tensor\n\n\nclass PlaceCrop(object):\n \"\"\"Crops the given PIL.Image at the particular index.\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (w, h), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size, start_x, start_y):\n if isinstance(size, int):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.start_x = start_x\n self.start_y = start_y\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be cropped.\n Returns:\n PIL.Image: Cropped image.\n \"\"\"\n th, tw = self.size\n return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))\n\n\nclass ForceFlip(object):\n \"\"\"Horizontally flip the given PIL.Image randomly with a probability of 0.5.\"\"\"\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be flipped.\n Returns:\n PIL.Image: Randomly flipped image.\n \"\"\"\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n\n\nclass CenterCrop(object):\n \"\"\"Crops the given PIL.Image at the center.\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL.Image): Image to be cropped.\n Returns:\n PIL.Image: Cropped image.\n \"\"\"\n w, h = (img.shape[1], img.shape[2])\n th, tw = self.size\n w_off = int((w - tw) / 2.)\n h_off = int((h - th) / 2.)\n img = img[:, h_off:h_off+th, w_off:w_off+tw]\n return img\n\n\ndef image_train(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.RandomResizedCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef image_target(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.RandomCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef image_test(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n start_first = 0\n start_center = (resize_size - crop_size - 1) / 2\n start_last = resize_size - crop_size - 1\n\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef image_test_10crop(resize_size=256, crop_size=224, alexnet=False):\n if not alexnet:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')\n start_first = 0\n start_center = (resize_size - crop_size - 1) / 2\n start_last = resize_size - crop_size - 1\n data_transforms = [\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_first, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_last, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_last, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_first, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size), ForceFlip(),\n PlaceCrop(crop_size, start_center, start_center),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_first, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_last, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_last, start_first),\n transforms.ToTensor(),\n normalize\n ]),\n 
transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_first, start_last),\n transforms.ToTensor(),\n normalize\n ]),\n transforms.Compose([\n ResizeImage(resize_size),\n PlaceCrop(crop_size, start_center, start_center),\n transforms.ToTensor(),\n normalize\n ])\n ]\n return data_transforms\n"
] | [
[
"numpy.load"
]
] |
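The GVB preprocessing row defines torchvision pipelines for training, target-domain training, and ten-crop evaluation. A short sketch of how those factory functions would typically be applied to a PIL image; the module name pre_process is assumed from the file path and the image path is a placeholder:

from PIL import Image
import pre_process  # CDAN-GD/pre_process.py from the row above

img = Image.open("example.jpg").convert("RGB")  # placeholder image

train_tf = pre_process.image_train(resize_size=256, crop_size=224)
x = train_tf(img)                      # tensor of shape (3, 224, 224)

test_tfs = pre_process.image_test_10crop(resize_size=256, crop_size=224)
crops = [tf(img) for tf in test_tfs]   # ten (3, 224, 224) tensors: five crops plus their flips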
Halimaz/tensorflow-1 | [
"3437fba39d5bca77fd7627aad15ba76fb75f5731"
] | [
"tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for rnn module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.contrib import rnn as rnn_lib\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.util import nest\n\n\nclass Plus1RNNCell(rnn_lib.RNNCell):\n \"\"\"RNN Cell generating (output, new_state) = (input + 1, state + 1).\"\"\"\n\n @property\n def output_size(self):\n return 5\n\n @property\n def state_size(self):\n return 5\n\n def __call__(self, input_, state, scope=None):\n return (input_ + 1, state + 1)\n\n\nclass DummyMultiDimensionalLSTM(rnn_lib.RNNCell):\n \"\"\"LSTM Cell generating (output, new_state) = (input + 1, state + 1).\n\n The input to this cell may have an arbitrary number of dimensions that follow\n the preceding 'Time' and 'Batch' dimensions.\n \"\"\"\n\n def __init__(self, dims):\n \"\"\"Initialize the Multi-dimensional LSTM cell.\n\n Args:\n dims: tuple that contains the dimensions of the output of the cell,\n without including 'Time' or 'Batch' dimensions.\n \"\"\"\n if not isinstance(dims, tuple):\n raise TypeError(\"The dimensions passed to DummyMultiDimensionalLSTM \"\n \"should be a tuple of ints.\")\n self._dims = dims\n self._output_size = tensor_shape.TensorShape(self._dims)\n self._state_size = (tensor_shape.TensorShape(self._dims),\n tensor_shape.TensorShape(self._dims))\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self._state_size\n\n def __call__(self, input_, state, scope=None):\n h, c = state\n return (input_ + 1, (h + 1, c + 1))\n\n\nclass NestedRNNCell(rnn_lib.RNNCell):\n \"\"\"RNN Cell generating (output, new_state) = (input + 1, state + 1).\n\n The input, output and state of this cell is a tuple of 
two tensors.\n \"\"\"\n\n @property\n def output_size(self):\n return (5, 5)\n\n @property\n def state_size(self):\n return (6, 6)\n\n def __call__(self, input_, state, scope=None):\n h, c = state\n x, y = input_\n return ((x + 1, y + 1), (h + 1, c + 1))\n\n\nclass TestStateSaver(object):\n\n def __init__(self, batch_size, state_size):\n self._batch_size = batch_size\n self._state_size = state_size\n self.saved_state = {}\n\n def state(self, name):\n\n if isinstance(self._state_size, dict):\n state_size = self._state_size[name]\n else:\n state_size = self._state_size\n if isinstance(state_size, int):\n state_size = (state_size,)\n elif isinstance(state_size, tuple):\n pass\n else:\n raise TypeError(\"state_size should either be an int or a tuple\")\n\n return array_ops.zeros((self._batch_size,) + state_size)\n\n def save_state(self, name, state):\n self.saved_state[name] = state\n return array_ops.identity(state)\n\n @property\n def batch_size(self):\n return self._batch_size\n\n @property\n def state_size(self):\n return self._state_size\n\n\nclass TestStateSaverWithCounters(TestStateSaver):\n \"\"\"Class wrapper around TestStateSaver.\n\n A dummy class used for testing of static_state_saving_rnn. It helps test if\n save_state and state functions got called same number of time when we\n evaluate output of rnn cell and state or either of them separately. It\n inherits from the TestStateSaver and adds the counters for calls of functions.\n \"\"\"\n\n def __init__(self, batch_size, state_size):\n super(TestStateSaverWithCounters, self).__init__(batch_size, state_size)\n self._num_state_calls = variables_lib.Variable(0)\n self._num_save_state_calls = variables_lib.Variable(0)\n\n def state(self, name):\n with ops_lib.control_dependencies(\n [state_ops.assign_add(self._num_state_calls, 1)]):\n return super(TestStateSaverWithCounters, self).state(name)\n\n def save_state(self, name, state):\n with ops_lib.control_dependencies([state_ops.assign_add(\n self._num_save_state_calls, 1)]):\n return super(TestStateSaverWithCounters, self).save_state(name, state)\n\n @property\n def num_state_calls(self):\n return self._num_state_calls\n\n @property\n def num_save_state_calls(self):\n return self._num_save_state_calls\n\n\nclass RNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testInvalidSequenceLengthShape(self):\n cell = Plus1RNNCell()\n inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]\n with self.assertRaisesRegexp(ValueError, \"must be a vector\"):\n rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)\n\n def testRNN(self):\n cell = Plus1RNNCell()\n batch_size = 2\n input_size = 5\n max_length = 8 # unrolled up to this length\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out, inp in zip(outputs, inputs):\n self.assertEqual(out.get_shape(), inp.get_shape())\n self.assertEqual(out.dtype, inp.dtype)\n\n with self.test_session(use_gpu=True) as sess:\n input_value = np.random.randn(batch_size, input_size)\n values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})\n\n # Outputs\n for v in values[:-1]:\n self.assertAllClose(v, input_value + 1.0)\n\n # Final state\n self.assertAllClose(values[-1],\n max_length * np.ones(\n (batch_size, input_size), dtype=np.float32))\n\n def testDropout(self):\n cell = Plus1RNNCell()\n 
full_dropout_cell = rnn_cell.DropoutWrapper(\n cell, input_keep_prob=1e-12, seed=0)\n (name, dep), = full_dropout_cell._checkpoint_dependencies\n self.assertIs(dep, cell)\n self.assertEqual(\"cell\", name)\n batch_size = 2\n input_size = 5\n max_length = 8\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"drop_scope\"):\n dropped_outputs, _ = rnn.static_rnn(\n full_dropout_cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out, inp in zip(outputs, inputs):\n self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())\n self.assertEqual(out.dtype, inp.dtype)\n\n with self.test_session(use_gpu=True) as sess:\n input_value = np.random.randn(batch_size, input_size)\n values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})\n full_dropout_values = sess.run(\n dropped_outputs, feed_dict={\n inputs[0]: input_value\n })\n\n for v in values[:-1]:\n self.assertAllClose(v, input_value + 1.0)\n for d_v in full_dropout_values[:-1]: # Add 1.0 to dropped_out (all zeros)\n self.assertAllClose(d_v, np.ones_like(input_value))\n\n def testDynamicCalculation(self):\n cell = Plus1RNNCell()\n sequence_length = array_ops.placeholder(dtypes.int64)\n batch_size = 2\n input_size = 5\n max_length = 8\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"drop_scope\"):\n dynamic_outputs, dynamic_state = rnn.static_rnn(\n cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)\n self.assertEqual(len(dynamic_outputs), len(inputs))\n\n with self.test_session(use_gpu=True) as sess:\n input_value = np.random.randn(batch_size, input_size)\n dynamic_values = sess.run(\n dynamic_outputs,\n feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n dynamic_state_value = sess.run(\n [dynamic_state],\n feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n\n # outputs are fully calculated for t = 0, 1\n for v in dynamic_values[:2]:\n self.assertAllClose(v, input_value + 1.0)\n\n # outputs at t = 2 are zero for entry 0, calculated for entry 1\n self.assertAllClose(dynamic_values[2],\n np.vstack((np.zeros((input_size)),\n 1.0 + input_value[1, :])))\n\n # outputs at t = 3+ are zero\n for v in dynamic_values[3:]:\n self.assertAllEqual(v, np.zeros_like(input_value))\n\n # the final states are:\n # entry 0: the values from the calculation at t=1\n # entry 1: the values from the calculation at t=2\n self.assertAllEqual(dynamic_state_value[0],\n np.vstack((1.0 * (1 + 1) * np.ones((input_size)),\n 1.0 * (2 + 1) * np.ones((input_size)))))\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n\n # check that all the variables names starts\n # with the proper scope.\n variables_lib.global_variables_initializer()\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n 
self.assertEqual(len(scope_vars), len(all_vars))\n\n def testScope(self):\n\n def factory(scope):\n cell = Plus1RNNCell()\n batch_size = 2\n input_size = 5\n max_length = 8 # unrolled up to this length\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n\nclass LSTMTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testDType(self):\n # Test case for GitHub issue 16228\n # Not passing dtype in constructor results in default float32\n lstm = rnn_cell.LSTMCell(10)\n input_tensor = array_ops.ones([10, 50])\n lstm.build(input_tensor.get_shape())\n self.assertEqual(lstm._bias.dtype, dtypes.float32_ref)\n\n # Explicitly pass dtype in constructor\n for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:\n lstm = rnn_cell.LSTMCell(10, dtype=dtype)\n input_tensor = array_ops.ones([10, 50])\n lstm.build(input_tensor.get_shape())\n self.assertEqual(lstm._bias.dtype, dtype._as_ref)\n\n def testNoProjNoSharding(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units, initializer=initializer, state_is_tuple=False)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n def testCellClipping(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n cell_clip=0.0,\n initializer=initializer,\n state_is_tuple=False)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n values = sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n for value in values:\n # if cell c is clipped to 0, tanh(c) = 0 => m==0\n self.assertAllEqual(value, np.zeros((batch_size, num_units)))\n\n def testNoProjNoShardingSimpleStateSaver(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n state_saver = TestStateSaver(batch_size, 2 * num_units)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=False)\n inputs = max_length * [\n 
array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_state_saving_rnn(\n cell, inputs, state_saver=state_saver, state_name=\"save_lstm\")\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n (last_state_value, saved_state_value) = sess.run(\n [state, state_saver.saved_state[\"save_lstm\"]],\n feed_dict={\n inputs[0]: input_value\n })\n self.assertAllEqual(last_state_value, saved_state_value)\n\n def testNoProjNoShardingTupleStateSaver(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n state_saver = TestStateSaver(batch_size, num_units)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=True)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_state_saving_rnn(\n cell, inputs, state_saver=state_saver, state_name=(\"c\", \"m\"))\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n last_and_saved_states = sess.run(\n state + (state_saver.saved_state[\"c\"], state_saver.saved_state[\"m\"]),\n feed_dict={\n inputs[0]: input_value\n })\n self.assertEqual(4, len(last_and_saved_states))\n self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])\n\n def testNoProjNoShardingNestedTupleStateSaver(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n state_saver = TestStateSaver(\n batch_size, {\n \"c0\": num_units,\n \"m0\": num_units,\n \"c1\": num_units + 1,\n \"m1\": num_units + 1,\n \"c2\": num_units + 2,\n \"m2\": num_units + 2,\n \"c3\": num_units + 3,\n \"m3\": num_units + 3\n })\n\n def _cell(i):\n return rnn_cell.LSTMCell(\n num_units + i,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=True)\n\n # This creates a state tuple which has 4 sub-tuples of length 2 each.\n cell = rnn_cell.MultiRNNCell(\n [_cell(i) for i in range(4)], state_is_tuple=True)\n\n self.assertEqual(len(cell.state_size), 4)\n for i in range(4):\n self.assertEqual(len(cell.state_size[i]), 2)\n\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))\n ]\n\n state_names = ((\"c0\", \"m0\"), (\"c1\", \"m1\"), (\"c2\", \"m2\"), (\"c3\", \"m3\"))\n with variable_scope.variable_scope(\"share_scope\"):\n outputs, state = rnn.static_state_saving_rnn(\n cell, inputs, state_saver=state_saver, state_name=state_names)\n self.assertEqual(len(outputs), len(inputs))\n\n # Final output comes from _cell(3) which has state size num_units + 3\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n 
last_states = sess.run(\n list(nest.flatten(state)), feed_dict={\n inputs[0]: input_value\n })\n saved_states = sess.run(\n list(state_saver.saved_state.values()),\n feed_dict={\n inputs[0]: input_value\n })\n self.assertEqual(8, len(last_states))\n self.assertEqual(8, len(saved_states))\n flat_state_names = nest.flatten(state_names)\n named_saved_states = dict(\n zip(state_saver.saved_state.keys(), saved_states))\n\n for i in range(8):\n self.assertAllEqual(last_states[i],\n named_saved_states[flat_state_names[i]])\n\n def testProjNoSharding(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=False)\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n def _testStateTupleWithProjAndSequenceLength(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n sequence_length = [4, 6]\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell_notuple = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=False)\n cell_tuple = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=True)\n with variable_scope.variable_scope(\"root\") as scope:\n outputs_notuple, state_notuple = rnn.static_rnn(\n cell_notuple,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n scope.reuse_variables()\n # TODO(ebrevdo): For this test, we ensure values are identical and\n # therefore the weights here are tied. In the future, we may consider\n # making the state_is_tuple property mutable so we can avoid\n # having to do this - especially if users ever need to reuse\n # the parameters from different RNNCell instances. 
Right now,\n # this seems an unrealistic use case except for testing.\n cell_tuple._scope = cell_notuple._scope # pylint: disable=protected-access\n outputs_tuple, state_tuple = rnn.static_rnn(\n cell_tuple,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n self.assertEqual(len(outputs_notuple), len(inputs))\n self.assertEqual(len(outputs_tuple), len(inputs))\n self.assertTrue(isinstance(state_tuple, tuple))\n self.assertTrue(isinstance(state_notuple, ops_lib.Tensor))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n outputs_notuple_v = sess.run(\n outputs_notuple, feed_dict={\n inputs[0]: input_value\n })\n outputs_tuple_v = sess.run(\n outputs_tuple, feed_dict={\n inputs[0]: input_value\n })\n self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)\n\n (state_notuple_v,) = sess.run(\n (state_notuple,), feed_dict={\n inputs[0]: input_value\n })\n state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})\n self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))\n\n def testProjSharding(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n initializer=initializer,\n state_is_tuple=False)\n\n outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n sess.run(outputs, feed_dict={inputs[0]: input_value})\n\n def testDoubleInput(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float64, shape=(None, input_size))\n ]\n\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n initializer=initializer,\n state_is_tuple=False)\n\n outputs, _ = rnn.static_rnn(\n cell,\n inputs,\n initial_state=cell.zero_state(batch_size, dtypes.float64))\n\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.asarray(\n np.random.randn(batch_size, input_size), dtype=np.float64)\n values = sess.run(outputs, feed_dict={inputs[0]: input_value})\n self.assertEqual(values[0].dtype, input_value.dtype)\n\n def testShardNoShardEquivalentOutput(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n initializer = init_ops.constant_initializer(0.001)\n\n cell_noshard = rnn_cell.LSTMCell(\n num_units,\n num_proj=num_proj,\n use_peepholes=True,\n initializer=initializer,\n 
num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n state_is_tuple=False)\n\n cell_shard = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"noshard_scope\"):\n outputs_noshard, state_noshard = rnn.static_rnn(\n cell_noshard, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"shard_scope\"):\n outputs_shard, state_shard = rnn.static_rnn(\n cell_shard, inputs, dtype=dtypes.float32)\n\n self.assertEqual(len(outputs_noshard), len(inputs))\n self.assertEqual(len(outputs_noshard), len(outputs_shard))\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n feeds = dict((x, input_value) for x in inputs)\n values_noshard = sess.run(outputs_noshard, feed_dict=feeds)\n values_shard = sess.run(outputs_shard, feed_dict=feeds)\n state_values_noshard = sess.run([state_noshard], feed_dict=feeds)\n state_values_shard = sess.run([state_shard], feed_dict=feeds)\n self.assertEqual(len(values_noshard), len(values_shard))\n self.assertEqual(len(state_values_noshard), len(state_values_shard))\n for (v_noshard, v_shard) in zip(values_noshard, values_shard):\n self.assertAllClose(v_noshard, v_shard, atol=1e-3)\n for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):\n self.assertAllClose(s_noshard, s_shard, atol=1e-3)\n\n def testDoubleInputWithDropoutAndDynamicCalculation(self):\n \"\"\"Smoke test for using LSTM with doubles, dropout, dynamic calculation.\"\"\"\n\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n num_proj_shards = 3\n num_unit_shards = 2\n max_length = 8\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n sequence_length = array_ops.placeholder(dtypes.int64)\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float64, shape=(None, input_size))\n ]\n\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n num_unit_shards=num_unit_shards,\n num_proj_shards=num_proj_shards,\n initializer=initializer,\n state_is_tuple=False)\n dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0)\n\n outputs, state = rnn.static_rnn(\n dropout_cell,\n inputs,\n sequence_length=sequence_length,\n initial_state=cell.zero_state(batch_size, dtypes.float64))\n\n self.assertEqual(len(outputs), len(inputs))\n\n variables_lib.global_variables_initializer().run(feed_dict={\n sequence_length: [2, 3]\n })\n input_value = np.asarray(\n np.random.randn(batch_size, input_size), dtype=np.float64)\n values = sess.run(\n outputs, feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n state_value = sess.run(\n [state], feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n self.assertEqual(values[0].dtype, input_value.dtype)\n self.assertEqual(state_value[0].dtype, input_value.dtype)\n\n def testSharingWeightsWithReuse(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)\n initializer_d = init_ops.random_uniform_initializer(\n -1, 1, seed=self._seed + 1)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n 
initializer=initializer,\n state_is_tuple=False)\n cell_d = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer_d,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"share_scope\"):\n outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"share_scope\", reuse=True):\n outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with variable_scope.variable_scope(\"diff_scope\"):\n outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32)\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n output_values = sess.run(\n outputs0 + outputs1 + outputs2, feed_dict={\n inputs[0]: input_value\n })\n outputs0_values = output_values[:max_length]\n outputs1_values = output_values[max_length:2 * max_length]\n outputs2_values = output_values[2 * max_length:]\n self.assertEqual(len(outputs0_values), len(outputs1_values))\n self.assertEqual(len(outputs0_values), len(outputs2_values))\n for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):\n # Same weights used by both RNNs so outputs should be the same.\n self.assertAllEqual(o1, o2)\n # Different weights used so outputs should be different.\n self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6)\n\n def testSharingWeightsWithDifferentNamescope(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=False)\n\n with ops_lib.name_scope(\"scope0\"):\n with variable_scope.variable_scope(\"share_scope\"):\n outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n with ops_lib.name_scope(\"scope1\"):\n with variable_scope.variable_scope(\"share_scope\", reuse=True):\n outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)\n\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n output_values = sess.run(\n outputs0 + outputs1, feed_dict={\n inputs[0]: input_value\n })\n outputs0_values = output_values[:max_length]\n outputs1_values = output_values[max_length:]\n self.assertEqual(len(outputs0_values), len(outputs1_values))\n for out0, out1 in zip(outputs0_values, outputs1_values):\n self.assertAllEqual(out0, out1)\n\n def testDynamicRNNAllowsUnknownTimeDimension(self):\n inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])\n cell = rnn_cell.GRUCell(30)\n # Smoke test, this should not raise an error\n rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)\n\n @test_util.run_in_graph_and_eager_modes\n def testDynamicRNNWithTupleStates(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n sequence_length = [4, 6]\n in_graph_mode = not context.executing_eagerly()\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n if in_graph_mode:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n else:\n inputs = max_length * [\n constant_op.constant(\n np.random.randn(batch_size, input_size).astype(np.float32))\n ]\n inputs_c = 
array_ops.stack(inputs)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n num_proj=num_proj,\n initializer=initializer,\n state_is_tuple=True)\n with variable_scope.variable_scope(\"root\") as scope:\n outputs_static, state_static = rnn.static_rnn(\n cell,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n scope.reuse_variables()\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length,\n scope=scope)\n self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))\n self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))\n self.assertEqual(state_static[0], state_static.c)\n self.assertEqual(state_static[1], state_static.h)\n self.assertEqual(state_dynamic[0], state_dynamic.c)\n self.assertEqual(state_dynamic[1], state_dynamic.h)\n\n if in_graph_mode:\n variables_lib.global_variables_initializer().run()\n input_value = np.random.randn(batch_size, input_size)\n outputs_static = sess.run(\n outputs_static, feed_dict={\n inputs[0]: input_value\n })\n outputs_dynamic = sess.run(\n outputs_dynamic, feed_dict={\n inputs[0]: input_value\n })\n state_static = sess.run(\n state_static, feed_dict={\n inputs[0]: input_value\n })\n state_dynamic = sess.run(\n state_dynamic, feed_dict={\n inputs[0]: input_value\n })\n\n if in_graph_mode:\n self.assertAllEqual(outputs_static, outputs_dynamic)\n else:\n self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)\n self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))\n\n @test_util.run_in_graph_and_eager_modes\n def testDynamicRNNWithNestedTupleStates(self):\n num_units = 3\n input_size = 5\n batch_size = 2\n num_proj = 4\n max_length = 8\n sequence_length = [4, 6]\n in_graph_mode = not context.executing_eagerly()\n with self.test_session(graph=ops_lib.Graph()) as sess:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n if in_graph_mode:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None, input_size))\n ]\n else:\n inputs = max_length * [\n constant_op.constant(\n np.random.randn(batch_size, input_size).astype(np.float32))\n ]\n inputs_c = array_ops.stack(inputs)\n\n def _cell(i):\n return rnn_cell.LSTMCell(\n num_units + i,\n use_peepholes=True,\n num_proj=num_proj + i,\n initializer=initializer,\n state_is_tuple=True)\n\n # This creates a state tuple which has 4 sub-tuples of length 2 each.\n cell = rnn_cell.MultiRNNCell(\n [_cell(i) for i in range(4)], state_is_tuple=True)\n\n self.assertEqual(len(cell.state_size), 4)\n for i in range(4):\n self.assertEqual(len(cell.state_size[i]), 2)\n\n test_zero = cell.zero_state(1, dtypes.float32)\n self.assertEqual(len(test_zero), 4)\n for i in range(4):\n self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])\n self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])\n\n with variable_scope.variable_scope(\"root\") as scope:\n outputs_static, state_static = rnn.static_rnn(\n cell,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n scope.reuse_variables()\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length,\n scope=scope)\n\n if in_graph_mode:\n input_value = np.random.randn(batch_size, input_size)\n variables_lib.global_variables_initializer().run()\n outputs_static = sess.run(\n outputs_static, 
feed_dict={\n inputs[0]: input_value\n })\n outputs_dynamic = sess.run(\n outputs_dynamic, feed_dict={\n inputs[0]: input_value\n })\n state_static = sess.run(\n nest.flatten(state_static), feed_dict={\n inputs[0]: input_value\n })\n state_dynamic = sess.run(\n nest.flatten(state_dynamic), feed_dict={\n inputs[0]: input_value\n })\n\n if in_graph_mode:\n self.assertAllEqual(outputs_static, outputs_dynamic)\n else:\n self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)\n state_static = nest.flatten(state_static)\n state_dynamic = nest.flatten(state_dynamic)\n self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))\n\n def _testDynamicEquivalentToStaticRNN(self, use_sequence_length):\n time_steps = 8\n num_units = 3\n num_proj = 4\n input_size = 5\n batch_size = 2\n\n input_values = np.random.randn(time_steps, batch_size, input_size).astype(\n np.float32)\n\n if use_sequence_length:\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n else:\n sequence_length = None\n\n in_graph_mode = not context.executing_eagerly()\n\n # TODO(b/68017812): Eager ignores operation seeds, so we need to create a\n # single cell and reuse it across the static and dynamic RNNs. Remove this\n # special case once is fixed.\n if not in_graph_mode:\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n ########### Step 1: Run static graph and generate readouts\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n if in_graph_mode:\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n else:\n concat_inputs = constant_op.constant(input_values)\n inputs = array_ops.unstack(concat_inputs)\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n\n # TODO(akshayka): Remove special case once b/68017812 is fixed.\n if in_graph_mode:\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"dynamic_scope\"):\n outputs_static, state_static = rnn.static_rnn(\n cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)\n\n if in_graph_mode:\n # Generate gradients and run sessions to obtain outputs\n feeds = {concat_inputs: input_values}\n # Initialize\n variables_lib.global_variables_initializer().run(feed_dict=feeds)\n # Generate gradients of sum of outputs w.r.t. inputs\n static_gradients = gradients_impl.gradients(\n outputs_static + [state_static], [concat_inputs])\n # Generate gradients of individual outputs w.r.t. inputs\n static_individual_gradients = nest.flatten([\n gradients_impl.gradients(y, [concat_inputs])\n for y in [outputs_static[0], outputs_static[-1], state_static]\n ])\n # Generate gradients of individual variables w.r.t. 
inputs\n trainable_variables = ops_lib.get_collection(\n ops_lib.GraphKeys.TRAINABLE_VARIABLES)\n assert len(trainable_variables) > 1, (\n \"Count of trainable variables: %d\" % len(trainable_variables))\n # pylint: disable=bad-builtin\n static_individual_variable_gradients = nest.flatten([\n gradients_impl.gradients(y, trainable_variables)\n for y in [outputs_static[0], outputs_static[-1], state_static]\n ])\n # Test forward pass\n values_static = sess.run(outputs_static, feed_dict=feeds)\n (state_value_static,) = sess.run((state_static,), feed_dict=feeds)\n\n # Test gradients to inputs and variables w.r.t. outputs & final state\n static_grad_values = sess.run(static_gradients, feed_dict=feeds)\n\n static_individual_grad_values = sess.run(\n static_individual_gradients, feed_dict=feeds)\n\n static_individual_var_grad_values = sess.run(\n static_individual_variable_gradients, feed_dict=feeds)\n\n ########## Step 2: Run dynamic graph and generate readouts\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n if in_graph_mode:\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n else:\n concat_inputs = constant_op.constant(input_values)\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n\n # TODO(akshayka): Remove this special case once b/68017812 is\n # fixed.\n if in_graph_mode:\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=True,\n initializer=initializer,\n num_proj=num_proj,\n state_is_tuple=False)\n\n with variable_scope.variable_scope(\"dynamic_scope\"):\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs=concat_inputs,\n sequence_length=sequence_length,\n time_major=True,\n dtype=dtypes.float32)\n split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)\n\n if in_graph_mode:\n feeds = {concat_inputs: input_values}\n\n # Initialize\n variables_lib.global_variables_initializer().run(feed_dict=feeds)\n\n # Generate gradients of sum of outputs w.r.t. inputs\n dynamic_gradients = gradients_impl.gradients(\n split_outputs_dynamic + [state_dynamic], [concat_inputs])\n\n # Generate gradients of several individual outputs w.r.t. inputs\n dynamic_individual_gradients = nest.flatten([\n gradients_impl.gradients(y, [concat_inputs])\n for y in [\n split_outputs_dynamic[0], split_outputs_dynamic[-1],\n state_dynamic\n ]\n ])\n\n # Generate gradients of individual variables w.r.t. inputs\n trainable_variables = ops_lib.get_collection(\n ops_lib.GraphKeys.TRAINABLE_VARIABLES)\n assert len(trainable_variables) > 1, (\n \"Count of trainable variables: %d\" % len(trainable_variables))\n dynamic_individual_variable_gradients = nest.flatten([\n gradients_impl.gradients(y, trainable_variables)\n for y in [\n split_outputs_dynamic[0], split_outputs_dynamic[-1],\n state_dynamic\n ]\n ])\n\n # Test forward pass\n values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)\n (state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)\n\n # Test gradients to inputs and variables w.r.t. 
outputs & final state\n dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)\n\n dynamic_individual_grad_values = sess.run(\n dynamic_individual_gradients, feed_dict=feeds)\n\n dynamic_individual_var_grad_values = sess.run(\n dynamic_individual_variable_gradients, feed_dict=feeds)\n\n ######### Step 3: Comparisons\n if not in_graph_mode:\n values_static = outputs_static\n values_dynamic = split_outputs_dynamic\n state_value_static = state_static\n state_value_dynamic = state_dynamic\n\n self.assertEqual(len(values_static), len(values_dynamic))\n for (value_static, value_dynamic) in zip(values_static, values_dynamic):\n self.assertAllEqual(value_static, value_dynamic)\n self.assertAllEqual(state_value_static, state_value_dynamic)\n\n if in_graph_mode:\n\n self.assertAllEqual(static_grad_values, dynamic_grad_values)\n\n self.assertEqual(\n len(static_individual_grad_values),\n len(dynamic_individual_grad_values))\n self.assertEqual(\n len(static_individual_var_grad_values),\n len(dynamic_individual_var_grad_values))\n\n for i, (a, b) in enumerate(\n zip(static_individual_grad_values, dynamic_individual_grad_values)):\n tf_logging.info(\"Comparing individual gradients iteration %d\" % i)\n self.assertAllEqual(a, b)\n\n for i, (a, b) in enumerate(\n zip(static_individual_var_grad_values,\n dynamic_individual_var_grad_values)):\n tf_logging.info(\n \"Comparing individual variable gradients iteration %d\" % i)\n self.assertAllEqual(a, b)\n\n @test_util.run_in_graph_and_eager_modes\n def testDynamicEquivalentToStaticRNN(self):\n self._testDynamicEquivalentToStaticRNN(use_sequence_length=True)\n self._testDynamicEquivalentToStaticRNN(use_sequence_length=False)\n\n\nclass BidirectionalRNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n sequence_length = array_ops.placeholder(\n dtypes.int64) if use_sequence_length else None\n cell_fw = rnn_cell.LSTMCell(\n num_units, input_size, initializer=initializer, state_is_tuple=False)\n cell_bw = rnn_cell.LSTMCell(\n num_units, input_size, initializer=initializer, state_is_tuple=False)\n inputs = max_length * [\n array_ops.placeholder(\n dtypes.float32,\n shape=(batch_size, input_size) if use_shape else (None, input_size))\n ]\n outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(\n cell_fw,\n cell_bw,\n inputs,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=scope)\n self.assertEqual(len(outputs), len(inputs))\n for out in outputs:\n self.assertEqual(out.get_shape().as_list(),\n [batch_size if use_shape else None, 2 * num_units])\n\n input_value = np.random.randn(batch_size, input_size)\n outputs = array_ops.stack(outputs)\n\n return input_value, inputs, outputs, state_fw, state_bw, sequence_length\n\n def _testBidirectionalRNN(self, use_shape):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n input_value, inputs, outputs, state_fw, state_bw, sequence_length = (\n self._createBidirectionalRNN(use_shape, True))\n variables_lib.global_variables_initializer().run()\n # Run with pre-specified sequence length of 2, 3\n out, s_fw, s_bw = sess.run(\n [outputs, state_fw, state_bw],\n feed_dict={\n inputs[0]: input_value,\n sequence_length: [2, 3]\n })\n\n # Since the forward and backward LSTM cells were initialized with the\n 
# same parameters, the forward and backward output has to be the same,\n # but reversed in time. The format is output[time][batch][depth], and\n # due to depth concatenation (as num_units=3 for both RNNs):\n # - forward output: out[][][depth] for 0 <= depth < 3\n # - backward output: out[][][depth] for 4 <= depth < 6\n #\n # First sequence in batch is length=2\n # Check that the time=0 forward output is equal to time=1 backward output\n self.assertEqual(out[0][0][0], out[1][0][3])\n self.assertEqual(out[0][0][1], out[1][0][4])\n self.assertEqual(out[0][0][2], out[1][0][5])\n # Check that the time=1 forward output is equal to time=0 backward output\n self.assertEqual(out[1][0][0], out[0][0][3])\n self.assertEqual(out[1][0][1], out[0][0][4])\n self.assertEqual(out[1][0][2], out[0][0][5])\n\n # Second sequence in batch is length=3\n # Check that the time=0 forward output is equal to time=2 backward output\n self.assertEqual(out[0][1][0], out[2][1][3])\n self.assertEqual(out[0][1][1], out[2][1][4])\n self.assertEqual(out[0][1][2], out[2][1][5])\n # Check that the time=1 forward output is equal to time=1 backward output\n self.assertEqual(out[1][1][0], out[1][1][3])\n self.assertEqual(out[1][1][1], out[1][1][4])\n self.assertEqual(out[1][1][2], out[1][1][5])\n # Check that the time=2 forward output is equal to time=0 backward output\n self.assertEqual(out[2][1][0], out[0][1][3])\n self.assertEqual(out[2][1][1], out[0][1][4])\n self.assertEqual(out[2][1][2], out[0][1][5])\n # Via the reasoning above, the forward and backward final state should be\n # exactly the same\n self.assertAllClose(s_fw, s_bw)\n\n def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n input_value, inputs, outputs, state_fw, state_bw, _ = (\n self._createBidirectionalRNN(use_shape, False))\n variables_lib.global_variables_initializer().run()\n out, s_fw, s_bw = sess.run(\n [outputs, state_fw, state_bw], feed_dict={\n inputs[0]: input_value\n })\n\n # Since the forward and backward LSTM cells were initialized with the\n # same parameters, the forward and backward output has to be the same,\n # but reversed in time. The format is output[time][batch][depth], and\n # due to depth concatenation (as num_units=3 for both RNNs):\n # - forward output: out[][][depth] for 0 <= depth < 3\n # - backward output: out[][][depth] for 4 <= depth < 6\n #\n # Both sequences in batch are length=8. 
Check that the time=i\n # forward output is equal to time=8-1-i backward output\n for i in xrange(8):\n self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])\n self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])\n self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])\n for i in xrange(8):\n self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])\n self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])\n self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])\n # Via the reasoning above, the forward and backward final state should be\n # exactly the same\n self.assertAllClose(s_fw, s_bw)\n\n def testBidirectionalRNN(self):\n self._testBidirectionalRNN(use_shape=False)\n self._testBidirectionalRNN(use_shape=True)\n\n def testBidirectionalRNNWithoutSequenceLength(self):\n self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)\n self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)\n\n def _createBidirectionalDynamicRNN(self,\n use_shape,\n use_state_tuple,\n use_time_major,\n use_sequence_length,\n scope=None):\n num_units = 3\n input_size = 5\n batch_size = 2\n max_length = 8\n\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n sequence_length = (\n array_ops.placeholder(dtypes.int64) if use_sequence_length else None)\n cell_fw = rnn_cell.LSTMCell(\n num_units, initializer=initializer, state_is_tuple=use_state_tuple)\n cell_bw = rnn_cell.LSTMCell(\n num_units, initializer=initializer, state_is_tuple=use_state_tuple)\n inputs = max_length * [\n array_ops.placeholder(\n dtypes.float32,\n shape=(batch_size if use_shape else None, input_size))\n ]\n inputs_c = array_ops.stack(inputs)\n if not use_time_major:\n inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])\n outputs, states = rnn.bidirectional_dynamic_rnn(\n cell_fw,\n cell_bw,\n inputs_c,\n sequence_length,\n dtype=dtypes.float32,\n time_major=use_time_major,\n scope=scope)\n outputs = array_ops.concat(outputs, 2)\n state_fw, state_bw = states\n outputs_shape = [None, max_length, 2 * num_units]\n if use_shape:\n outputs_shape[0] = batch_size\n if use_time_major:\n outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]\n self.assertEqual(outputs.get_shape().as_list(), outputs_shape)\n\n input_value = np.random.randn(batch_size, input_size)\n\n return input_value, inputs, outputs, state_fw, state_bw, sequence_length\n\n def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,\n use_time_major, use_sequence_length):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n input_value, inputs, outputs, state_fw, state_bw, sequence_length = (\n self._createBidirectionalDynamicRNN(\n use_shape, use_state_tuple, use_time_major, use_sequence_length))\n variables_lib.global_variables_initializer().run()\n # Run with pre-specified sequence length of 2, 3\n feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})\n feed_dict.update({inputs[0]: input_value})\n if use_state_tuple:\n out, c_fw, m_fw, c_bw, m_bw = sess.run(\n [outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],\n feed_dict=feed_dict)\n s_fw = (c_fw, m_fw)\n s_bw = (c_bw, m_bw)\n else:\n feed_dict.update({inputs[0]: input_value})\n out, s_fw, s_bw = sess.run(\n [outputs, state_fw, state_bw], feed_dict=feed_dict)\n\n # Since the forward and backward LSTM cells were initialized with the\n # same parameters, the forward and backward output has to be the same,\n # but reversed in time. 
The format is output[time][batch][depth], and\n # due to depth concatenation (as num_units=3 for both RNNs):\n # - forward output: out[][][depth] for 0 <= depth < 3\n # - backward output: out[][][depth] for 4 <= depth < 6\n #\n if not use_time_major:\n out = np.swapaxes(out, 0, 1)\n\n if use_sequence_length:\n # First sequence in batch is length=2\n # Check that the t=0 forward output is equal to t=1 backward output\n self.assertEqual(out[0][0][0], out[1][0][3])\n self.assertEqual(out[0][0][1], out[1][0][4])\n self.assertEqual(out[0][0][2], out[1][0][5])\n # Check that the t=1 forward output is equal to t=0 backward output\n self.assertEqual(out[1][0][0], out[0][0][3])\n self.assertEqual(out[1][0][1], out[0][0][4])\n self.assertEqual(out[1][0][2], out[0][0][5])\n\n # Second sequence in batch is length=3\n # Check that the t=0 forward output is equal to t=2 backward output\n self.assertEqual(out[0][1][0], out[2][1][3])\n self.assertEqual(out[0][1][1], out[2][1][4])\n self.assertEqual(out[0][1][2], out[2][1][5])\n # Check that the t=1 forward output is equal to t=1 backward output\n self.assertEqual(out[1][1][0], out[1][1][3])\n self.assertEqual(out[1][1][1], out[1][1][4])\n self.assertEqual(out[1][1][2], out[1][1][5])\n # Check that the t=2 forward output is equal to t=0 backward output\n self.assertEqual(out[2][1][0], out[0][1][3])\n self.assertEqual(out[2][1][1], out[0][1][4])\n self.assertEqual(out[2][1][2], out[0][1][5])\n # Via the reasoning above, the forward and backward final state should\n # be exactly the same\n self.assertAllClose(s_fw, s_bw)\n else: # not use_sequence_length\n max_length = 8 # from createBidirectionalDynamicRNN\n for t in range(max_length):\n self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])\n self.assertAllClose(s_fw, s_bw)\n\n def testBidirectionalDynamicRNN(self):\n # Generate 2^5 option values\n # from [True, True, True, True, True] to [False, False, False, False, False]\n options = itertools.product([True, False], repeat=4)\n for option in options:\n self._testBidirectionalDynamicRNN(\n use_shape=option[0],\n use_state_tuple=option[1],\n use_time_major=option[2],\n use_sequence_length=option[3])\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n # REMARKS: factory(scope) is a function accepting a scope\n # as an argument, such scope can be None, a string\n # or a VariableScope instance.\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n\n # check that all the variables names starts\n # with the proper scope.\n variables_lib.global_variables_initializer()\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"bidirectional_rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"BiRNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testBidirectionalRNNScope(self):\n\n def factory(scope):\n return self._createBidirectionalRNN(\n use_shape=True, use_sequence_length=True, scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n def testBidirectionalDynamicRNNScope(self):\n\n def get_factory(use_time_major):\n\n def factory(scope):\n return 
self._createBidirectionalDynamicRNN(\n use_shape=True,\n use_state_tuple=True,\n use_sequence_length=True,\n use_time_major=use_time_major,\n scope=scope)\n\n return factory\n\n self._testScope(get_factory(True), use_outer_scope=True)\n self._testScope(get_factory(True), use_outer_scope=False)\n self._testScope(get_factory(True), prefix=None, use_outer_scope=False)\n self._testScope(get_factory(False), use_outer_scope=True)\n self._testScope(get_factory(False), use_outer_scope=False)\n self._testScope(get_factory(False), prefix=None, use_outer_scope=False)\n\n\nclass MultiDimensionalLSTMTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testMultiDimensionalLSTMAllRNNContainers(self):\n feature_dims = (3, 4, 5)\n input_size = feature_dims\n batch_size = 2\n max_length = 8\n sequence_length = [4, 6]\n with self.test_session(graph=ops_lib.Graph()) as sess:\n inputs = max_length * [\n array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)\n ]\n inputs_using_dim = max_length * [\n array_ops.placeholder(\n dtypes.float32, shape=(batch_size,) + input_size)\n ]\n inputs_c = array_ops.stack(inputs)\n # Create a cell for the whole test. This is fine because the cell has no\n # variables.\n cell = DummyMultiDimensionalLSTM(feature_dims)\n state_saver = TestStateSaver(batch_size, input_size)\n outputs_static, state_static = rnn.static_rnn(\n cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length)\n outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(\n cell,\n cell,\n inputs_using_dim,\n dtype=dtypes.float32,\n sequence_length=sequence_length)\n outputs_sav, state_sav = rnn.static_state_saving_rnn(\n cell,\n inputs_using_dim,\n sequence_length=sequence_length,\n state_saver=state_saver,\n state_name=(\"h\", \"c\"))\n\n self.assertEqual(outputs_dynamic.get_shape().as_list(),\n inputs_c.get_shape().as_list())\n for out, inp in zip(outputs_static, inputs):\n self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())\n for out, inp in zip(outputs_bid, inputs_using_dim):\n input_shape_list = inp.get_shape().as_list()\n # fwd and bwd activations are concatenated along the second dim.\n input_shape_list[1] *= 2\n self.assertEqual(out.get_shape().as_list(), input_shape_list)\n\n variables_lib.global_variables_initializer().run()\n\n input_total_size = (batch_size,) + input_size\n input_value = np.random.randn(*input_total_size)\n outputs_static_v = sess.run(\n outputs_static, feed_dict={\n inputs[0]: input_value\n })\n outputs_dynamic_v = sess.run(\n outputs_dynamic, feed_dict={\n inputs[0]: input_value\n })\n outputs_bid_v = sess.run(\n outputs_bid, feed_dict={\n inputs_using_dim[0]: input_value\n })\n outputs_sav_v = sess.run(\n outputs_sav, feed_dict={\n inputs_using_dim[0]: input_value\n })\n\n self.assertAllEqual(outputs_static_v, outputs_dynamic_v)\n self.assertAllEqual(outputs_static_v, outputs_sav_v)\n outputs_static_array = np.array(outputs_static_v)\n outputs_static_array_double = np.concatenate(\n (outputs_static_array, outputs_static_array), axis=2)\n outputs_bid_array = np.array(outputs_bid_v)\n self.assertAllEqual(outputs_static_array_double, outputs_bid_array)\n\n state_static_v = sess.run(\n state_static, feed_dict={\n inputs[0]: input_value\n })\n state_dynamic_v = sess.run(\n state_dynamic, feed_dict={\n inputs[0]: input_value\n })\n state_bid_fw_v = 
sess.run(\n state_fw, feed_dict={\n inputs_using_dim[0]: input_value\n })\n state_bid_bw_v = sess.run(\n state_bw, feed_dict={\n inputs_using_dim[0]: input_value\n })\n state_sav_v = sess.run(\n state_sav, feed_dict={\n inputs_using_dim[0]: input_value\n })\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))\n\n\nclass NestedLSTMTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testNestedIOLSTMAllRNNContainers(self):\n input_size = 5\n batch_size = 2\n state_size = 6\n max_length = 8\n sequence_length = [4, 6]\n with self.test_session(graph=ops_lib.Graph()) as sess:\n state_saver = TestStateSaver(batch_size, state_size)\n single_input = (array_ops.placeholder(\n dtypes.float32, shape=(None, input_size)),\n array_ops.placeholder(\n dtypes.float32, shape=(None, input_size)))\n inputs = max_length * [single_input]\n inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),\n array_ops.stack([input_[1] for input_ in inputs]))\n single_input_using_dim = (array_ops.placeholder(\n dtypes.float32, shape=(batch_size, input_size)),\n array_ops.placeholder(\n dtypes.float32,\n shape=(batch_size, input_size)))\n inputs_using_dim = max_length * [single_input_using_dim]\n\n # Create a cell for the whole test. This is fine because the cell has no\n # variables.\n cell = NestedRNNCell()\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs_c,\n dtype=dtypes.float32,\n time_major=True,\n sequence_length=sequence_length)\n outputs_static, state_static = rnn.static_rnn(\n cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)\n outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(\n cell,\n cell,\n inputs_using_dim,\n dtype=dtypes.float32,\n sequence_length=sequence_length)\n outputs_sav, state_sav = rnn.static_state_saving_rnn(\n cell,\n inputs_using_dim,\n sequence_length=sequence_length,\n state_saver=state_saver,\n state_name=(\"h\", \"c\"))\n\n def _assert_same_shape(input1, input2, double=False):\n flat_input1 = nest.flatten(input1)\n flat_input2 = nest.flatten(input2)\n for inp1, inp2 in zip(flat_input1, flat_input2):\n input_shape = inp1.get_shape().as_list()\n if double:\n input_shape[1] *= 2\n self.assertEqual(input_shape, inp2.get_shape().as_list())\n\n _assert_same_shape(inputs_c, outputs_dynamic)\n _assert_same_shape(inputs, outputs_static)\n _assert_same_shape(inputs_using_dim, outputs_sav)\n _assert_same_shape(inputs_using_dim, outputs_bid, double=True)\n\n variables_lib.global_variables_initializer().run()\n\n input_total_size = (batch_size, input_size)\n input_value = (np.random.randn(*input_total_size),\n np.random.randn(*input_total_size))\n outputs_dynamic_v = sess.run(\n outputs_dynamic, feed_dict={\n single_input: input_value\n })\n outputs_static_v = sess.run(\n outputs_static, feed_dict={\n single_input: input_value\n })\n outputs_sav_v = sess.run(\n outputs_sav, feed_dict={\n single_input_using_dim: input_value\n })\n outputs_bid_v = sess.run(\n outputs_bid, feed_dict={\n single_input_using_dim: input_value\n })\n\n self.assertAllEqual(outputs_static_v,\n np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))\n self.assertAllEqual(outputs_static_v, outputs_sav_v)\n outputs_static_array = np.array(outputs_static_v)\n outputs_static_array_double = 
np.concatenate(\n (outputs_static_array, outputs_static_array), axis=3)\n outputs_bid_array = np.array(outputs_bid_v)\n self.assertAllEqual(outputs_static_array_double, outputs_bid_array)\n\n state_dynamic_v = sess.run(\n state_dynamic, feed_dict={\n single_input: input_value\n })\n state_static_v = sess.run(\n state_static, feed_dict={\n single_input: input_value\n })\n state_bid_fw_v = sess.run(\n state_fw, feed_dict={\n single_input_using_dim: input_value\n })\n state_bid_bw_v = sess.run(\n state_bw, feed_dict={\n single_input_using_dim: input_value\n })\n state_sav_v = sess.run(\n state_sav, feed_dict={\n single_input_using_dim: input_value\n })\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))\n self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))\n\n\nclass StateSaverRNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def _factory(self, scope, state_saver):\n num_units = state_saver.state_size // 2\n batch_size = state_saver.batch_size\n input_size = 5\n max_length = 8\n initializer = init_ops.random_uniform_initializer(\n -0.01, 0.01, seed=self._seed)\n cell = rnn_cell.LSTMCell(\n num_units,\n use_peepholes=False,\n initializer=initializer,\n state_is_tuple=False)\n inputs = max_length * [\n array_ops.zeros(dtype=dtypes.float32, shape=(batch_size, input_size))\n ]\n out, state = rnn.static_state_saving_rnn(\n cell,\n inputs,\n state_saver=state_saver,\n state_name=\"save_lstm\",\n scope=scope)\n return out, state, state_saver\n\n def _testScope(self, prefix=\"prefix\", use_outer_scope=True):\n num_units = 3\n batch_size = 2\n state_saver = TestStateSaver(batch_size, 2 * num_units)\n\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n self._factory(scope=scope, state_saver=state_saver)\n else:\n self._factory(scope=prefix, state_saver=state_saver)\n variables_lib.global_variables_initializer()\n\n # check that all the variables names starts\n # with the proper scope.\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testStateSaverRNNScope(self):\n self._testScope(use_outer_scope=True)\n self._testScope(use_outer_scope=False)\n self._testScope(prefix=None, use_outer_scope=False)\n\n def testStateSaverCallsSaveState(self):\n \"\"\"Test that number of calls to state and save_state is equal.\n\n Test if the order of actual evaluating or skipping evaluation of out,\n state tensors, which are the output tensors from static_state_saving_rnn,\n have influence on number of calls to save_state and state methods of\n state_saver object (the number of calls should be same.)\n \"\"\"\n\n num_units = 3\n batch_size = 2\n state_saver = TestStateSaverWithCounters(batch_size, 2 * num_units)\n out, state, state_saver = self._factory(scope=None, state_saver=state_saver)\n\n with self.test_session() as sess:\n sess.run(variables_lib.global_variables_initializer())\n sess.run(variables_lib.local_variables_initializer())\n\n _, _, num_state_calls, num_save_state_calls = 
sess.run([\n out,\n state,\n state_saver.num_state_calls,\n state_saver.num_save_state_calls])\n self.assertEqual(num_state_calls, num_save_state_calls)\n\n _, num_state_calls, num_save_state_calls = sess.run([\n out,\n state_saver.num_state_calls,\n state_saver.num_save_state_calls])\n self.assertEqual(num_state_calls, num_save_state_calls)\n\n _, num_state_calls, num_save_state_calls = sess.run([\n state,\n state_saver.num_state_calls,\n state_saver.num_save_state_calls])\n self.assertEqual(num_state_calls, num_save_state_calls)\n\nclass GRUTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def testDynamic(self):\n time_steps = 8\n num_units = 3\n input_size = 5\n batch_size = 2\n\n input_values = np.random.randn(time_steps, batch_size, input_size)\n\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n\n cell = rnn_cell.GRUCell(num_units=num_units)\n\n with variable_scope.variable_scope(\"dynamic_scope\"):\n outputs_dynamic, state_dynamic = rnn.dynamic_rnn(\n cell,\n inputs=concat_inputs,\n sequence_length=sequence_length,\n time_major=True,\n dtype=dtypes.float32)\n\n feeds = {concat_inputs: input_values}\n\n # Initialize\n variables_lib.global_variables_initializer().run(feed_dict=feeds)\n\n sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n variables_lib.global_variables_initializer()\n\n # check that all the variables names starts\n # with the proper scope.\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testDynamicScope(self):\n time_steps = 8\n num_units = 3\n input_size = 5\n batch_size = 2\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n\n def factory(scope):\n concat_inputs = array_ops.placeholder(\n dtypes.float32, shape=(time_steps, batch_size, input_size))\n cell = rnn_cell.GRUCell(num_units=num_units)\n return rnn.dynamic_rnn(\n cell,\n inputs=concat_inputs,\n sequence_length=sequence_length,\n time_major=True,\n dtype=dtypes.float32,\n scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n\nclass RawRNNTest(test.TestCase):\n\n def setUp(self):\n self._seed = 23489\n np.random.seed(self._seed)\n\n def _testRawRNN(self, max_time):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = array_ops.placeholder(\n shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)\n sequence_length = array_ops.placeholder(\n shape=(batch_size,), dtype=dtypes.int32)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, 
state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, unused_loop_state):\n emit_output = cell_output # == None for time == 0\n if cell_output is None: # time == 0\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n next_state = cell_state # copy state through\n elements_finished = (time_ >= sequence_length)\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output, None)\n\n reuse_scope = variable_scope.get_variable_scope()\n\n outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)\n outputs = outputs_ta.stack()\n\n reuse_scope.reuse_variables()\n outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(\n cell,\n inputs,\n time_major=True,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n scope=reuse_scope)\n\n variables = variables_lib.trainable_variables()\n gradients = gradients_impl.gradients([outputs, final_state],\n [inputs] + variables)\n gradients_dynamic_rnn = gradients_impl.gradients(\n [outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)\n\n variables_lib.global_variables_initializer().run()\n\n rand_input = np.random.randn(max_time, batch_size, input_depth)\n if max_time == 0:\n rand_seq_len = np.zeros(batch_size)\n else:\n rand_seq_len = np.random.randint(max_time, size=batch_size)\n\n # To ensure same output lengths for dynamic_rnn and raw_rnn\n rand_seq_len[0] = max_time\n\n (outputs_val, outputs_dynamic_rnn_val, final_state_val,\n final_state_dynamic_rnn_val) = sess.run(\n [outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],\n feed_dict={\n inputs: rand_input,\n sequence_length: rand_seq_len\n })\n\n self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)\n self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)\n\n # NOTE: Because with 0 time steps, raw_rnn does not have shape\n # information about the input, it is impossible to perform\n # gradients comparisons as the gradients eval will fail. So\n # this case skips the gradients test.\n if max_time > 0:\n self.assertEqual(len(gradients), len(gradients_dynamic_rnn))\n gradients_val = sess.run(\n gradients,\n feed_dict={\n inputs: rand_input,\n sequence_length: rand_seq_len\n })\n gradients_dynamic_rnn_val = sess.run(\n gradients_dynamic_rnn,\n feed_dict={\n inputs: rand_input,\n sequence_length: rand_seq_len\n })\n self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))\n input_gradients_val = gradients_val[0]\n input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]\n self.assertAllClose(input_gradients_val,\n input_gradients_dynamic_rnn_val)\n for i in range(1, len(gradients_val)):\n self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])\n\n def testRawRNNZeroLength(self):\n # NOTE: Because with 0 time steps, raw_rnn does not have shape\n # information about the input, it is impossible to perform\n # gradients comparisons as the gradients eval will fail. 
So this\n # case skips the gradients test.\n self._testRawRNN(max_time=0)\n\n def testRawRNN(self):\n self._testRawRNN(max_time=10)\n\n def testLoopState(self):\n with self.test_session(graph=ops_lib.Graph()):\n max_time = 10\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = np.random.randn(max_time, batch_size, input_depth)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, loop_state):\n if cell_output is None:\n loop_state = constant_op.constant([0])\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])\n next_state = cell_state\n emit_output = cell_output # == None for time == 0\n elements_finished = array_ops.tile([time_ >= max_time], [batch_size])\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output,\n loop_state)\n\n r = rnn.raw_rnn(cell, loop_fn)\n loop_state = r[-1]\n self.assertEqual([10], loop_state.eval())\n\n def testLoopStateWithTensorArray(self):\n with self.test_session(graph=ops_lib.Graph()):\n max_time = 4\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = np.random.randn(max_time, batch_size, input_depth)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, loop_state):\n if cell_output is None:\n loop_state = tensor_array_ops.TensorArray(\n dynamic_size=True,\n size=0,\n dtype=dtypes.int32,\n clear_after_read=False)\n loop_state = loop_state.write(0, 1)\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n loop_state = loop_state.write(time_,\n loop_state.read(time_ - 1) + time_)\n next_state = cell_state\n emit_output = cell_output # == None for time == 0\n elements_finished = array_ops.tile([time_ >= max_time], [batch_size])\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output,\n loop_state)\n\n r = rnn.raw_rnn(cell, loop_fn)\n loop_state = r[-1]\n loop_state = loop_state.stack()\n self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())\n\n def testEmitDifferentStructureThanCellOutput(self):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n max_time = 10\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n inputs = np.random.randn(max_time, batch_size, input_depth)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n # Verify emit shapes may be unknown by feeding a placeholder that\n # determines an emit shape.\n unknown_dim = array_ops.placeholder(dtype=dtypes.int32)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, _):\n if 
cell_output is None:\n emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),\n array_ops.zeros([unknown_dim], dtype=dtypes.int64))\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),\n array_ops.ones(\n [batch_size, unknown_dim], dtype=dtypes.int64))\n next_state = cell_state\n elements_finished = array_ops.tile([time_ >= max_time], [batch_size])\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output, None)\n\n r = rnn.raw_rnn(cell, loop_fn)\n output_ta = r[0]\n self.assertEqual(2, len(output_ta))\n self.assertEqual([dtypes.int32, dtypes.int64],\n [ta.dtype for ta in output_ta])\n output = [ta.stack() for ta in output_ta]\n output_vals = sess.run(output, feed_dict={unknown_dim: 1})\n self.assertAllEqual(\n np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])\n self.assertAllEqual(\n np.ones((max_time, batch_size, 1), np.int64), output_vals[1])\n\n def _testScope(self, factory, prefix=\"prefix\", use_outer_scope=True):\n with self.test_session(use_gpu=True, graph=ops_lib.Graph()):\n if use_outer_scope:\n with variable_scope.variable_scope(prefix) as scope:\n factory(scope)\n else:\n factory(prefix)\n variables_lib.global_variables_initializer()\n\n # check that all the variables names starts\n # with the proper scope.\n all_vars = variables_lib.global_variables()\n prefix = prefix or \"rnn\"\n scope_vars = [v for v in all_vars if v.name.startswith(prefix + \"/\")]\n tf_logging.info(\"RNN with scope: %s (%s)\" %\n (prefix, \"scope\" if use_outer_scope else \"str\"))\n for v in scope_vars:\n tf_logging.info(v.name)\n self.assertEqual(len(scope_vars), len(all_vars))\n\n def testRawRNNScope(self):\n max_time = 10\n batch_size = 16\n input_depth = 4\n num_units = 3\n\n def factory(scope):\n inputs = array_ops.placeholder(\n shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)\n sequence_length = array_ops.placeholder(\n shape=(batch_size,), dtype=dtypes.int32)\n inputs_ta = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)\n\n def loop_fn(time_, cell_output, cell_state, unused_loop_state):\n emit_output = cell_output # == None for time == 0\n if cell_output is None: # time == 0\n next_state = cell.zero_state(batch_size, dtypes.float32)\n else:\n next_state = cell_state\n\n elements_finished = (time_ >= sequence_length)\n finished = math_ops.reduce_all(elements_finished)\n # For the very final iteration, we must emit a dummy input\n next_input = control_flow_ops.cond(\n finished,\n lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),\n lambda: inputs_ta.read(time_))\n return (elements_finished, next_input, next_state, emit_output, None)\n\n return rnn.raw_rnn(cell, loop_fn, scope=scope)\n\n self._testScope(factory, use_outer_scope=True)\n self._testScope(factory, use_outer_scope=False)\n self._testScope(factory, prefix=None, use_outer_scope=False)\n\n\nclass DeviceWrapperCell(rnn_cell.RNNCell):\n \"\"\"Class to ensure cell calculation happens on a specific device.\"\"\"\n\n def __init__(self, cell, device):\n self._cell = cell\n self._device = 
device\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n def __call__(self, input_, state, scope=None):\n if self._device is not None:\n with ops_lib.device(self._device):\n return self._cell(input_, state, scope=scope)\n else:\n return self._cell(input_, state, scope=scope)\n\n\nclass TensorArrayOnCorrectDeviceTest(test.TestCase):\n\n def _execute_rnn_on(self,\n rnn_device=None,\n cell_device=None,\n input_device=None):\n batch_size = 3\n time_steps = 7\n input_size = 5\n num_units = 10\n\n cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)\n gpu_cell = DeviceWrapperCell(cell, cell_device)\n inputs = np.random.randn(batch_size, time_steps, input_size).astype(\n np.float32)\n sequence_length = np.random.randint(0, time_steps, size=batch_size)\n\n if input_device is not None:\n with ops_lib.device(input_device):\n inputs = constant_op.constant(inputs)\n\n if rnn_device is not None:\n with ops_lib.device(rnn_device):\n outputs, _ = rnn.dynamic_rnn(\n gpu_cell,\n inputs,\n sequence_length=sequence_length,\n dtype=dtypes.float32)\n else:\n outputs, _ = rnn.dynamic_rnn(\n gpu_cell,\n inputs,\n sequence_length=sequence_length,\n dtype=dtypes.float32)\n\n with self.test_session(use_gpu=True) as sess:\n opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n variables_lib.global_variables_initializer().run()\n sess.run(outputs, options=opts, run_metadata=run_metadata)\n\n return run_metadata\n\n def _retrieve_cpu_gpu_stats(self, run_metadata):\n cpu_stats = None\n gpu_stats = None\n step_stats = run_metadata.step_stats\n for ds in step_stats.dev_stats:\n if \"cpu:0\" in ds.device[-5:].lower():\n cpu_stats = ds.node_stats\n if \"gpu:0\" == ds.device[-5:].lower():\n gpu_stats = ds.node_stats\n return cpu_stats, gpu_stats\n\n def testRNNOnCPUCellOnGPU(self):\n if not test.is_gpu_available():\n return # Test requires access to a GPU\n\n gpu_dev = test.gpu_device_name()\n run_metadata = self._execute_rnn_on(\n rnn_device=\"/cpu:0\", cell_device=gpu_dev)\n cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)\n\n def _assert_in(op_str, in_stats, out_stats):\n self.assertTrue(any(op_str in s.node_name for s in in_stats))\n self.assertFalse(any(op_str in s.node_name for s in out_stats))\n\n # Writes happen at output of RNN cell\n _assert_in(\"TensorArrayWrite\", gpu_stats, cpu_stats)\n # Gather happens on final TensorArray\n _assert_in(\"TensorArrayGather\", gpu_stats, cpu_stats)\n # Reads happen at input to RNN cell\n _assert_in(\"TensorArrayRead\", cpu_stats, gpu_stats)\n # Scatters happen to get initial input into TensorArray\n _assert_in(\"TensorArrayScatter\", cpu_stats, gpu_stats)\n\n def testRNNOnCPUCellOnCPU(self):\n if not test.is_gpu_available():\n return # Test requires access to a GPU\n\n gpu_dev = test.gpu_device_name()\n run_metadata = self._execute_rnn_on(\n rnn_device=\"/cpu:0\", cell_device=\"/cpu:0\", input_device=gpu_dev)\n cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)\n\n def _assert_in(op_str, in_stats, out_stats):\n self.assertTrue(any(op_str in s.node_name for s in in_stats))\n self.assertFalse(any(op_str in s.node_name for s in out_stats))\n\n # All TensorArray operations happen on CPU\n _assert_in(\"TensorArray\", cpu_stats, gpu_stats)\n\n def testInputOnGPUCellNotDeclared(self):\n if not test.is_gpu_available():\n return # Test requires access to a GPU\n\n gpu_dev = 
test.gpu_device_name()\n run_metadata = self._execute_rnn_on(input_device=gpu_dev)\n cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)\n\n def _assert_in(op_str, in_stats, out_stats):\n self.assertTrue(any(op_str in s.node_name for s in in_stats))\n self.assertFalse(any(op_str in s.node_name for s in out_stats))\n\n # Everything happens on GPU\n _assert_in(\"TensorArray\", gpu_stats, cpu_stats)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"numpy.ones",
"numpy.random.seed",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.rnn_cell.DropoutWrapper",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.platform.test.is_gpu_available",
"numpy.ones_like",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.rnn.static_rnn",
"tensorflow.python.ops.rnn.static_bidirectional_rnn",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.init_ops.random_uniform_initializer",
"numpy.transpose",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.rnn_cell.GRUCell",
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.ops.variables.local_variables_initializer",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.rnn.dynamic_rnn",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.framework.ops.device",
"tensorflow.python.platform.tf_logging.info",
"numpy.zeros",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.platform.test.gpu_device_name",
"tensorflow.python.ops.rnn.static_state_saving_rnn",
"tensorflow.python.ops.rnn.raw_rnn",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.squeeze",
"numpy.hstack",
"tensorflow.python.ops.rnn.bidirectional_dynamic_rnn",
"tensorflow.python.ops.variables.Variable",
"numpy.linalg.norm",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"numpy.zeros_like",
"numpy.swapaxes",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"numpy.random.randn",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.rnn_cell.LSTMCell",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.variables.global_variables",
"numpy.array",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.concatenate",
"numpy.random.randint",
"tensorflow.python.ops.array_ops.transpose"
]
] |
oricou/pandas | [
"9405e58d9268041f5416711c051cf5429a19bf49",
"9405e58d9268041f5416711c051cf5429a19bf49"
] | [
"pandas/tests/indexing/common.py",
"pandas/tests/indexing/multiindex/test_chaining_and_caching.py"
] | [
"\"\"\" common utilities \"\"\"\nimport itertools\n\nimport numpy as np\n\nfrom pandas import (\n DataFrame,\n Float64Index,\n MultiIndex,\n Series,\n UInt64Index,\n date_range,\n)\nimport pandas._testing as tm\n\n\ndef _mklbl(prefix, n):\n return [f\"{prefix}{i}\" for i in range(n)]\n\n\ndef _axify(obj, key, axis):\n # create a tuple accessor\n axes = [slice(None)] * obj.ndim\n axes[axis] = key\n return tuple(axes)\n\n\nclass Base:\n \"\"\" indexing comprehensive base class \"\"\"\n\n _kinds = {\"series\", \"frame\"}\n _typs = {\n \"ints\",\n \"uints\",\n \"labels\",\n \"mixed\",\n \"ts\",\n \"floats\",\n \"empty\",\n \"ts_rev\",\n \"multi\",\n }\n\n def setup_method(self, method):\n\n self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))\n self.frame_ints = DataFrame(\n np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)\n )\n\n self.series_uints = Series(\n np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))\n )\n self.frame_uints = DataFrame(\n np.random.randn(4, 4),\n index=UInt64Index(range(0, 8, 2)),\n columns=UInt64Index(range(0, 12, 3)),\n )\n\n self.series_floats = Series(\n np.random.rand(4), index=Float64Index(range(0, 8, 2))\n )\n self.frame_floats = DataFrame(\n np.random.randn(4, 4),\n index=Float64Index(range(0, 8, 2)),\n columns=Float64Index(range(0, 12, 3)),\n )\n\n m_idces = [\n MultiIndex.from_product([[1, 2], [3, 4]]),\n MultiIndex.from_product([[5, 6], [7, 8]]),\n MultiIndex.from_product([[9, 10], [11, 12]]),\n ]\n\n self.series_multi = Series(np.random.rand(4), index=m_idces[0])\n self.frame_multi = DataFrame(\n np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]\n )\n\n self.series_labels = Series(np.random.randn(4), index=list(\"abcd\"))\n self.frame_labels = DataFrame(\n np.random.randn(4, 4), index=list(\"abcd\"), columns=list(\"ABCD\")\n )\n\n self.series_mixed = Series(np.random.randn(4), index=[2, 4, \"null\", 8])\n self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, \"null\", 8])\n\n self.series_ts = Series(\n np.random.randn(4), index=date_range(\"20130101\", periods=4)\n )\n self.frame_ts = DataFrame(\n np.random.randn(4, 4), index=date_range(\"20130101\", periods=4)\n )\n\n dates_rev = date_range(\"20130101\", periods=4).sort_values(ascending=False)\n self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)\n self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)\n\n self.frame_empty = DataFrame()\n self.series_empty = Series(dtype=object)\n\n # form agglomerates\n for kind in self._kinds:\n d = {}\n for typ in self._typs:\n d[typ] = getattr(self, f\"{kind}_{typ}\")\n\n setattr(self, kind, d)\n\n def generate_indices(self, f, values=False):\n \"\"\"\n generate the indices\n if values is True , use the axis values\n is False, use the range\n \"\"\"\n axes = f.axes\n if values:\n axes = (list(range(len(ax))) for ax in axes)\n\n return itertools.product(*axes)\n\n def get_value(self, name, f, i, values=False):\n \"\"\" return the value for the location i \"\"\"\n # check against values\n if values:\n return f.values[i]\n\n elif name == \"iat\":\n return f.iloc[i]\n else:\n assert name == \"at\"\n return f.loc[i]\n\n def check_values(self, f, func, values=False):\n\n if f is None:\n return\n axes = f.axes\n indicies = itertools.product(*axes)\n\n for i in indicies:\n result = getattr(f, func)[i]\n\n # check against values\n if values:\n expected = f.values[i]\n else:\n expected = f\n for a in reversed(i):\n expected = expected.__getitem__(a)\n\n 
tm.assert_almost_equal(result, expected)\n\n def check_result(self, method, key, typs=None, axes=None, fails=None):\n def _eq(axis, obj, key):\n \"\"\" compare equal for these 2 keys \"\"\"\n axified = _axify(obj, key, axis)\n try:\n getattr(obj, method).__getitem__(axified)\n\n except (IndexError, TypeError, KeyError) as detail:\n\n # if we are in fails, the ok, otherwise raise it\n if fails is not None:\n if isinstance(detail, fails):\n return\n raise\n\n if typs is None:\n typs = self._typs\n\n if axes is None:\n axes = [0, 1]\n else:\n assert axes in [0, 1]\n axes = [axes]\n\n # check\n for kind in self._kinds:\n\n d = getattr(self, kind)\n for ax in axes:\n for typ in typs:\n assert typ in self._typs\n\n obj = d[typ]\n if ax < obj.ndim:\n _eq(axis=ax, obj=obj, key=key)\n",
"import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\nimport pandas.core.common as com\n\n\ndef test_detect_chained_assignment():\n # Inplace ops, originally from:\n # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug\n a = [12, 23]\n b = [123, None]\n c = [1234, 2345]\n d = [12345, 23456]\n tuples = [(\"eyes\", \"left\"), (\"eyes\", \"right\"), (\"ears\", \"left\"), (\"ears\", \"right\")]\n events = {\n (\"eyes\", \"left\"): a,\n (\"eyes\", \"right\"): b,\n (\"ears\", \"left\"): c,\n (\"ears\", \"right\"): d,\n }\n multiind = MultiIndex.from_tuples(tuples, names=[\"part\", \"side\"])\n zed = DataFrame(events, index=[\"a\", \"b\"], columns=multiind)\n\n msg = \"A value is trying to be set on a copy of a slice from a DataFrame\"\n with pytest.raises(com.SettingWithCopyError, match=msg):\n zed[\"eyes\"][\"right\"].fillna(value=555, inplace=True)\n\n\ndef test_cache_updating():\n # 5216\n # make sure that we don't try to set a dead cache\n a = np.random.rand(10, 3)\n df = DataFrame(a, columns=[\"x\", \"y\", \"z\"])\n tuples = [(i, j) for i in range(5) for j in range(2)]\n index = MultiIndex.from_tuples(tuples)\n df.index = index\n\n # setting via chained assignment\n # but actually works, since everything is a view\n df.loc[0][\"z\"].iloc[0] = 1.0\n result = df.loc[(0, 0), \"z\"]\n assert result == 1\n\n # correct setting\n df.loc[(0, 0), \"z\"] = 2\n result = df.loc[(0, 0), \"z\"]\n assert result == 2\n\n\n@pytest.mark.arm_slow\ndef test_indexer_caching():\n # GH5727\n # make sure that indexers are in the _internal_names_set\n n = 1000001\n arrays = (range(n), range(n))\n index = MultiIndex.from_tuples(zip(*arrays))\n s = Series(np.zeros(n), index=index)\n str(s)\n\n # setitem\n expected = Series(np.ones(n), index=index)\n s = Series(np.zeros(n), index=index)\n s[s == 0] = 1\n tm.assert_series_equal(s, expected)\n"
] | [
[
"pandas.Series",
"pandas.date_range",
"pandas.MultiIndex.from_product",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.arange",
"pandas._testing.assert_almost_equal",
"numpy.random.rand"
],
[
"numpy.ones",
"numpy.zeros",
"pandas.DataFrame",
"pandas._testing.assert_series_equal",
"pandas.MultiIndex.from_tuples",
"numpy.random.rand"
]
] |
Napkin-DL/PyTorch-GAN | [
"4668fb434a74a4e4771631944e4abfb0ec1c8795",
"4668fb434a74a4e4771631944e4abfb0ec1c8795"
] | [
".history/implementations/pixelda/pixelda_20190101201505.py",
".history/implementations/pixelda/pixelda_20190101224024.py"
] | [
"import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom mnistm import MNISTM\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nos.makedirs('images', exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\nparser.add_argument('--batch_size', type=int, default=64, help='size of the batches')\nparser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')\nparser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')\nparser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')\nparser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')\nparser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')\nparser.add_argument('--channels', type=int, default=3, help='number of image channels')\nparser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')\nparser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')\nopt = parser.parse_args()\nprint(opt)\n\n# Calculate output of image discriminator (PatchGAN)\npatch = int(opt.img_size / 2**4)\npatch = (1, patch, patch)\n\ncuda = True if torch.cuda.is_available() else False\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n print(\"classname : {}\".format(classname))\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nclass ResidualBlock_back(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(ResidualBlock, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features)\n )\n\n def forward(self, x):\n return x + self.block(x)\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, in_features=64, out_features=64):\n super(ResidualBlock, self).__init__()\n \n # calculate same padding:\n # (w - k + 2*p)/s + 1 = o\n # => p = (s(o-1) - w + k)/2\n (2(128-1)-64 +3)/2\n ### ENCODER\n self.encode_block = nn.Sequential(\n nn.Conv2d(in_channels=1*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(2*in_features),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_channels=2*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=2),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True)\n )\n print(\"self.encode_block : {}\".format(self.encode_block))\n\n self.decode_block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=4*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2), padding=2),\n nn.BatchNorm2d(2*in_features),\n nn.LeakyReLU(inplace=True),\n 
nn.ConvTranspose2d(in_channels=2*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(1*in_features),\n nn.LeakyReLU(inplace=True)\n )\n\n print(\"self.decode_block : {}\".format(self.decode_block))\n\n def forward(self, x):\n encode_x = self.encode_block(x)\n decode_x = self.decode_block(encode_x)\n # decode_x = decode_x[:, :, :-1, :-1]\n # decode_x = F.sigmoid(decode_x)\n return x + decode_x \n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n\n self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n # resblocks.append(ResidualBlock())\n resblocks.append(ResidualBlock())\n self.resblocks = nn.Sequential(*resblocks)\n\n self.l2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())\n\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)\n out = self.l1(gen_input)\n out = self.resblocks(out)\n img_ = self.l2(out)\n\n return img_\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, img):\n validity = self.model(img)\n\n return validity\n\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(512*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\n# Loss function\nadversarial_loss = torch.nn.MSELoss()\ntask_loss = torch.nn.CrossEntropyLoss()\n\n# Loss weights\nlambda_adv = 1\nlambda_task = 0.1\n\n# Initialize generator and discriminator\ngenerator = Generator()\ndiscriminator = Discriminator()\nclassifier = Classifier()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n classifier.cuda()\n adversarial_loss.cuda()\n task_loss.cuda()\n\n# Initialize weights\ngenerator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\nclassifier.apply(weights_init_normal)\n\n# Configure data loader\nos.makedirs('../../data/mnist', exist_ok=True)\ndataloader_A = torch.utils.data.DataLoader(\n datasets.MNIST('../../data/mnist', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n 
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\nos.makedirs('../../data/mnistm', exist_ok=True)\ndataloader_B = torch.utils.data.DataLoader(\n MNISTM('../../data/mnistm', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\n# Optimizers\noptimizer_G = torch.optim.Adam( itertools.chain(generator.parameters(), classifier.parameters()),\n lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n# ----------\n# Training\n# ----------\n\n# Keeps 100 accuracy measurements\ntask_performance = []\ntarget_performance = []\n\nfor epoch in range(opt.n_epochs):\n for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):\n\n batch_size = imgs_A.size(0)\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n\n # Configure input\n imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))\n labels_A = Variable(labels_A.type(LongTensor))\n imgs_B = Variable(imgs_B.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise\n z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))\n\n # Generate a batch of images\n fake_B = generator(imgs_A, z)\n\n # Perform task on translated source image\n label_pred = classifier(fake_B)\n\n # Calculate the task loss\n task_loss_ = (task_loss(label_pred, labels_A) + \\\n task_loss(classifier(imgs_A), labels_A)) / 2\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = lambda_adv * adversarial_loss(discriminator(fake_B), valid) + \\\n lambda_task * task_loss_\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated samples\n real_loss = adversarial_loss(discriminator(imgs_B), valid)\n fake_loss = adversarial_loss(discriminator(fake_B.detach()), fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n\n # ---------------------------------------\n # Evaluate Performance on target domain\n # ---------------------------------------\n\n # Evaluate performance on translated Domain A\n acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())\n task_performance.append(acc)\n if len(task_performance) > 100:\n task_performance.pop(0)\n\n # Evaluate performance on Domain B\n pred_B = classifier(imgs_B)\n target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())\n target_performance.append(target_acc)\n if len(target_performance) > 100:\n target_performance.pop(0)\n\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]\" %\n (epoch, opt.n_epochs,\n i, len(dataloader_A),\n d_loss.item(), g_loss.item(),\n 100*acc, 100*np.mean(task_performance),\n 100*target_acc, 
100*np.mean(target_performance)))\n\n batches_done = len(dataloader_A) * epoch + i\n if batches_done % opt.sample_interval == 0:\n sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)\n save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)\n",
"import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom mnistm import MNISTM\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nos.makedirs('images', exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\nparser.add_argument('--batch_size', type=int, default=64, help='size of the batches')\nparser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')\nparser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')\nparser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')\nparser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')\nparser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')\nparser.add_argument('--channels', type=int, default=3, help='number of image channels')\nparser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')\nparser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')\nopt = parser.parse_args()\nprint(opt)\n\n# Calculate output of image discriminator (PatchGAN)\npatch = int(opt.img_size / 2**4)\npatch = (1, patch, patch)\n\ncuda = True if torch.cuda.is_available() else False\n\nprint(\"cuda : {}\".format(cuda))\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n print(\"classname : {}\".format(classname))\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nclass ResidualBlock_back(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(ResidualBlock, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_features, in_features, 3, 1, 1),\n nn.BatchNorm2d(in_features)\n )\n\n def forward(self, x):\n return x + self.block(x)\n\nclass sencode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(sencode_ResidualBlock, self).__init__()\n \n ### ENCODER\n self.sencode_block = nn.Sequential(\n nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(8*in_features),\n nn.LeakyReLU(inplace=True)\n )\n \n \n def forward(self, x):\n encode_x = self.sencode_block(x)\n return x, encode_x \n\nclass sdecode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(sdecode_ResidualBlock, self).__init__()\n\n self.sdecode_block = nn.Sequential(\n 
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(1*in_features),\n nn.LeakyReLU(inplace=True),\n \n )\n\n def forward(self, encode_x):\n decode_x = self.sdecode_block(encode_x)\n decode_x = decode_x[:, :, :-1, :-1]\n decode_x = F.sigmoid(decode_x)\n return decode_x \n\nclass tencode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(tencode_ResidualBlock, self).__init__()\n \n ### ENCODER\n self.tencode_block = nn.Sequential(\n nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(8*in_features),\n nn.LeakyReLU(inplace=True)\n )\n \n \n def forward(self, x):\n encode_x = self.tencode_block(x)\n return x, encode_x \n\nclass tdecode_ResidualBlock(nn.Module):\n def __init__(self, in_features=64, out_features=64):\n super(tdecode_ResidualBlock, self).__init__()\n\n self.tdecode_block = nn.Sequential(\n nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),\n nn.BatchNorm2d(4*in_features),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),\n nn.BatchNorm2d(1*in_features),\n nn.LeakyReLU(inplace=True),\n \n )\n\n def forward(self, encode_x):\n decode_x = self.tdecode_block(encode_x)\n decode_x = decode_x[:, :, :-1, :-1]\n decode_x = F.sigmoid(decode_x)\n return decode_x \n\n\nclass target_encode_Generator(nn.Module):\n def __init__(self):\n super(target_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.tfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.tl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(tencode_ResidualBlock())\n self.tencode_resblocks = nn.Sequential(*resblocks)\n\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.tfc(z).view(*img.shape)), 1)\n out = self.tl1(gen_input)\n x, encode_out = self.tencode_resblocks(out)\n\n\n return x, encode_out\n\n\nclass source_encode_Generator(nn.Module):\n def __init__(self):\n super(source_encode_Generator, self).__init__()\n\n # Fully-connected layer which constructs image channel shaped output from noise\n self.sfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)\n self.sl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(sencode_ResidualBlock())\n self.sencode_resblocks = nn.Sequential(*resblocks)\n\n\n def forward(self, img, z):\n gen_input = torch.cat((img, self.sfc(z).view(*img.shape)), 1)\n out = self.sl1(gen_input)\n x, encode_out = self.sencode_resblocks(out)\n\n\n return x, encode_out\n\nclass target_decode_Generator(nn.Module):\n def __init__(self):\n super(target_decode_Generator, self).__init__()\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(tdecode_ResidualBlock())\n 
self.target_decode_resblocks = nn.Sequential(*resblocks)\n\n self.tl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())\n\n\n def forward(self, img, encode_out):\n out = img + self.target_decode_resblocks(encode_out)\n img_ = self.tl2(out)\n\n return img_\n\nclass source_decode_Generator(nn.Module):\n def __init__(self):\n super(source_decode_Generator, self).__init__()\n\n resblocks = []\n for _ in range(opt.n_residual_blocks):\n resblocks.append(sdecode_ResidualBlock())\n self.source_decode_resblocks = nn.Sequential(*resblocks)\n\n self.sl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())\n\n\n def forward(self, img, encode_out):\n out = img + self.source_decode_resblocks(encode_out)\n img_ = self.sl2(out)\n\n return img_\n\n\nclass encode_Discriminator(nn.Module):\n def __init__(self):\n super(encode_Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(256, 512, normalization=False),\n *block(512, 1024),\n nn.Conv2d(1024, 1, 3, 1, 1)\n )\n\n def forward(self, encode_x):\n validity = self.model(encode_x)\n\n return validity\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Discriminator block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512),\n nn.Conv2d(512, 1, 3, 1, 1)\n )\n\n def forward(self, img):\n validity = self.model(img)\n\n return validity\n\nclass encode_Classifier(nn.Module):\n def __init__(self):\n super(encode_Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(256, 512, normalization=False),\n *block(512, 1024)\n *block(1024, 2048)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(2048*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n\n def block(in_features, out_features, normalization=True):\n \"\"\"Classifier block\"\"\"\n layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n if normalization:\n layers.append(nn.InstanceNorm2d(out_features))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.channels, 64, normalization=False),\n *block(64, 128),\n *block(128, 256),\n *block(256, 512)\n )\n\n input_size = opt.img_size // 2**4\n self.output_layer = nn.Sequential(\n nn.Linear(512*input_size**2, opt.n_classes),\n nn.Softmax()\n )\n\n def forward(self, img):\n 
feature_repr = self.model(img)\n feature_repr = feature_repr.view(feature_repr.size(0), -1)\n label = self.output_layer(feature_repr)\n return label\n\n# Loss function\nadversarial_loss = torch.nn.MSELoss()\nencode_adversarial_loss = torch.nn.MSELoss()\ntask_loss = torch.nn.CrossEntropyLoss()\n\n# Loss weights\nlambda_adv = 1\nlambda_task = 0.1\n\n# Initialize generator and discriminator\ntarget_encode_generator = target_encode_Generator()\ntarget_decode_generator = target_decode_Generator()\nsource_encode_generator = source_encode_Generator()\nsource_decode_generator = source_decode_Generator()\nencode_discriminator = encode_Discriminator()\ndiscriminator = Discriminator()\nclassifier = Classifier()\n\nif cuda:\n target_encode_generator.cuda()\n target_decode_generator.cuda()\n source_encode_generator.cuda()\n source_decode_generator.cuda()\n encode_discriminator.cuda()\n discriminator.cuda()\n classifier.cuda()\n adversarial_loss.cuda()\n encode_adversarial_loss.cuda()\n task_loss.cuda()\n\n# Initialize weights\ntarget_encode_generator.apply(weights_init_normal)\ntarget_decode_generator.apply(weights_init_normal)\nsource_encode_generator.apply(weights_init_normal)\nsource_decode_generator.apply(weights_init_normal)\nencode_discriminator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\nclassifier.apply(weights_init_normal)\n\n# Configure data loader\nos.makedirs('../../data/mnist', exist_ok=True)\ndataloader_A = torch.utils.data.DataLoader(\n datasets.MNIST('../../data/mnist', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\nos.makedirs('../../data/mnistm', exist_ok=True)\ndataloader_B = torch.utils.data.DataLoader(\n MNISTM('../../data/mnistm', train=True, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=opt.batch_size, shuffle=True)\n\n# Optimizers\n\noptimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(), \n source_encode_generator.parameters(), target_decode_generator.parameters(), \n source_decode_generator.parameters(),\n classifier.parameters()),\n lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n# ----------\n# Training\n# ----------\n\n# Keeps 100 accuracy measurements\ntask_performance = []\ntarget_performance = []\n\nfor epoch in range(opt.n_epochs):\n for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):\n\n batch_size = imgs_A.size(0)\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)\n\n # Configure input\n imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))\n labels_A = Variable(labels_A.type(LongTensor))\n imgs_B = Variable(imgs_B.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise\n z = Variable(FloatTensor(np.random.uniform(-1, 1, 
(batch_size, opt.latent_dim))))\n\n # Generate a batch of images\n imgs_A_x, encode_fake_B = source_encode_generator(imgs_A, z)\n decode_fake_B = source_decode_generator(imgs_A_x, encode_fake_B)\n\n # Perform task on translated source image\n label_pred = classifier(decode_fake_B)\n\n # Calculate the task loss\n task_loss_ = (task_loss(label_pred, labels_A) + \\\n task_loss(classifier(imgs_A), labels_A)) / 2\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \\\n 0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), valid) + \\\n lambda_task * task_loss_\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n imgs_B_x, encode_real_B = target_encode_generator(imgs_B, z)\n decode_real_B = target_decode_generator(imgs_B_x, encode_real_B)\n # Measure discriminator's ability to classify real from generated samples\n encode_real_loss = adversarial_loss(encode_discriminator(encode_real_B), valid)\n encode_fake_loss = adversarial_loss(encode_discriminator(encode_fake_B.detach()), fake)\n decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)\n decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)\n encode_d_loss = (encode_real_loss + encode_fake_loss) / 2\n decode_d_loss = (decode_real_loss + decode_fake_loss) / 2\n d_loss = encode_d_loss + decode_d_loss\n\n d_loss.backward()\n optimizer_D.step()\n\n # ---------------------------------------\n # Evaluate Performance on target domain\n # ---------------------------------------\n\n # Evaluate performance on translated Domain A\n acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())\n task_performance.append(acc)\n if len(task_performance) > 100:\n task_performance.pop(0)\n\n # Evaluate performance on Domain B\n pred_B = classifier(imgs_B)\n target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())\n target_performance.append(target_acc)\n if len(target_performance) > 100:\n target_performance.pop(0)\n\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]\" %\n (epoch, opt.n_epochs,\n i, len(dataloader_A),\n d_loss.item(), g_loss.item(),\n 100*acc, 100*np.mean(task_performance),\n 100*target_acc, 100*np.mean(target_performance)))\n\n batches_done = len(dataloader_A) * epoch + i\n if batches_done % opt.sample_interval == 0:\n sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)\n save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)\n"
] | [
[
"numpy.random.uniform",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"torch.nn.init.normal_",
"torch.nn.CrossEntropyLoss",
"torch.nn.Tanh",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.InstanceNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.cat",
"numpy.mean",
"torch.nn.LeakyReLU"
],
[
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.cat",
"torch.nn.ConvTranspose2d",
"torch.nn.BatchNorm2d",
"torch.nn.functional.sigmoid",
"torch.nn.Softmax",
"torch.nn.init.normal_",
"numpy.mean",
"numpy.random.uniform",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.init.constant_",
"torch.nn.CrossEntropyLoss",
"torch.nn.Tanh",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.LeakyReLU"
]
] |
hrayatnia/SciPy | [
"a50dcbb6b8adffbc526eec93f5009f09943786e3"
] | [
"plotting-beginner-plotting-cookbook/pltcp.py"
] | [
"import numpy as np\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nax = plt.axes(polar = True)\ntheta = np.linspace(0, 2 * np.pi, 8, endpoint = False)\nradius = .25 + .75 * np.random.random(size = len(theta))\npoints = np.vstack((theta, radius)).transpose()\nplt.gca().add_patch(patches.Polygon(points, color = '.75'))\nplt.show()"
] | [
[
"numpy.vstack",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axes",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
mitchellgordon95/lottery-ticket-hypothesis | [
"3b2abee4b1e9ba00fe8501ac86652e2604736405"
] | [
"lottery_ticket/foundations/trainer.py"
] | [
"# Copyright (C) 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A function that trains a network on a dataset.\"\"\"\n\nfrom lottery_ticket.foundations import paths\nfrom lottery_ticket.foundations import save_restore\nimport tensorflow as tf\n\n\ndef train(sess, dataset, model, optimizer_fn, training_len, output_dir,\n **params):\n \"\"\"Train a model on a dataset.\n\n Training continues until training_len iterations or epochs have taken place.\n\n Args:\n sess: A tensorflow session\n dataset: The dataset on which to train (a child of dataset_base.DatasetBase)\n model: The model to train (a child of model_base.ModelBase)\n optimizer_fn: A function that, when called, returns an instance of an\n optimizer object to be used to optimize the network.\n training_len: A tuple whose first value is the unit of measure\n (\"epochs\" or \"iterations\") and whose second value is the number of\n units for which the network should be trained.\n output_dir: The directory to which any output should be saved.\n **params: Other parameters.\n save_summaries is whether to save summary data.\n save_network is whether to save the network before and after training.\n test_interval is None if the test set should not be evaluated; otherwise,\n frequency (in iterations) at which the test set should be run.\n validate_interval is analogous to test_interval.\n\n Returns:\n A dictionary containing the weights before training and the weights after\n training, as well as the trained model.\n \"\"\"\n # Create initial session parameters.\n optimize = optimizer_fn().minimize(model.loss)\n sess.run(tf.global_variables_initializer())\n initial_weights = model.get_current_weights(sess)\n\n train_handle = dataset.get_train_handle(sess)\n test_handle = dataset.get_test_handle(sess)\n validate_handle = dataset.get_validate_handle(sess)\n\n # Optional operations to perform before training.\n if params.get('save_summaries', False):\n writer = tf.summary.FileWriter(paths.summaries(output_dir))\n train_file = tf.gfile.GFile(paths.log(output_dir, 'train'), 'w')\n test_file = tf.gfile.GFile(paths.log(output_dir, 'test'), 'w')\n validate_file = tf.gfile.GFile(paths.log(output_dir, 'validate'), 'w')\n\n if params.get('save_network', False):\n save_restore.save_network(paths.initial(output_dir), initial_weights)\n save_restore.save_network(paths.masks(output_dir), model.masks)\n\n # Helper functions to collect and record summaries.\n def record_summaries(iteration, records, fp):\n \"\"\"Records summaries obtained from evaluating the network.\n\n Args:\n iteration: The current training iteration as an integer.\n records: A list of records to be written.\n fp: A file to which the records should be logged in an easier-to-parse\n format than the tensorflow summary files.\n \"\"\"\n if params.get('save_summaries', False):\n log = ['iteration', str(iteration)]\n for record in records:\n # Log to tensorflow summaries for tensorboard.\n writer.add_summary(record, iteration)\n\n # Log to text file for convenience.\n 
summary_proto = tf.Summary()\n summary_proto.ParseFromString(record)\n value = summary_proto.value[0]\n log += [value.tag, str(value.simple_value)]\n fp.write(','.join(log) + '\\n')\n\n def collect_test_summaries(iteration):\n if (params.get('save_summaries', False) and\n 'test_interval' in params and\n iteration % params['test_interval'] == 0):\n sess.run(dataset.test_initializer)\n records = sess.run(model.test_summaries, {dataset.handle: test_handle})\n record_summaries(iteration, records, test_file)\n\n def collect_validate_summaries(iteration):\n if (params.get('save_summaries', False) and\n 'validate_interval' in params and\n iteration % params['validate_interval'] == 0):\n sess.run(dataset.validate_initializer)\n records = sess.run(model.validate_summaries,\n {dataset.handle: validate_handle})\n record_summaries(iteration, records, validate_file)\n\n # Train for the specified number of epochs. This behavior is encapsulated\n # in a function so that it is possible to break out of multiple loops\n # simultaneously.\n def training_loop():\n \"\"\"The main training loop encapsulated in a function.\"\"\"\n iteration = 0\n epoch = 0\n last_train_acc = None\n while True:\n sess.run(dataset.train_initializer)\n epoch += 1\n\n # End training if we have passed the epoch limit.\n if training_len[0] == 'epochs' and epoch > training_len[1]:\n return last_train_acc\n\n # One training epoch.\n while True:\n try:\n iteration += 1\n\n # End training if we have passed the iteration limit.\n if training_len[0] == 'iterations' and iteration > training_len[1]:\n return last_train_acc\n\n # Train.\n results = sess.run([optimize, model.accuracy] + model.train_summaries,\n {dataset.handle: train_handle})\n last_train_acc = results[1]\n records = results[2:]\n record_summaries(iteration, records, train_file)\n\n # Collect test and validation data if applicable.\n collect_test_summaries(iteration)\n collect_validate_summaries(iteration)\n\n # End of epoch handling.\n except tf.errors.OutOfRangeError:\n break\n\n # Run the training loop.\n final_train_acc = training_loop()\n\n # Clean up.\n if params.get('save_summaries', False):\n train_file.close()\n test_file.close()\n validate_file.close()\n\n # Retrieve the final weights of the model.\n final_weights = model.get_current_weights(sess)\n if params.get('save_network', False):\n save_restore.save_network(paths.final(output_dir), final_weights)\n\n return initial_weights, final_weights, final_train_acc\n"
] | [
[
"tensorflow.Summary",
"tensorflow.global_variables_initializer"
]
] |
jakee417/probability-1 | [
"ae7117f37ac441bc7a888167ea23e5e620c5bcde",
"ae7117f37ac441bc7a888167ea23e5e620c5bcde"
] | [
"tensorflow_probability/python/experimental/mcmc/windowed_sampling_test.py",
"tensorflow_probability/python/distributions/student_t_process_regression_model_test.py"
] | [
"# Copyright 2021 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the _License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for windowed sampling.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.experimental import distribute\nfrom tensorflow_probability.python.experimental.mcmc import windowed_sampling\nfrom tensorflow_probability.python.internal import callable_util\nfrom tensorflow_probability.python.internal import distribute_test_lib\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.internal import unnest\n\nJAX_MODE = False\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\nRoot = tfd.JointDistributionCoroutine.Root\n\nNUM_SCHOOLS = 8 # number of schools\nTREATMENT_EFFECTS = [28., 8, -3, 7, -1, 1, 18, 12]\nTREATMENT_STDDEVS = [15., 10, 16, 11, 9, 11, 10, 18]\n\n\ndef eight_schools_coroutine():\n\n @tfd.JointDistributionCoroutine\n def model():\n avg_effect = yield Root(tfd.Normal(0., 5., name='avg_effect'))\n avg_stddev = yield Root(tfd.HalfNormal(5., name='avg_stddev'))\n school_effects_std = yield Root(\n tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'))\n yield tfd.Independent(\n tfd.Normal(loc=(avg_effect[..., tf.newaxis] +\n avg_stddev[..., tf.newaxis] * school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')\n return model\n\n\ndef eight_schools_sequential():\n model = tfd.JointDistributionSequential([\n tfd.Normal(0., 5., name='avg_effect'),\n tfd.HalfNormal(5., name='avg_stddev'),\n tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),\n # pylint: disable=g-long-lambda\n lambda school_effects_std, avg_stddev, avg_effect: tfd.Independent(\n tfd.Normal(loc=(avg_effect[..., tf.newaxis] +\n avg_stddev[..., tf.newaxis] * school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')])\n # pylint: enable=g-long-lambda\n return model\n\n\ndef eight_schools_named():\n model = tfd.JointDistributionNamed(\n dict(\n avg_effect=tfd.Normal(0., 5., name='avg_effect'),\n avg_stddev=tfd.HalfNormal(5., name='avg_stddev'),\n school_effects_std=tfd.Sample(\n tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),\n # pylint: disable=g-long-lambda\n treatment_effects=lambda school_effects_std, avg_stddev, avg_effect:\n tfd.Independent(\n tfd.Normal(loc=(avg_effect[..., tf.newaxis] +\n avg_stddev[..., tf.newaxis] * school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')))\n # pylint: enable=g-long-lambda\n return model\n\n\ndef eight_schools_nested():\n model = tfd.JointDistributionNamed(\n 
dict(\n effect_and_stddev=tfd.JointDistributionSequential([\n tfd.Normal(0., 5., name='avg_effect'),\n tfd.HalfNormal(5., name='avg_stddev')], name='effect_and_stddev'),\n school_effects_std=tfd.Sample(\n tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),\n # pylint: disable=g-long-lambda\n treatment_effects=lambda school_effects_std, effect_and_stddev:\n tfd.Independent(\n tfd.Normal(loc=(effect_and_stddev[0][..., tf.newaxis] +\n effect_and_stddev[1][..., tf.newaxis] *\n school_effects_std),\n scale=tf.constant(TREATMENT_STDDEVS)),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')))\n # pylint: enable=g-long-lambda\n return model\n\n\ndef _gen_gaussian_updating_example(x_dim, y_dim, seed):\n \"\"\"An implementation of section 2.3.3 from [1].\n\n We initialize a joint distribution\n\n x ~ N(mu, Lambda^{-1})\n y ~ N(Ax, L^{-1})\n\n Then condition the model on an observation for y. We can test to confirm that\n Cov(p(x | y_obs)) is near to\n\n Sigma = (Lambda + A^T L A)^{-1}\n\n This test can actually check whether the posterior samples have the proper\n covariance, and whether the windowed tuning recovers 1 / diag(Sigma) as the\n diagonal scaling factor.\n\n References:\n [1] Bishop, Christopher M. Pattern Recognition and Machine Learning.\n Springer, 2006.\n\n Args:\n x_dim: int\n y_dim: int\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details.\n Returns:\n (tfd.JointDistribution, tf.Tensor), representing the joint distribution\n above, and the posterior variance.\n \"\"\"\n seeds = samplers.split_seed(seed, 6)\n x_mean = samplers.normal((x_dim,), seed=seeds[0])\n x_scale_diag = samplers.normal((x_dim,), seed=seeds[1])\n y_scale_diag = samplers.normal((y_dim,), seed=seeds[2])\n scale_mat = samplers.normal((y_dim, x_dim), seed=seeds[3])\n y_shift = samplers.normal((y_dim,), seed=seeds[4])\n\n @tfd.JointDistributionCoroutine\n def model():\n x = yield Root(tfd.MultivariateNormalDiag(\n x_mean, scale_diag=x_scale_diag, name='x'))\n yield tfd.MultivariateNormalDiag(\n tf.linalg.matvec(scale_mat, x) + y_shift,\n scale_diag=y_scale_diag,\n name='y')\n\n dists, _ = model.sample_distributions(seed=seeds[5])\n precision_x = tf.linalg.inv(dists.x.covariance())\n precision_y = tf.linalg.inv(dists.y.covariance())\n true_cov = tf.linalg.inv(precision_x +\n tf.linalg.matmul(\n tf.linalg.matmul(scale_mat, precision_y,\n transpose_a=True),\n scale_mat))\n return model, tf.linalg.diag_part(true_cov)\n\n\n@test_util.test_graph_and_eager_modes\nclass WindowedSamplingTest(test_util.TestCase):\n\n @parameterized.named_parameters(\n dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in\n [eight_schools_coroutine, eight_schools_named, eight_schools_sequential,\n eight_schools_nested])\n def test_hmc_type_checks(self, model_fn):\n model = model_fn()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function(autograph=False)\n def do_sample(seed):\n return tfp.experimental.mcmc.windowed_adaptive_hmc(\n 3, model, num_leapfrog_steps=2, num_adaptation_steps=21,\n seed=seed, **pins)\n\n draws, _ = do_sample(test_util.test_seed())\n self.evaluate(draws)\n\n @parameterized.named_parameters(\n dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in\n [eight_schools_coroutine, eight_schools_named, eight_schools_sequential,\n eight_schools_nested])\n def test_nuts_type_checks(self, model_fn):\n model = model_fn()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function\n def do_sample(seed):\n return 
tfp.experimental.mcmc.windowed_adaptive_nuts(\n 3, model, max_tree_depth=2, num_adaptation_steps=50,\n seed=seed, **pins)\n\n draws, _ = do_sample(test_util.test_seed())\n self.evaluate(draws)\n\n def test_hmc_samples_well(self):\n model = eight_schools_named()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function\n def do_sample(seed):\n return tfp.experimental.mcmc.windowed_adaptive_hmc(\n 400, model, num_leapfrog_steps=12, seed=seed,\n **pins)\n\n draws, _ = do_sample(test_util.test_seed())\n flat_draws = tf.nest.flatten(\n model.experimental_pin(**pins)._model_flatten(draws))\n max_scale_reduction = tf.reduce_max(\n tf.nest.map_structure(tf.reduce_max,\n tfp.mcmc.potential_scale_reduction(flat_draws)))\n self.assertLess(self.evaluate(max_scale_reduction), 1.5)\n\n def test_nuts_samples_well(self):\n model = eight_schools_named()\n pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}\n\n @tf.function\n def do_sample():\n return tfp.experimental.mcmc.windowed_adaptive_nuts(\n 200, model, max_tree_depth=5, seed=test_util.test_seed(),\n **pins)\n\n draws, _ = do_sample()\n flat_draws = tf.nest.flatten(\n model.experimental_pin(**pins)._model_flatten(draws))\n max_scale_reduction = tf.reduce_max(\n tf.nest.map_structure(tf.reduce_max,\n tfp.mcmc.potential_scale_reduction(flat_draws)))\n self.assertLess(self.evaluate(max_scale_reduction), 1.05)\n\n @parameterized.named_parameters(\n dict(testcase_name=f'_{num_draws}', num_draws=num_draws)\n for num_draws in [0, 1, 500, 499, 100, 10000])\n def test_get_window_sizes(self, num_draws):\n [first_window,\n slow_window,\n last_window] = windowed_sampling._get_window_sizes(num_draws)\n self.assertEqual(first_window +\n slow_window +\n 2 * slow_window +\n 4 * slow_window +\n 8 * slow_window +\n last_window, num_draws)\n if num_draws == 500:\n self.assertEqual(slow_window, 25)\n self.assertEqual(first_window, 75)\n self.assertEqual(last_window, 50)\n\n def test_explicit_init(self):\n sample_dist = tfd.JointDistributionSequential(\n [tfd.HalfNormal(1., name=f'dist_{idx}') for idx in range(4)])\n\n explicit_init = [tf.ones(20) for _ in range(3)]\n _, init, bijector, _, _, _ = windowed_sampling._setup_mcmc(\n model=sample_dist,\n n_chains=[20],\n init_position=explicit_init,\n seed=test_util.test_seed(),\n dist_3=1.)\n\n self.assertAllEqual(self.evaluate(init),\n tf.convert_to_tensor(bijector(explicit_init)))\n\n def test_explicit_init_samples(self):\n stream = test_util.test_seed_stream()\n\n # Compute everything in a function so it is consistent in graph mode\n @tf.function\n def do_sample():\n jd_model = tfd.JointDistributionNamed({\n 'x': tfd.HalfNormal(1.),\n 'y': lambda x: tfd.Normal(0., x)})\n init = {'x': tf.ones(64)}\n return tfp.experimental.mcmc.windowed_adaptive_hmc(\n 10,\n jd_model,\n num_adaptation_steps=200,\n current_state=init,\n num_leapfrog_steps=5,\n discard_tuning=False,\n y=tf.constant(1.),\n seed=stream(),\n trace_fn=None)\n\n self.evaluate(do_sample())\n\n def test_valid_init(self):\n\n class _HalfNormal(tfd.HalfNormal):\n\n def _default_event_space_bijector(self):\n # This bijector is intentionally mis-specified so that ~50% of\n # initialiations will fail.\n return tfb.Identity(validate_args=self.validate_args)\n\n tough_dist = tfd.JointDistributionSequential(\n [_HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])\n\n # Twenty chains with three parameters gives a 1 / 2^60 chance of\n # initializing with a finite log probability by chance.\n _, init, _, _, _, _ = 
windowed_sampling._setup_mcmc(\n model=tough_dist,\n n_chains=[20],\n seed=test_util.test_seed(),\n dist_3=1.)\n\n self.assertAllGreater(self.evaluate(init), 0.)\n\n def test_extra_pins_not_required(self):\n model = tfd.JointDistributionSequential([\n tfd.Normal(0., 1., name='x'),\n lambda x: tfd.Normal(x, 1., name='y')\n ])\n pinned = model.experimental_pin(y=4.2)\n\n # No explicit pins are passed, since the model is already pinned.\n _, init, _, _, _, _ = windowed_sampling._setup_mcmc(\n model=pinned, n_chains=[20],\n seed=test_util.test_seed())\n self.assertLen(init, 1)\n\n def test_hmc_fitting_gaussian(self):\n # See docstring to _gen_gaussian_updating_example\n x_dim = 3\n y_dim = 12\n\n stream = test_util.test_seed_stream()\n\n # Compute everything in a function so it is consistent in graph mode\n @tf.function\n def do_sample():\n jd_model, true_var = _gen_gaussian_updating_example(\n x_dim, y_dim, stream())\n y_val = jd_model.sample(seed=stream()).y\n _, trace = tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n n_chains=1,\n num_adaptation_steps=10000,\n num_leapfrog_steps=16,\n discard_tuning=False,\n y=y_val,\n seed=stream())\n\n # Get the final scaling used for the mass matrix - this is a measure\n # of how well the windowed adaptation recovered the true variance\n final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]\n return final_scaling, true_var\n final_scaling, true_var = do_sample()\n self.assertAllClose(true_var, final_scaling, rtol=0.15)\n\n def test_nuts_fitting_gaussian(self):\n # See docstring to _gen_gaussian_updating_example\n x_dim = 3\n y_dim = 12\n\n stream = test_util.test_seed_stream()\n\n # Compute everything in a function so it is consistent in graph mode\n @tf.function\n def do_sample():\n jd_model, true_var = _gen_gaussian_updating_example(\n x_dim, y_dim, stream())\n y_val = jd_model.sample(seed=stream()).y\n _, trace = tfp.experimental.mcmc.windowed_adaptive_nuts(\n 1,\n jd_model,\n n_chains=1,\n num_adaptation_steps=10000,\n max_tree_depth=5,\n discard_tuning=False,\n y=y_val,\n seed=stream())\n\n # Get the final scaling used for the mass matrix - this is a measure\n # of how well the windowed adaptation recovered the true variance\n final_scaling = 1. 
/ trace['variance_scaling'][0][-1, 0, :]\n return final_scaling, true_var\n final_scaling, true_var = do_sample()\n self.assertAllClose(true_var, final_scaling, rtol=0.1, atol=1e-3)\n\n def test_f64_step_size(self):\n dist = tfd.JointDistributionSequential([\n tfd.Normal(\n tf.constant(0., dtype=tf.float64),\n tf.constant(1., dtype=tf.float64))\n ])\n (target_log_prob_fn, initial_transformed_position, _, _, _, _\n ) = windowed_sampling._setup_mcmc(\n dist, n_chains=[5], init_position=None, seed=test_util.test_seed())\n init_step_size = windowed_sampling._get_step_size(\n initial_transformed_position, target_log_prob_fn)\n self.assertDTypeEqual(init_step_size, np.float64)\n self.assertAllFinite(init_step_size)\n\n def test_batch_of_problems_autobatched(self):\n\n def model_fn():\n x = yield tfd.MultivariateNormalDiag(\n tf.zeros([10, 3]), tf.ones(3), name='x')\n yield tfd.Multinomial(\n logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')\n\n model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)\n samp = model.sample(seed=test_util.test_seed())\n self.assertEqual((10, 3), samp.x.shape)\n self.assertEqual((10, 4), samp.y.shape)\n\n states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(\n 2, model.experimental_pin(y=samp.y), num_leapfrog_steps=3,\n num_adaptation_steps=100, init_step_size=tf.ones([10, 1]),\n seed=test_util.test_seed()))\n self.assertEqual((2, 64, 10, 3), states.x.shape)\n self.assertEqual((2, 10, 1), trace['step_size'].shape)\n\n def test_batch_of_problems_named(self):\n\n def mk_y(x):\n return tfd.Multinomial(logits=tfb.Pad([(0, 1)])(x), total_count=10)\n\n model = tfd.JointDistributionNamed(dict(\n x=tfd.MultivariateNormalDiag(tf.zeros([10, 3]), tf.ones(3)),\n y=mk_y))\n\n samp = model.sample(seed=test_util.test_seed())\n self.assertEqual((10, 3), samp['x'].shape)\n self.assertEqual((10, 4), samp['y'].shape)\n\n states, trace = self.evaluate(\n tfp.experimental.mcmc.windowed_adaptive_hmc(\n 2,\n model.experimental_pin(y=samp['y']),\n num_leapfrog_steps=3,\n num_adaptation_steps=100,\n init_step_size=tf.ones([10, 1]),\n seed=test_util.test_seed()))\n self.assertEqual((2, 64, 10, 3), states['x'].shape)\n self.assertEqual((2, 10, 1), trace['step_size'].shape)\n\n def test_bijector(self):\n dist = tfd.JointDistributionSequential([tfd.Dirichlet(tf.ones(2))])\n bij, _ = windowed_sampling._get_flat_unconstraining_bijector(dist)\n draw = dist.sample(seed=test_util.test_seed())\n self.assertAllCloseNested(bij.inverse(bij(draw)), draw)\n\n @parameterized.named_parameters(*(\n (f'{kind}_{n_chains}', kind, n_chains) # pylint: disable=g-complex-comprehension\n for kind in ('hmc', 'nuts') for n_chains in ([], 3, [2, 1], [2, 2, 2])))\n def test_batches_of_chains(self, kind, n_chains):\n\n def model_fn():\n x = yield tfd.MultivariateNormalDiag(\n tf.zeros(3), tf.ones(3), name='x')\n yield tfd.Multinomial(\n logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')\n\n model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)\n samp = model.sample(seed=test_util.test_seed())\n states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(\n 5, model.experimental_pin(y=samp.y), n_chains=n_chains,\n num_leapfrog_steps=3, num_adaptation_steps=100,\n seed=test_util.test_seed()))\n if isinstance(n_chains, int):\n n_chains = [n_chains]\n self.assertEqual((5, *n_chains, 3), states.x.shape)\n self.assertEqual((5,), trace['step_size'].shape)\n\n def test_dynamic_batch_shape(self):\n \"\"\"Test correct handling of 
`TensorShape(None)`.\"\"\"\n if JAX_MODE:\n self.skipTest('b/203858802')\n\n n_features = 5\n n_timepoints = 100\n features = tfd.Normal(0., 1.).sample([100, n_features],\n test_util.test_seed())\n ar_sigma = 1.\n rho = .25\n\n @tfd.JointDistributionCoroutine\n def jd_model():\n beta = yield Root(tfd.Sample(tfd.Normal(0., 1.), n_features))\n yhat = tf.einsum('ij,...j->...i', features, beta)\n\n def ar_fun(y):\n loc = tf.concat([tf.zeros_like(y[..., :1]), y[..., :-1]], axis=-1)\n return tfd.Independent(\n tfd.Normal(loc=loc * rho, scale=ar_sigma),\n reinterpreted_batch_ndims=1)\n # Autoregressive distribution defined as below introduce a batch shape:\n # TensorShape(None)\n yield tfd.Autoregressive(\n distribution_fn=ar_fun,\n sample0=tf.zeros_like(yhat),\n num_steps=yhat.shape[-1],\n name='y')\n\n states, _ = self.evaluate(\n tfp.experimental.mcmc.windowed_adaptive_nuts(\n 2,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n seed=test_util.test_seed()))\n self.assertEqual((2, 3, n_timepoints), states.y.shape)\n\n @parameterized.named_parameters(\n ('_nuts', tfp.experimental.mcmc.windowed_adaptive_nuts, {}),\n ('_hmc', tfp.experimental.mcmc.windowed_adaptive_hmc, {\n 'num_leapfrog_steps': 1\n }),\n )\n def test_f64_state(self, method, method_kwargs):\n states, _ = callable_util.get_output_spec(lambda: method( # pylint: disable=g-long-lambda\n 5,\n tfd.Normal(tf.constant(0., tf.float64), 1.),\n n_chains=2,\n num_adaptation_steps=100,\n seed=test_util.test_seed(),\n **method_kwargs))\n\n self.assertEqual(tf.float64, states.dtype)\n\n\n@test_util.test_graph_and_eager_modes\nclass WindowedSamplingStepSizeTest(test_util.TestCase):\n\n def test_supply_full_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionNamed({\n 'a': tfd.Normal(0., 1.),\n 'b': tfd.MultivariateNormalDiag(\n loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))\n })\n\n init_step_size = {'a': tf.reshape(tf.linspace(1., 2., 3), (3, 1)),\n 'b': tf.reshape(tf.linspace(1., 2., 9), (3, 3))}\n\n _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n init_step_size=init_step_size,\n num_leapfrog_steps=5,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n seed=stream(),\n )\n\n # Gets a newaxis because step size needs to have an event dimension.\n self.assertAllCloseNested([init_step_size['a'],\n init_step_size['b']],\n [j[0] for j in actual_step_size])\n\n def test_supply_partial_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionNamed({\n 'a': tfd.Normal(0., 1.),\n 'b': tfd.MultivariateNormalDiag(\n loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))\n })\n\n init_step_size = {'a': 1., 'b': 2.}\n _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n init_step_size=init_step_size,\n num_leapfrog_steps=5,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n seed=stream(),\n )\n\n actual_step = [j[0] for j in actual_step_size]\n expected_step = [1., 2.]\n self.assertAllCloseNested(expected_step, actual_step)\n\n def test_supply_single_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionNamed({\n 'a': tfd.Normal(0., 1.),\n 'b': tfd.MultivariateNormalDiag(\n loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))\n })\n\n init_step_size = 1.\n _, traced_step_size = 
self.evaluate(\n tfp.experimental.mcmc.windowed_adaptive_hmc(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=20,\n init_step_size=init_step_size,\n num_leapfrog_steps=5,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n seed=stream()))\n\n self.assertEqual((25 + 1,), traced_step_size.shape)\n self.assertAllClose(1., traced_step_size[0])\n\n def test_sequential_step_size(self):\n stream = test_util.test_seed_stream()\n\n jd_model = tfd.JointDistributionSequential(\n [tfd.HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])\n init_step_size = [1., 2., 3.]\n _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_nuts(\n 1,\n jd_model,\n num_adaptation_steps=25,\n n_chains=3,\n init_step_size=init_step_size,\n discard_tuning=False,\n trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),\n dist_3=tf.constant(1.),\n seed=stream(),\n )\n\n self.assertAllCloseNested(init_step_size,\n [j[0] for j in actual_step_size])\n\n\ndef _beta_binomial(trials):\n \"\"\"Returns a function that constructs a beta binomial distribution.\"\"\"\n\n def _beta_binomial_distribution(mean, inverse_concentration):\n \"\"\"Returns a beta binomial distribution with the given parameters.\"\"\"\n # Mean and inverse concentration are broadcast across days.\n mean = mean[..., tf.newaxis]\n inverse_concentration = inverse_concentration[..., tf.newaxis]\n\n beta_binomial = tfd.BetaBinomial(\n total_count=trials,\n concentration0=(1 - mean) / inverse_concentration,\n concentration1=mean / inverse_concentration)\n return tfd.Independent(beta_binomial, reinterpreted_batch_ndims=2)\n\n return _beta_binomial_distribution\n\n\ndef get_joint_distribution(\n trials,\n mean_prior=lambda: tfd.Uniform(0., 1.),\n inverse_concentration_prior=lambda: tfd.HalfNormal(5.)):\n \"\"\"Returns a joint distribution over parameters and successes.\"\"\"\n param_shape = ps.shape(trials)[:1]\n mean = tfd.Sample(mean_prior(), param_shape)\n inverse_concentration = tfd.Sample(inverse_concentration_prior(), param_shape)\n return tfd.JointDistributionNamed(\n dict(mean=mean,\n inverse_concentration=inverse_concentration,\n successes=_beta_binomial(trials)),\n name='jd')\n\n\nclass PrecompiledTest(test_util.TestCase):\n\n def setUp(self):\n super().setUp()\n arms = 2\n days = 3\n\n seed = test_util.test_seed()\n trial_seed, value_seed = tfp.random.split_seed(seed)\n self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)\n dist = get_joint_distribution(self.trials)\n self.true_values = dist.sample(seed=value_seed)\n\n def nuts_kwargs(self):\n return {'max_tree_depth': 2}\n\n def hmc_kwargs(self):\n return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}\n\n @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),\n ('nuts_jit_sig', 'nuts'))\n def test_base_kernel(self, kind):\n self.skip_if_no_xla()\n self.skipTest('b/195070752') # Test is broken by cl/393807414.\n\n if JAX_MODE:\n input_signature = None\n else:\n input_signature = (\n tf.TensorSpec(\n shape=[None, None], dtype=tf.float32, name='trials'),\n tf.TensorSpec(\n shape=[None, None], dtype=tf.float32, name='successes'),\n tf.TensorSpec(\n shape=[2], dtype=tf.int32, name='seed'))\n @tf.function(jit_compile=True, input_signature=input_signature)\n def do(trials, successes, seed):\n if kind == 'hmc':\n proposal_kernel_kwargs = self.hmc_kwargs()\n else:\n proposal_kernel_kwargs = self.nuts_kwargs()\n\n return windowed_sampling._windowed_adaptive_impl(\n n_draws=9,\n 
joint_dist=get_joint_distribution(trials),\n kind=kind,\n n_chains=11,\n proposal_kernel_kwargs=proposal_kernel_kwargs,\n num_adaptation_steps=50,\n current_state=None,\n dual_averaging_kwargs={'target_accept_prob': 0.76},\n trace_fn=None,\n return_final_kernel_results=False,\n discard_tuning=True,\n chain_axis_names=None,\n seed=seed,\n successes=successes)\n\n self.evaluate(do(self.trials + 0., self.true_values['successes'],\n test_util.test_seed(sampler_type='stateless')))\n\nif JAX_MODE:\n # TF runs into the `merge_call` error here (b/181800108).\n\n @test_util.disable_test_for_backend(\n disable_numpy=True,\n reason='Sharding not available for NumPy backend.')\n class DistributedTest(distribute_test_lib.DistributedTest):\n\n def setUp(self):\n super().setUp()\n arms = 2\n days = 3\n\n seed = test_util.test_seed()\n trial_seed, value_seed = tfp.random.split_seed(seed)\n self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)\n dist = get_joint_distribution(self.trials)\n self.true_values = dist.sample(seed=value_seed)\n\n def nuts_kwargs(self):\n return {'max_tree_depth': 2}\n\n def hmc_kwargs(self):\n return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}\n\n def test_can_extract_shard_axis_names_from_model(self):\n joint_dist = distribute.JointDistributionNamed(dict(\n x=tfd.Normal(0., 1.),\n y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),\n z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)\n ))\n\n def do():\n _, _, _, _, _, shard_axis_names = windowed_sampling._setup_mcmc(\n model=joint_dist,\n n_chains=[20],\n seed=test_util.test_seed(), z=1.)\n # _setup_mcmc will flatten the distribution\n self.assertListEqual(shard_axis_names, [[], ['i']])\n self.strategy_run(do, args=(), in_axes=None)\n\n @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),\n ('nuts_jit_sig', 'nuts'))\n def test_data_sharding(self, kind):\n self.skip_if_no_xla()\n\n joint_dist = distribute.JointDistributionNamed(dict(\n x=tfd.Normal(0., 1.),\n y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),\n z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)\n ))\n\n def do(seed, z):\n if kind == 'hmc':\n proposal_kernel_kwargs = self.hmc_kwargs()\n else:\n proposal_kernel_kwargs = self.nuts_kwargs()\n\n return windowed_sampling._windowed_adaptive_impl(\n n_draws=10,\n joint_dist=joint_dist,\n kind=kind,\n n_chains=2,\n proposal_kernel_kwargs=proposal_kernel_kwargs,\n num_adaptation_steps=21,\n current_state=None,\n dual_averaging_kwargs={'target_accept_prob': 0.76},\n trace_fn=None,\n return_final_kernel_results=False,\n discard_tuning=True,\n seed=seed,\n chain_axis_names=None,\n z=z)\n\n self.evaluate(self.strategy_run(\n do,\n in_axes=(None, 0),\n args=(samplers.zeros_seed(), self.shard_values(\n tf.ones(distribute_test_lib.NUM_DEVICES)))))\n\n @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),\n ('nuts_jit_sig', 'nuts'))\n def test_chain_sharding(self, kind):\n self.skip_if_no_xla()\n\n joint_dist = tfd.JointDistributionNamed(dict(\n x=tfd.Normal(0., 1.),\n y=lambda x: tfd.Sample(tfd.Normal(x, 1.), 4),\n z=lambda y: tfd.Independent(tfd.Normal(y, 1.), 1)\n ))\n\n def do(seed, z):\n if kind == 'hmc':\n proposal_kernel_kwargs = self.hmc_kwargs()\n else:\n proposal_kernel_kwargs = self.nuts_kwargs()\n\n return windowed_sampling._windowed_adaptive_impl(\n n_draws=10,\n joint_dist=joint_dist,\n kind=kind,\n n_chains=2,\n proposal_kernel_kwargs=proposal_kernel_kwargs,\n num_adaptation_steps=21,\n current_state=None,\n 
dual_averaging_kwargs={'target_accept_prob': 0.76},\n trace_fn=None,\n return_final_kernel_results=False,\n discard_tuning=True,\n seed=seed,\n chain_axis_names=self.axis_name,\n z=z)\n\n self.evaluate(self.strategy_run(\n do,\n in_axes=None,\n args=(samplers.zeros_seed(),\n tf.ones(distribute_test_lib.NUM_DEVICES))))\n\nif __name__ == '__main__':\n test_util.main()\n",
"# Copyright 2021 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.math import psd_kernels\n\n\n@test_util.test_all_tf_execution_regimes\nclass StudentTProcessRegressionModelTest(test_util.TestCase):\n\n def testInstantiate(self):\n df = np.float64(1.)\n # 5x5 grid of index points in R^2 and flatten to 25x2\n index_points = np.linspace(-4., 4., 5, dtype=np.float64)\n index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)\n index_points = np.reshape(index_points, [-1, 2])\n # ==> shape = [25, 2]\n\n # Kernel with batch_shape [2, 4, 1, 3]\n amplitude = np.array([1., 2.], np.float64).reshape([2, 1, 1, 1])\n length_scale = np.array([.1, .2, .3, .4], np.float64).reshape(\n [1, 4, 1, 1])\n observation_noise_variance = np.array(\n [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 1, 1, 3])\n\n observation_index_points = (\n np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (3, 7)).astype(np.float64)\n\n def cholesky_fn(x):\n return tf.linalg.cholesky(\n tf.linalg.set_diag(x, tf.linalg.diag_part(x) + 1.))\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n cholesky_fn=cholesky_fn)\n batch_shape = [2, 4, 1, 3]\n event_shape = [25]\n sample_shape = [7, 2]\n\n print(stprm.batch_shape)\n print(stprm.kernel.batch_shape)\n print(stprm.kernel.schur_complement.batch_shape)\n print(stprm.kernel.schur_complement.base_kernel.batch_shape)\n\n self.assertIs(cholesky_fn, stprm.cholesky_fn)\n\n samples = stprm.sample(sample_shape, seed=test_util.test_seed())\n self.assertAllEqual(stprm.batch_shape_tensor(), batch_shape)\n self.assertAllEqual(stprm.event_shape_tensor(), event_shape)\n self.assertAllEqual(self.evaluate(samples).shape,\n sample_shape + batch_shape + event_shape)\n\n def testMeanSameAsGPRM(self):\n df = np.float64(3.)\n index_points = np.linspace(-4., 4., 5, dtype=np.float64)\n index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)\n index_points = np.reshape(index_points, [-1, 2])\n\n # Kernel with batch_shape [5, 3]\n amplitude = np.array([1., 2., 3., 4., 5.], np.float64).reshape([5, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape(\n [1, 3])\n observation_noise_variance = np.array(\n [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 3])\n\n observation_index_points = (\n np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (3, 
7)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n gprm = tfd.GaussianProcessRegressionModel(\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n\n self.assertAllClose(self.evaluate(stprm.mean()), self.evaluate(gprm.mean()))\n\n def testLogProbNearGPRM(self):\n # For large df, the log_prob calculations should be the same.\n df = np.float64(1e6)\n index_points = np.linspace(-4., 4., 5, dtype=np.float64)\n index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)\n index_points = np.reshape(index_points, [-1, 2])\n\n # Kernel with batch_shape [5, 3]\n amplitude = np.array([1., 2., 3., 4., 5.], np.float64).reshape([5, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape(\n [1, 3])\n observation_noise_variance = np.array(\n [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 3])\n\n observation_index_points = (\n np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (3, 7)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n gprm = tfd.GaussianProcessRegressionModel(\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance)\n\n x = np.linspace(-3., 3., 25)\n\n self.assertAllClose(\n self.evaluate(stprm.log_prob(x)),\n self.evaluate(gprm.log_prob(x)), rtol=2e-5)\n\n def testMeanVarianceAndCovariancePrecomputed(self):\n amplitude = np.array([1., 2.], np.float64).reshape([2, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape([1, 3])\n observation_noise_variance = np.array([1e-9], np.float64)\n df = np.float64(3.)\n\n observation_index_points = (\n np.random.uniform(-1., 1., (1, 1, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (1, 1, 7)).astype(np.float64)\n\n index_points = np.random.uniform(-1., 1., (6, 2)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n stprm = tfd.StudentTProcessRegressionModel(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n validate_args=True)\n\n precomputed_stprm = tfd.StudentTProcessRegressionModel.precompute_regression_model(\n df=df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n validate_args=True)\n\n self.assertAllClose(self.evaluate(precomputed_stprm.covariance()),\n self.evaluate(stprm.covariance()))\n self.assertAllClose(self.evaluate(precomputed_stprm.variance()),\n self.evaluate(stprm.variance()))\n self.assertAllClose(self.evaluate(precomputed_stprm.mean()),\n self.evaluate(stprm.mean()))\n\n 
@test_util.disable_test_for_backend(\n disable_numpy=True, disable_jax=True,\n reason='Numpy and JAX have no notion of CompositeTensor/saved_model')\n def testPrecomputedCompositeTensor(self):\n amplitude = np.array([1., 2.], np.float64).reshape([2, 1])\n length_scale = np.array([.1, .2, .3], np.float64).reshape([1, 3])\n observation_noise_variance = np.array([1e-9], np.float64)\n\n observation_index_points = (\n np.random.uniform(-1., 1., (1, 1, 7, 2)).astype(np.float64))\n observations = np.random.uniform(-1., 1., (1, 1, 7)).astype(np.float64)\n\n index_points = np.random.uniform(-1., 1., (6, 2)).astype(np.float64)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n\n precomputed_stprm = tfd.StudentTProcessRegressionModel.precompute_regression_model(\n df=3.,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=observation_index_points,\n observations=observations,\n observation_noise_variance=observation_noise_variance,\n validate_args=True)\n\n flat = tf.nest.flatten(precomputed_stprm, expand_composites=True)\n unflat = tf.nest.pack_sequence_as(\n precomputed_stprm, flat, expand_composites=True)\n self.assertIsInstance(unflat, tfd.StudentTProcessRegressionModel)\n # Check that we don't recompute the divisor matrix on flattening /\n # unflattening.\n self.assertIs(\n precomputed_stprm.kernel.schur_complement._precomputed_divisor_matrix_cholesky, # pylint:disable=line-too-long\n unflat.kernel.schur_complement._precomputed_divisor_matrix_cholesky)\n\n # TODO(b/196219597): Enable this test once STPRM works across TF function\n # boundaries.\n # index_observations = np.random.uniform(-1., 1., (6,)).astype(np.float64)\n # @tf.function\n # def log_prob(d):\n # return d.log_prob(index_observations)\n\n # lp = self.evaluate(precomputed_stprm.log_prob(index_observations))\n\n # self.assertAllClose(lp, self.evaluate(log_prob(precomputed_stprm)))\n # self.assertAllClose(lp, self.evaluate(log_prob(unflat)))\n\n def testEmptyDataMatchesStPPrior(self):\n df = np.float64(3.5)\n amp = np.float64(.5)\n len_scale = np.float64(.2)\n index_points = np.random.uniform(-1., 1., (10, 1)).astype(np.float64)\n\n # k_xx - k_xn @ (k_nn + sigma^2) @ k_nx + sigma^2\n mean_fn = lambda x: x[:, 0]**2\n\n kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)\n stp = tfd.StudentTProcess(\n df,\n kernel,\n index_points,\n mean_fn=mean_fn,\n validate_args=True)\n\n stprm_nones = tfd.StudentTProcessRegressionModel(\n df,\n kernel=kernel,\n index_points=index_points,\n mean_fn=mean_fn,\n validate_args=True)\n\n stprm_zero_shapes = tfd.StudentTProcessRegressionModel(\n df,\n kernel=kernel,\n index_points=index_points,\n observation_index_points=tf.ones([0, 1], tf.float64),\n observations=tf.ones([0], tf.float64),\n mean_fn=mean_fn,\n validate_args=True)\n\n for stprm in [stprm_nones, stprm_zero_shapes]:\n self.assertAllClose(\n self.evaluate(stp.mean()), self.evaluate(stprm.mean()))\n self.assertAllClose(self.evaluate(stp.covariance()),\n self.evaluate(stprm.covariance()))\n self.assertAllClose(self.evaluate(stp.variance()),\n self.evaluate(stprm.variance()))\n\n observations = np.random.uniform(-1., 1., 10).astype(np.float64)\n self.assertAllClose(self.evaluate(stp.log_prob(observations)),\n self.evaluate(stprm.log_prob(observations)))\n\n def testCopy(self):\n # 5 random index points in R^2\n index_points_1 = np.random.uniform(-4., 4., (5, 2)).astype(np.float32)\n # 10 random index points in R^2\n index_points_2 = np.random.uniform(-4., 4., (10, 2)).astype(np.float32)\n\n 
observation_index_points_1 = (\n np.random.uniform(-4., 4., (7, 2)).astype(np.float32))\n observation_index_points_2 = (\n np.random.uniform(-4., 4., (9, 2)).astype(np.float32))\n\n observations_1 = np.random.uniform(-1., 1., 7).astype(np.float32)\n observations_2 = np.random.uniform(-1., 1., 9).astype(np.float32)\n\n # ==> shape = [6, 25, 2]\n mean_fn = lambda x: np.array([0.], np.float32)\n kernel_1 = psd_kernels.ExponentiatedQuadratic()\n kernel_2 = psd_kernels.ExpSinSquared()\n\n stprm1 = tfd.StudentTProcessRegressionModel(\n df=5.,\n kernel=kernel_1,\n index_points=index_points_1,\n observation_index_points=observation_index_points_1,\n observations=observations_1,\n mean_fn=mean_fn,\n validate_args=True)\n stprm2 = stprm1.copy(\n kernel=kernel_2,\n index_points=index_points_2,\n observation_index_points=observation_index_points_2,\n observations=observations_2)\n\n precomputed_stprm1 = (\n tfd.StudentTProcessRegressionModel.precompute_regression_model(\n df=5.,\n kernel=kernel_1,\n index_points=index_points_1,\n observation_index_points=observation_index_points_1,\n observations=observations_1,\n mean_fn=mean_fn,\n validate_args=True))\n precomputed_stprm2 = precomputed_stprm1.copy(index_points=index_points_2)\n self.assertIs(precomputed_stprm1.mean_fn, precomputed_stprm2.mean_fn)\n self.assertIs(precomputed_stprm1.kernel, precomputed_stprm2.kernel)\n\n event_shape_1 = [5]\n event_shape_2 = [10]\n\n self.assertIsInstance(stprm1.kernel.schur_complement.base_kernel,\n psd_kernels.ExponentiatedQuadratic)\n self.assertIsInstance(stprm2.kernel.schur_complement.base_kernel,\n psd_kernels.ExpSinSquared)\n self.assertAllEqual(self.evaluate(stprm1.batch_shape_tensor()),\n self.evaluate(stprm2.batch_shape_tensor()))\n self.assertAllEqual(self.evaluate(stprm1.event_shape_tensor()),\n event_shape_1)\n self.assertAllEqual(self.evaluate(stprm2.event_shape_tensor()),\n event_shape_2)\n self.assertAllEqual(self.evaluate(stprm1.index_points), index_points_1)\n self.assertAllEqual(self.evaluate(stprm2.index_points), index_points_2)\n\n\nif __name__ == '__main__':\n test_util.main()\n"
] | [
[
"tensorflow.compat.v2.function",
"tensorflow.compat.v2.linalg.matmul",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.einsum",
"tensorflow.compat.v2.linspace",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.linalg.matvec",
"tensorflow.compat.v2.linalg.diag_part"
],
[
"numpy.random.uniform",
"numpy.reshape",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.ones",
"numpy.array",
"numpy.meshgrid",
"numpy.linspace",
"numpy.float64",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.linalg.diag_part"
]
] |
andybi7676/s3prl | [
"0e5acc5d499a629f946d561d87e8924ba3eb004b"
] | [
"s3prl/downstream/voxceleb1/expert.py"
] | [
"# -*- coding: utf-8 -*- #\n\"\"\"*********************************************************************************************\"\"\"\n# FileName [ expert.py ]\n# Synopsis [ the phone linear downstream wrapper ]\n# Author [ S3PRL ]\n# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]\n\"\"\"*********************************************************************************************\"\"\"\n\n\n###############\n# IMPORTATION #\n###############\nimport os\nimport math\nimport torch\nimport random\nimport pathlib\n#-------------#\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, DistributedSampler\nfrom torch.distributed import is_initialized\nfrom torch.nn.utils.rnn import pad_sequence\n#-------------#\nfrom ..model import *\nfrom .dataset import SpeakerClassifiDataset\nfrom argparse import Namespace\nfrom pathlib import Path\n\n\nclass DownstreamExpert(nn.Module):\n \"\"\"\n Used to handle downstream-specific operations\n eg. downstream forward, metric computation, contents to log\n \"\"\"\n\n def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):\n super(DownstreamExpert, self).__init__()\n self.upstream_dim = upstream_dim\n self.downstream = downstream_expert\n self.datarc = downstream_expert['datarc']\n self.modelrc = downstream_expert['modelrc']\n\n root_dir = Path(self.datarc['file_path'])\n\n self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])\n self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])\n self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])\n \n model_cls = eval(self.modelrc['select'])\n model_conf = self.modelrc.get(self.modelrc['select'], {})\n self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])\n self.model = model_cls(\n input_dim = self.modelrc['projector_dim'],\n output_dim = self.train_dataset.speaker_num,\n **model_conf,\n )\n self.objective = nn.CrossEntropyLoss()\n \n self.logging = os.path.join(expdir, 'log.log')\n self.register_buffer('best_score', torch.zeros(1))\n\n def _get_train_dataloader(self, dataset):\n sampler = DistributedSampler(dataset) if is_initialized() else None\n return DataLoader(\n dataset, batch_size=self.datarc['train_batch_size'], \n shuffle=(sampler is None), sampler=sampler,\n num_workers=self.datarc['num_workers'],\n collate_fn=dataset.collate_fn\n )\n\n def _get_eval_dataloader(self, dataset):\n return DataLoader(\n dataset, batch_size=self.datarc['eval_batch_size'],\n shuffle=False, num_workers=self.datarc['num_workers'],\n collate_fn=dataset.collate_fn\n )\n\n def get_train_dataloader(self):\n return self._get_train_dataloader(self.train_dataset)\n\n def get_dev_dataloader(self):\n return self._get_eval_dataloader(self.dev_dataset)\n\n def get_test_dataloader(self):\n return self._get_eval_dataloader(self.test_dataset)\n\n # Interface\n def get_dataloader(self, mode):\n return eval(f'self.get_{mode}_dataloader')()\n\n # Interface\n def forward(self, mode, features, labels, records, **kwargs):\n device = features[0].device\n features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)\n features = pad_sequence(features, batch_first=True)\n features = self.projector(features)\n predicted, _ = self.model(features, features_len)\n\n labels = torch.LongTensor(labels).to(features.device)\n loss = self.objective(predicted, labels)\n\n predicted_classid = predicted.max(dim=-1).indices\n records['acc'] += (predicted_classid == 
labels).view(-1).cpu().float().tolist()\n records['loss'].append(loss.item())\n\n return loss\n\n # interface\n def log_records(self, mode, records, logger, global_step, **kwargs):\n save_names = []\n for key, values in records.items():\n average = torch.FloatTensor(values).mean().item()\n logger.add_scalar(\n f'voxceleb1/{mode}-{key}',\n average,\n global_step=global_step\n )\n with open(self.logging, 'a') as f:\n if key == 'acc':\n f.write(f'{mode} at step {global_step}: {average}\\n')\n if mode == 'dev' and average > self.best_score:\n self.best_score = torch.ones(1) * average\n f.write(f'New best on {mode} at step {global_step}: {average}\\n')\n save_names.append(f'{mode}-best.ckpt')\n return save_names\n"
] | [
[
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.DataLoader",
"torch.FloatTensor",
"torch.nn.Linear",
"torch.ones",
"torch.utils.data.DistributedSampler",
"torch.nn.CrossEntropyLoss",
"torch.distributed.is_initialized",
"torch.zeros",
"torch.LongTensor"
]
] |
arielclj/singa-easy | [
"fd4bc601a5501062936f874df14711a3cefa1346"
] | [
"singa_easy/modules/mod_modelslicing/utils/lr_scheduler.py"
] | [
"from torch.optim.lr_scheduler import _LRScheduler\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\n\nclass GradualWarmupScheduler(_LRScheduler):\n \"\"\" Gradually warm-up(increasing) learning rate in optimizer.\n Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n multiplier: target learning rate = base lr * multiplier\n warmup_epoch: target learning rate is linearly reached at the warmup_epoch\n scheduler: scheduler used after warmup_epoch (eg. ReduceLROnPlateau)\n \"\"\"\n\n def __init__(self, optimizer, warmup_epoch, multiplier=1.0, scheduler=None):\n assert multiplier > 1., 'multiplier should be greater than 1.'\n self.multiplier = multiplier\n self.warmup_epoch = warmup_epoch\n self.scheduler = scheduler\n self.finish_warmup = False\n super().__init__(optimizer)\n\n def get_lr(self):\n if self.last_epoch > self.warmup_epoch:\n if self.scheduler:\n if not self.finish_warmup:\n self.scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]\n self.finish_warmup = True\n return self.scheduler.get_lr()\n return [base_lr * self.multiplier for base_lr in self.base_lrs]\n\n return [base_lr*((self.multiplier-1.)*self.last_epoch/self.warmup_epoch+1.) for base_lr in self.base_lrs]\n\n def step(self, epoch=None, metrics=None):\n if self.finish_warmup and self.scheduler:\n if epoch is None:\n self.scheduler.step(None)\n else:\n self.scheduler.step(epoch - self.warmup_epoch)\n else:\n return super(GradualWarmupScheduler, self).step(epoch)\n\n\nif __name__ == '__main__':\n import torch\n v = torch.zeros(10, requires_grad=True)\n optim = torch.optim.SGD([v], lr=0.01)\n\n scheduler = CosineAnnealingLR(optim, 95)\n scheduler = GradualWarmupScheduler(optim, multiplier=10, warmup_epoch=5, scheduler=scheduler)\n\n for epoch in range(0, 100):\n scheduler.step(epoch)\n print(epoch, optim.param_groups[0]['lr'])\n\n"
] | [
[
"torch.zeros",
"torch.optim.SGD",
"torch.optim.lr_scheduler.CosineAnnealingLR"
]
] |
ThomasHoppe/pyflux | [
"297f2afc2095acd97c12e827dd500e8ea5da0c0f"
] | [
"pyflux/arma/tests/test_arima_laplace.py"
] | [
"import numpy as np\nfrom pyflux.arma import ARIMA\nfrom pyflux.families import Laplace\n\nnoise = np.random.normal(0,1,100)\ndata = np.zeros(100)\n\nfor i in range(1,len(data)):\n data[i] = 0.9*data[i-1] + noise[i]\n\ndef test_no_terms():\n \"\"\"\n Tests an ARIMA model with no AR or MA terms, and that\n the latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = ARIMA(data=data, ar=0, ma=0, family=Laplace())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 2)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_couple_terms():\n \"\"\"\n Tests an ARIMA model with 1 AR and 1 MA term and that\n the latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = ARIMA(data=data, ar=1, ma=1, family=Laplace())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_couple_terms_integ():\n \"\"\"\n Tests an ARIMA model with 1 AR and 1 MA term, integrated once, and that\n the latent variable list length is correct, and that the estimated\n latent variables are not nan\n \"\"\"\n model = ARIMA(data=data, ar=1, ma=1, integ=1, family=Laplace())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)\n\ndef test_predict_length():\n \"\"\"\n Tests that the prediction dataframe length is equal to the number of steps h\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(model.predict(h=5).shape[0] == 5)\n\ndef test_predict_is_length():\n \"\"\"\n Tests that the prediction IS dataframe length is equal to the number of steps h\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(model.predict_is(h=5).shape[0] == 5)\n\ndef test_predict_nans():\n \"\"\"\n Tests that the predictions are not nans\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)\n\ndef test_predict_is_nans():\n \"\"\"\n Tests that the in-sample predictions are not nans\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)\n\ndef test_predict_nonconstant():\n \"\"\"\n We should not really have predictions that are constant (should be some difference)...\n This captures bugs with the predict function not iterating forward\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n predictions = model.predict(h=10, intervals=False)\n assert(not np.all(predictions.values==predictions.values[0]))\n \ndef test_predict_is_nonconstant():\n \"\"\"\n We should not really have predictions that are constant (should be some difference)...\n This captures bugs with the predict function not iterating forward\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n predictions = model.predict_is(h=10, intervals=False)\n assert(not np.all(predictions.values==predictions.values[0]))\n \ndef test_predict_intervals():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n 
predictions = model.predict(h=10, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_is_intervals():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit()\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_intervals_bbvi():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n predictions = model.predict(h=10, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_is_intervals_bbvi():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_intervals_mh():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('M-H', nsims=200, quiet_progress=True)\n predictions = model.predict(h=10, intervals=True)\n\n assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_predict_is_intervals_mh():\n \"\"\"\n Tests prediction intervals are ordered correctly\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('M-H', nsims=200, quiet_progress=True)\n predictions = model.predict_is(h=10, intervals=True)\n assert(np.all(predictions['99% 
Prediction Interval'].values > predictions['95% Prediction Interval'].values))\n assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))\n assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))\n assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))\n\ndef test_sample_model():\n \"\"\"\n Tests sampling function\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n sample = model.sample(nsims=100)\n assert(sample.shape[0]==100)\n assert(sample.shape[1]==len(data)-2)\n\ndef test_ppc():\n \"\"\"\n Tests PPC value\n \"\"\"\n model = ARIMA(data=data, ar=2, ma=2, family=Laplace())\n x = model.fit('BBVI', iterations=100, quiet_progress=True)\n p_value = model.ppc()\n assert(0.0 <= p_value <= 1.0)\n"
] | [
[
"numpy.zeros",
"numpy.all",
"numpy.isnan",
"numpy.random.normal",
"numpy.array"
]
] |
modichirag/21cm_cleaning | [
"1615fea4e2d617bb6ef00770a49698901227daa8"
] | [
"code/plotting/plot_evalrep.py"
] | [
"#!/usr/bin/env python3\n#\n# Plots the power spectra and Fourier-space biases for the HI.\n#\nimport numpy as np\nimport os, sys\nimport matplotlib.pyplot as plt\nfrom pmesh.pm import ParticleMesh\nfrom scipy.interpolate import InterpolatedUnivariateSpline as ius\nfrom nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower\nfrom nbodykit.cosmology import Planck15, EHPower, Cosmology\n\nsys.path.append('../utils/')\nsys.path.append('../recon/')\nsys.path.append('../recon/cosmo4d/')\nfrom lab import mapbias as mapp\nfrom lab import report as rp\nfrom lab import dg\nfrom getbiasparams import getbias\nimport tools\n#\n\nfrom matplotlib import rc, rcParams, font_manager\nrcParams['font.family'] = 'serif'\nfsize = 12\nfontmanage = font_manager.FontProperties(family='serif', style='normal',\n size=fsize, weight='normal', stretch='normal')\nfont = {'family': fontmanage.get_family()[0],\n 'style': fontmanage.get_style(),\n 'weight': fontmanage.get_weight(),\n 'size': fontmanage.get_size(),\n }\n\nprint(font)\n\n\n#\nimport argparse\nparser = argparse.ArgumentParser()\n#parser.add_argument('-m', '--model', help='model name to use')\nparser.add_argument('-a', '--aa', help='scale factor', default=0.3333, type=float)\nparser.add_argument('-l', '--bs', help='boxsize', default=256, type=float)\nparser.add_argument('-n', '--nmesh', help='nmesh', default=128, type=int)\nparser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)\nparser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.01, type=float)\nargs = parser.parse_args()\n\nfigpath = './figs/'\n\nbs, nc, aa = args.bs, args.nmesh, args.aa\nzz = 1/aa- 1\nkmin = args.kmin\nang = args.angle\npm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])\nrank = pm.comm.rank\n\ndpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, kmin, ang)\ndpath += 'L%04d-N%04d/'%(bs, nc)\n\n################\ndef make_rep_plot():\n \"\"\"Does the work of making the real-space xi(r) and b(r) figure.\"\"\"\n \n\n noises = np.loadtxt('/global/u1/c/chmodi/Programs/21cm/21cm_cleaning/data/summaryHI.txt').T\n for i in range(noises[0].size):\n if noises[0][i] == np.round(1/aa-1, 2): noise = noises[3][i]\n print(noise)\n\n datap = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap')\n dataprsd = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap')\n try:\n datapup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap_up')\n dataprsdup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap_up')\n except Exception as e: print(e)\n\n fig, ax = plt.subplots(1, 2, figsize=(9, 4))\n\n def makeplot(bfit, datapp, lss, lww, cc, lbl=None):\n rpfit = rp.evaluate1(bfit, datapp, field='mapp')[:-2]\n ax[0].plot(rpfit[0]['k'], rpfit[0]['power']/(rpfit[1]['power']*rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc, label=lbl)\n ax[1].plot(rpfit[0]['k'], (rpfit[1]['power']/rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc)\n \n\n #fits\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier/%d-0.00/'%(nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n print(bpaths)\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = datap\n lss, lww, cc, lbl = '-', 2, 'C0', 'Fid'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n \n try:\n basepath = 
dpath+'ZA/opt_s999_h1massA_fourier/upsample1/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = datapup\n lss, lww, cc, lbl = '-', 2, 'C1', 'Up1'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier/upsample2/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = datapup\n lss, lww, cc, lbl = '-', 2, 'C2', 'Up2'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n #rsd\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/%d-0.00/'%(nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = dataprsd\n lss, lww, cc, lbl = '--', 2, 'C0', 'rsd'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample1/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = dataprsdup\n lss, lww, cc, lbl = '--', 2, 'C1', 'rsd up'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n try:\n basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample2/%d-0.00/'%(2*nc)\n bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]\n for path in bpaths:\n if os.path.isdir(path): break\n print(path)\n bfit = mapp.Observable.load(path)\n datapp = dataprsdup\n lss, lww, cc, lbl = '--', 2, 'C2', 'rsd up2'\n makeplot(bfit, datapp, lss, lww, cc, lbl)\n print('%s done'%lbl)\n except Exception as e: print(e)\n\n\n ax[0].set_ylabel('$r_{cc}$', fontdict=font)\n ax[1].set_ylabel(r'$\\sqrt{P_{\\rm mod}/P_{hh}}$', fontdict=font)\n for axis in ax:\n axis.set_xlabel(r'$k\\quad [h\\,{\\rm Mpc}^{-1}]$', fontdict=font)\n axis.set_xscale('log')\n axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')\n axis.legend(prop=fontmanage)\n\n # Put on some more labels.\n for axis in ax:\n axis.set_xscale('log')\n for tick in axis.xaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n for tick in axis.yaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n ##and finish\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n if rank == 0: plt.savefig(figpath + '/rep_L%04d_%04d.pdf'%(bs, aa*10000))\n\n\n\n################\n\n\nif __name__==\"__main__\":\n make_rep_plot()\n #\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.font_manager.FontProperties",
"numpy.round",
"numpy.loadtxt"
]
] |
ashwanikumar04/ml-recommendation-engine | [
"57a7c0d5ac073b976e40c17d8892a4b7291d08ed"
] | [
"matrix_factorization/mf_keras.py"
] | [
"import pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Embedding, Dot, Add, Flatten\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.optimizers import Adam\n\n# df = pd.read_csv(\"./data/processed_rating.csv\")\n\n# N = df[\"user_idx\"].max() + 1\n# M = df[\"isbn_idx\"].max() + 1\n\n# df = shuffle(df)\n\n# cut_off = int(0.8 * len(df))\n\n# df_train = df.iloc[:cut_off]\n# df_test = df.iloc[cut_off:]\n\n# K = 15\n\n# mu = df_train[\"Book-Rating\"].mean()\n# epochs = 15\n# reg_penalty = 0.0\n\n# u = Input(shape=(1, ))\n# b = Input(shape=(1, ))\n\n# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)\n# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)\n\n# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)\n# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)\n\n# x = Dot(axes=2)([u_embedding, b_embedding])\n\n# x = Add()([x, u_bias, b_bias])\n# x = Flatten()(x)\n\n# model = Model(inputs=[u, b], outputs=x)\n# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=[\"mse\"])\n\n# r = model.fit(\n# x=[df_train[\"user_idx\"].values, df_train[\"isbn_idx\"].values],\n# y=df_train[\"Book-Rating\"].values - mu,\n# epochs=epochs,\n# batch_size=128,\n# validation_data=([df_test[\"user_idx\"].values,\n# df_test[\"isbn_idx\"].values], df_test[\"Book-Rating\"].values - mu))\n\n# plt.plot(r.history['loss'], label=\"train loss\")\n# plt.plot(r.history['val_loss'], label=\"test loss\")\n# plt.legend()\n# plt.show()\n\ndf = pd.read_csv(\"./data/archive/ratings.csv\")\n\n# N = len(set(df[\"user_id\"].values)) + 1\n# M = len(set(df[\"book_id\"].values)) + 1\n\n# df = shuffle(df)\n\n# cut_off = int(0.8 * len(df))\n\n# df_train = df.iloc[:cut_off]\n# df_test = df.iloc[cut_off:]\n\n# K = 15\n\n# mu = df_train[\"rating\"].mean()\n# epochs = 15\n# reg_penalty = 0.0\n\n# u = Input(shape=(1, ))\n# b = Input(shape=(1, ))\n\n# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)\n# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)\n\n# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)\n# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)\n\n# x = Dot(axes=2)([u_embedding, b_embedding])\n\n# x = Add()([x, u_bias, b_bias])\n# x = Flatten()(x)\n\n# model = Model(inputs=[u, b], outputs=x)\n# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=[\"mse\"])\n\n# r = model.fit(x=[df_train[\"user_id\"].values, df_train[\"book_id\"].values],\n# y=df_train[\"rating\"].values - mu,\n# epochs=epochs,\n# batch_size=128,\n# validation_data=([\n# df_test[\"user_id\"].values, df_test[\"book_id\"].values\n# ], df_test[\"rating\"].values - mu))\n\n# model.save('regression_model.h5')\n# plt.plot(r.history['loss'], label=\"train loss\")\n# plt.plot(r.history['val_loss'], label=\"test loss\")\n# plt.legend()\n# plt.show()\n\n\ndef predict(user_id):\n model = keras.models.load_model('regression_model.h5')\n book_data = np.array(list(set(df.book_id)))\n user = np.array([user_id for i in range(len(book_data))])\n predictions = model.predict([user, book_data])\n predictions = np.array([a[0] for a in predictions])\n recommended_book_ids = (-predictions).argsort()[:5]\n print(recommended_book_ids)\n 
print(predictions[recommended_book_ids])\n\npredict(1)\n"
] | [
[
"pandas.read_csv",
"numpy.array",
"tensorflow.keras.models.load_model"
]
] |
RandolphVI/Question-Difficulty-Prediction | [
"77b4b83b5bc747c5074926d7a37545a5d46ed343"
] | [
"TF/TARNN/test_tarnn.py"
] | [
"# -*- coding:utf-8 -*-\n__author__ = 'Randolph'\n\nimport os\nimport sys\nimport time\nimport logging\n\nsys.path.append('../')\nlogging.getLogger('tensorflow').disabled = True\n\nimport tensorflow as tf\nfrom utils import checkmate as cm\nfrom utils import data_helpers as dh\nfrom utils import param_parser as parser\nfrom sklearn.metrics import mean_squared_error, r2_score\n\nargs = parser.parameter_parser()\nMODEL = dh.get_model_name()\nlogger = dh.logger_fn(\"tflog\", \"logs/Test-{0}.log\".format(time.asctime()))\n\nCPT_DIR = 'runs/' + MODEL + '/checkpoints/'\nBEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'\nSAVE_DIR = 'output/' + MODEL\n\n\ndef test_tarnn():\n \"\"\"Test TARNN model.\"\"\"\n # Print parameters used for the model\n dh.tab_printer(args, logger)\n\n # Load data\n logger.info(\"Loading data...\")\n logger.info(\"Data processing...\")\n test_data = dh.load_data_and_labels(args.test_file, args.word2vec_file, data_aug_flag=False)\n\n logger.info(\"Data padding...\")\n x_test_content, x_test_question, x_test_option, y_test = dh.pad_data(test_data, args.pad_seq_len)\n\n # Load tarnn model\n OPTION = dh.option(pattern=1)\n if OPTION == 'B':\n logger.info(\"Loading best model...\")\n checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)\n else:\n logger.info(\"Loading latest model...\")\n checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)\n logger.info(checkpoint_file)\n\n graph = tf.Graph()\n with graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=args.allow_soft_placement,\n log_device_placement=args.log_device_placement)\n session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{0}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n # Get the placeholders from the graph by name\n input_x_content = graph.get_operation_by_name(\"input_x_content\").outputs[0]\n input_x_question = graph.get_operation_by_name(\"input_x_question\").outputs[0]\n input_x_option = graph.get_operation_by_name(\"input_x_option\").outputs[0]\n input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n is_training = graph.get_operation_by_name(\"is_training\").outputs[0]\n\n # Tensors we want to evaluate\n scores = graph.get_operation_by_name(\"output/scores\").outputs[0]\n loss = graph.get_operation_by_name(\"loss/loss\").outputs[0]\n\n # Split the output nodes name by '|' if you have several output nodes\n output_node_names = \"output/scores\"\n\n # Save the .pb model file\n output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,\n output_node_names.split(\"|\"))\n tf.train.write_graph(output_graph_def, \"graph\", \"graph-tarnn-{0}.pb\".format(MODEL), as_text=False)\n\n # Generate batches for one epoch\n batches = dh.batch_iter(list(zip(x_test_content, x_test_question, x_test_option, y_test)),\n args.batch_size, 1, shuffle=False)\n\n test_counter, test_loss = 0, 0.0\n\n # Collect the predictions here\n true_labels = []\n predicted_scores = []\n\n for batch_test in batches:\n x_batch_content, x_batch_question, x_batch_option, y_batch = zip(*batch_test)\n feed_dict = {\n input_x_content: x_batch_content,\n input_x_question: x_batch_question,\n input_x_option: x_batch_option,\n input_y: y_batch,\n dropout_keep_prob: 1.0,\n is_training: 
False\n }\n batch_scores, cur_loss = sess.run([scores, loss], feed_dict)\n\n # Prepare for calculating metrics\n for i in y_batch:\n true_labels.append(i)\n for j in batch_scores:\n predicted_scores.append(j)\n\n test_loss = test_loss + cur_loss\n test_counter = test_counter + 1\n\n # Calculate PCC & DOA\n pcc, doa = dh.evaluation(true_labels, predicted_scores)\n # Calculate RMSE\n rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5\n r2 = r2_score(true_labels, predicted_scores)\n\n test_loss = float(test_loss / test_counter)\n\n logger.info(\"All Test Dataset: Loss {0:g} | PCC {1:g} | DOA {2:g} | RMSE {3:g} | R2 {4:g}\"\n .format(test_loss, pcc, doa, rmse, r2))\n\n # Save the prediction result\n if not os.path.exists(SAVE_DIR):\n os.makedirs(SAVE_DIR)\n dh.create_prediction_file(output_file=SAVE_DIR + \"/predictions.json\", all_id=test_data.id,\n all_labels=true_labels, all_predict_scores=predicted_scores)\n\n logger.info(\"All Done.\")\n\n\nif __name__ == '__main__':\n test_tarnn()\n"
] | [
[
"sklearn.metrics.mean_squared_error",
"tensorflow.Graph",
"tensorflow.train.latest_checkpoint",
"tensorflow.Session",
"sklearn.metrics.r2_score",
"tensorflow.ConfigProto"
]
] |
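The test routine in the row above reports PCC and DOA through project-specific helpers and derives RMSE and R² from scikit-learn. A minimal sketch of just the scikit-learn part, using dummy values rather than real project predictions:

# Metric computation as in the script above, on dummy data (values are illustrative only).
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

true_labels = np.array([0.2, 0.5, 0.9, 0.4])
predicted_scores = np.array([0.25, 0.45, 0.8, 0.5])

rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5  # RMSE is the square root of MSE
r2 = r2_score(true_labels, predicted_scores)
print("RMSE {0:g} | R2 {1:g}".format(rmse, r2))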
Ambattz/Intelligent_Traffic_Management_System | [
"51c3100ddb3479538d8a6accbcc0ea9f751481a7"
] | [
"test_model_images.py"
] | [
"import numpy as np\r\nimport os\r\nimport six.moves.urllib as urllib\r\nimport sys\r\nimport tarfile\r\nimport tensorflow.compat.v1 as tf\r\ntf.disable_v2_behavior()\r\nimport zipfile\r\n\r\nfrom collections import defaultdict\r\nfrom io import StringIO\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\n\r\nfrom object_detection.utils import label_map_util\r\n\r\nfrom object_detection.utils import visualization_utils as vis_util\r\n\r\n# This is needed since the notebook is stored in the object_detection folder.\r\nsys.path.append(\"..\")\r\n\r\n\r\n# script repurposed from sentdex's edits and TensorFlow's example script. Pretty messy as not all unnecessary\r\n# parts of the original have been removed\r\n\r\n\r\n\r\n\r\n# # Model preparation\r\n\r\n# ## Variables\r\n#\r\n# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.\r\n#\r\n# By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.\r\n\r\n\r\n\r\n# What model to download.\r\nMODEL_NAME = 'trained_model' # change to whatever folder has the new graph\r\n# MODEL_FILE = MODEL_NAME + '.tar.gz' # these lines not needed as we are using our own model\r\n# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\r\n\r\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\r\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\r\n\r\n# List of the strings that is used to add correct label for each box.\r\nPATH_TO_LABELS = os.path.join('training', 'label.pbtxt') # our labels are in training/object-detection.pbkt\r\n\r\nNUM_CLASSES = 3 # we only are using one class at the moment (mask at the time of edit)\r\n\r\n\r\n# ## Download Model\r\n\r\n\r\n# opener = urllib.request.URLopener() # we don't need to download model since we have our own\r\n# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\r\n# tar_file = tarfile.open(MODEL_FILE)\r\n# for file in tar_file.getmembers():\r\n# file_name = os.path.basename(file.name)\r\n# if 'frozen_inference_graph.pb' in file_name:\r\n# tar_file.extract(file, os.getcwd())\r\n\r\n\r\n# ## Load a (frozen) Tensorflow model into memory.\r\n\r\n\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n\r\n# ## Loading label map\r\n# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine\r\n\r\n# In[7]:\r\n\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n\r\n\r\n\r\ndef load_image_into_numpy_array(image):\r\n (im_width, im_height) = image.size\r\n return np.array(image.getdata()).reshape(\r\n (im_height, im_width, 3)).astype(np.uint8)\r\n\r\n\r\n\r\n\r\n# For the sake of simplicity we will use only 2 images:\r\n# image1.jpg\r\n# image2.jpg\r\n# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\r\nPATH_TO_TEST_IMAGES_DIR = 'test'\r\nTEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(0, 60)] # adjust range for # of images in folder\r\n\r\n# Size, in inches, of the output images.\r\nIMAGE_SIZE = (12, 8)\r\n\r\n\r\nwith detection_graph.as_default():\r\n with tf.Session(graph=detection_graph) as sess:\r\n i = 0\r\n for image_path in TEST_IMAGE_PATHS:\r\n image = Image.open(image_path)\r\n # the array based representation of the image will be used later in order to prepare the\r\n # result image with boxes and labels on it.\r\n image_np = load_image_into_numpy_array(image)\r\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\r\n image_np_expanded = np.expand_dims(image_np, axis=0)\r\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n # Each box represents a part of the image where a particular object was detected.\r\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n # Each score represent how level of confidence for each of the objects.\r\n # Score is shown on the result image, together with the class label.\r\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n # Actual detection.\r\n (boxes, scores, classes, num_detections) = sess.run(\r\n [boxes, scores, classes, num_detections],\r\n feed_dict={image_tensor: image_np_expanded})\r\n # Visualization of the results of a detection.\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8)\r\n\r\n plt.figure(figsize=IMAGE_SIZE)\r\n plt.imshow(image_np) # matplotlib is configured for command line only so we save the outputs instead\r\n plt.savefig(\"outputs/detection_output{}.png\".format(i)) # create an outputs folder for the images to be saved\r\n i = i+1 # this was a quick fix for iteration, create a pull request if you'd like\r\n"
] | [
[
"tensorflow.compat.v1.Session",
"numpy.squeeze",
"matplotlib.pyplot.figure",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.compat.v1.import_graph_def",
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"tensorflow.compat.v1.GraphDef",
"tensorflow.compat.v1.disable_v2_behavior"
]
] |
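The detection loop in the row above feeds each image through a frozen graph that expects a batched tensor of shape [1, H, W, 3] and returns batched outputs that are squeezed before visualization. A NumPy-only sketch of that shape convention, with a dummy image and a placeholder boxes array standing in for the real graph tensors:

# Shape handling used by the detection loop above (no model needed; arrays are stand-ins).
import numpy as np

image_np = np.zeros((480, 640, 3), dtype=np.uint8)            # stand-in for a decoded test image
image_np_expanded = np.expand_dims(image_np, axis=0)          # batch dimension: (1, 480, 640, 3)

fake_boxes = np.zeros((1, 100, 4), dtype=np.float32)          # stand-in for detection_boxes:0 output
print(image_np_expanded.shape, np.squeeze(fake_boxes).shape)  # (1, 480, 640, 3) (100, 4)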
intel-analytics/WorldBankPoC | [
"49c19268601ff1aa7e396ddc5a8a23abfe73880e"
] | [
"vegnoveg/vegnonveg-fulltraining-nnframe.py"
] | [
"# Databricks notebook source\n\nimport pandas as pd\nfrom os import listdir\nfrom os.path import join, basename\nimport struct\nimport pickle\nimport json\nimport os\nfrom scipy import misc\nimport datetime as dt\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import udf\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n# import matplotlib.pyplot as plt\n# %matplotlib inline\n\n# COMMAND ----------\n\n# %pylab inline\nfrom bigdl.nn.layer import *\nfrom bigdl.nn.criterion import *\nfrom bigdl.optim.optimizer import *\nfrom bigdl.util.common import *\nfrom bigdl.dataset.transformer import *\nfrom bigdl.dataset import mnist\nfrom bigdl.transform.vision.image import *\nfrom zoo.pipeline.nnframes.nn_image_reader import *\nfrom zoo.pipeline.nnframes.nn_image_transformer import *\nfrom zoo.pipeline.nnframes.nn_classifier import *\nfrom zoo.common.nncontext import *\nimport urllib\n\n\n# COMMAND ----------\n\n\ndef scala_T(input_T):\n \"\"\"\n Helper function for building Inception layers. Transforms a list of numbers to a dictionary with ascending keys \n and 0 appended to the front. Ignores dictionary inputs. \n \n :param input_T: either list or dict\n :return: dictionary with ascending keys and 0 appended to front {0: 0, 1: realdata_1, 2: realdata_2, ...}\n \"\"\" \n if type(input_T) is list:\n # insert 0 into first index spot, such that the real data starts from index 1\n temp = [0]\n temp.extend(input_T)\n return dict(enumerate(temp))\n # if dictionary, return it back\n return input_T\n\n# COMMAND ----------\n\ndef Inception_Layer_v1(input_size, config, name_prefix=\"\"):\n \"\"\"\n Builds the inception-v1 submodule, a local network, that is stacked in the entire architecture when building\n the full model. \n \n :param input_size: dimensions of input coming into the local network\n :param config: ?\n :param name_prefix: string naming the layers of the particular local network\n :return: concat container object with all of the Sequential layers' ouput concatenated depthwise\n \"\"\" \n \n '''\n Concat is a container who concatenates the output of it's submodules along the provided dimension: all submodules \n take the same inputs, and their output is concatenated.\n '''\n concat = Concat(2)\n \n \"\"\"\n In the above code, we first create a container Sequential. Then add the layers into the container one by one. The \n order of the layers in the model is same with the insertion order. 
\n \n \"\"\"\n conv1 = Sequential()\n \n #Adding layes to the conv1 model we jus created\n \n #SpatialConvolution is a module that applies a 2D convolution over an input image.\n conv1.add(SpatialConvolution(input_size, config[1][1], 1, 1, 1, 1).set_name(name_prefix + \"1x1\"))\n conv1.add(ReLU(True).set_name(name_prefix + \"relu_1x1\"))\n concat.add(conv1)\n \n conv3 = Sequential()\n conv3.add(SpatialConvolution(input_size, config[2][1], 1, 1, 1, 1).set_name(name_prefix + \"3x3_reduce\"))\n conv3.add(ReLU(True).set_name(name_prefix + \"relu_3x3_reduce\"))\n conv3.add(SpatialConvolution(config[2][1], config[2][2], 3, 3, 1, 1, 1, 1).set_name(name_prefix + \"3x3\"))\n conv3.add(ReLU(True).set_name(name_prefix + \"relu_3x3\"))\n concat.add(conv3)\n \n \n conv5 = Sequential()\n conv5.add(SpatialConvolution(input_size,config[3][1], 1, 1, 1, 1).set_name(name_prefix + \"5x5_reduce\"))\n conv5.add(ReLU(True).set_name(name_prefix + \"relu_5x5_reduce\"))\n conv5.add(SpatialConvolution(config[3][1], config[3][2], 5, 5, 1, 1, 2, 2).set_name(name_prefix + \"5x5\"))\n conv5.add(ReLU(True).set_name(name_prefix + \"relu_5x5\"))\n concat.add(conv5)\n \n \n pool = Sequential()\n pool.add(SpatialMaxPooling(3, 3, 1, 1, 1, 1, to_ceil=True).set_name(name_prefix + \"pool\"))\n pool.add(SpatialConvolution(input_size, config[4][1], 1, 1, 1, 1).set_name(name_prefix + \"pool_proj\"))\n pool.add(ReLU(True).set_name(name_prefix + \"relu_pool_proj\"))\n concat.add(pool).set_name(name_prefix + \"output\")\n return concat\n\n# COMMAND ----------\n\ndef Inception_v1(class_num):\n model = Sequential()\n model.add(SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3, 1, False).set_name(\"conv1/7x7_s2\"))\n model.add(ReLU(True).set_name(\"conv1/relu_7x7\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name(\"pool1/3x3_s2\"))\n model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name(\"pool1/norm1\"))\n model.add(SpatialConvolution(64, 64, 1, 1, 1, 1).set_name(\"conv2/3x3_reduce\"))\n model.add(ReLU(True).set_name(\"conv2/relu_3x3_reduce\"))\n model.add(SpatialConvolution(64, 192, 3, 3, 1, 1, 1, 1).set_name(\"conv2/3x3\"))\n model.add(ReLU(True).set_name(\"conv2/relu_3x3\"))\n model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name(\"conv2/norm2\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name(\"pool2/3x3_s2\"))\n model.add(Inception_Layer_v1(192, scala_T([scala_T([64]), scala_T(\n [96, 128]), scala_T([16, 32]), scala_T([32])]), \"inception_3a/\"))\n model.add(Inception_Layer_v1(256, scala_T([scala_T([128]), scala_T(\n [128, 192]), scala_T([32, 96]), scala_T([64])]), \"inception_3b/\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))\n model.add(Inception_Layer_v1(480, scala_T([scala_T([192]), scala_T(\n [96, 208]), scala_T([16, 48]), scala_T([64])]), \"inception_4a/\"))\n model.add(Inception_Layer_v1(512, scala_T([scala_T([160]), scala_T(\n [112, 224]), scala_T([24, 64]), scala_T([64])]), \"inception_4b/\"))\n model.add(Inception_Layer_v1(512, scala_T([scala_T([128]), scala_T(\n [128, 256]), scala_T([24, 64]), scala_T([64])]), \"inception_4c/\"))\n model.add(Inception_Layer_v1(512, scala_T([scala_T([112]), scala_T(\n [144, 288]), scala_T([32, 64]), scala_T([64])]), \"inception_4d/\"))\n model.add(Inception_Layer_v1(528, scala_T([scala_T([256]), scala_T(\n [160, 320]), scala_T([32, 128]), scala_T([128])]), \"inception_4e/\"))\n model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))\n model.add(Inception_Layer_v1(832, scala_T([scala_T([256]), scala_T(\n [160, 320]), scala_T([32, 128]), 
scala_T([128])]), \"inception_5a/\"))\n model.add(Inception_Layer_v1(832, scala_T([scala_T([384]), scala_T(\n [192, 384]), scala_T([48, 128]), scala_T([128])]), \"inception_5b/\"))\n model.add(SpatialAveragePooling(7, 7, 1, 1).set_name(\"pool5/7x7_s1\"))\n model.add(Dropout(0.4).set_name(\"pool5/drop_7x7_s1\"))\n model.add(View([1024], num_input_dims=3))\n model.add(Linear(1024, class_num).set_name(\"loss3/classifier\"))\n model.add(LogSoftMax().set_name(\"loss3/loss3\"))\n model.reset()\n return model\n\n# COMMAND ----------\n\n# MAGIC %md ## Download the images from Amazon s3\n# MAGIC \n# MAGIC Make sure you have AWS command line interface to recursively download all images in s3 folder. You can set up aws cli from this link: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html\n\n# COMMAND ----------\n\nimport urllib\nfrom os import path\nMODEL_ROOT = \"/mnt/nobigdl/few-inceptionv1\"\n# dbutils.fs.mkdirs(MODEL_ROOT)\n#local_folder = DATA_ROOT + '/vegnonveg-samples'\ncheckpoint_path = path.join(MODEL_ROOT, \"checkpoints\")\n\n# if not path.isdir(local_folder):\n# os.system('aws s3 cp --recursive s3://vegnonveg/vegnonveg-fewsamples %s' % local_folder)\n\n# COMMAND ----------\n\n# MAGIC %md ## Save images and load to Spark as BigDL ImageFrame\n# MAGIC \n# MAGIC save data to parquet files and load to spark. Add label to each image.\n\n# COMMAND ----------\n\nDATA_ROOT = \"/data/worldbank/\"\nsample_path = DATA_ROOT + 'samples/'\n# sample_path = DATA_ROOT + 'imagenet_samples/'\n# sample_path = '/mnt/nobigdl/vegnonveg-samples100/'\nlabel_path = DATA_ROOT + 'vegnonveg-samples_labels.csv'\nparquet_path = DATA_ROOT + 'sample_parquet/'\n# dbutils.fs.rm(parquet_path, True)\n\n\n\n# COMMAND ----------\nsparkConf = create_spark_conf().setMaster(\"local[2]\").setAppName(\"test_validation\")\nsc = get_spark_context(sparkConf)\nsqlContext = SQLContext(sc)\n#intializa bigdl\ninit_engine()\nredire_spark_logs()\n\n# This only runs at the first time to generate parquet files\nimage_frame = NNImageReader.readImages(sample_path, sc, minParitions=32)\n# save dataframe to parquet files\n# image_frame.write.parquet(parquet_path)\n# ImageFrame.write_parquet(sample_path, parquet_path, sc, partition_num=32)\n\n# COMMAND ----------\n\n# load parquet file into spark cluster\nimport time\nstart = time.time()\nimage_raw_DF = sqlContext.read.parquet(parquet_path)\nend = time.time()\nprint(\"Load data time is: \" + str(end-start) + \" seconds\")\n\n# COMMAND ----------\n\n# create dict from item_name to label\nlabels_csv = pd.read_csv(label_path)\nunique_labels = labels_csv['item_name'].unique().tolist()\nlabel_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))\nclass_num = len(label_dict)\n\n# COMMAND ----------\n\n# create label dataframe\nlabel_raw_DF = sqlContext.read.format(\"com.databricks.spark.csv\")\\\n .option(\"header\", \"true\")\\\n .option(\"mode\", \"DROPMALFORMED\")\\\n .load(label_path)\nget_label = udf(lambda item_name: float(label_dict[item_name]), FloatType())\nchange_name = udf(lambda uid: uid+\".jpg\", StringType())\nlabelDF = label_raw_DF.withColumn(\"label\", get_label(\"item_name\")).withColumn(\"image_name\", change_name(\"obs_uid\"))\nlabelDF.show(truncate=False)\n\n# COMMAND ----------\n\nget_name = udf(lambda row: row[0].split(\"/\")[-1], StringType())\nimageDF = image_raw_DF.withColumn(\"image_name\", get_name(\"image\"))\nimageDF.show(truncate=False)\ndataDF = imageDF.join(labelDF, \"image_name\", \"inner\").select(\"image\", \"image_name\", 
\"label\")\ndataDF.show(truncate=False)\n\n# COMMAND ----------\n\n# MAGIC %md ## Do Train/Test Split and preprocessing\n# MAGIC Split Train/Test split with some ratio and preprocess images.\n\n# COMMAND ----------\n\ndata = dataDF.randomSplit([0.8, 0.2], seed=10)\ntrain_image = data[0]\nval_image = data[1]\ntype(train_image)\n\n\n# COMMAND ----------\n\nIMAGE_SIZE = 224\n\ntrain_transformer = NNImageTransformer(\n Pipeline([Resize(256, 256), RandomCrop(IMAGE_SIZE, IMAGE_SIZE),\n ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),\n MatToTensor()])\n).setInputCol(\"image\").setOutputCol(\"features\")\n\ntrain_data = train_transformer.transform(train_image)\n\n\n# COMMAND ----------\n\ntrain_size = train_image.count()\n\n# COMMAND ----------\n\nprint(train_size)\n\n\n# COMMAND ----------\n\nval_transformer = NNImageTransformer(\n Pipeline([Resize(256,256),\n CenterCrop(IMAGE_SIZE, IMAGE_SIZE),\n ChannelNormalize(123.0, 117.0, 104.0, 1.0, 1.0, 1.0),\n MatToTensor(to_rgb=True)]\n )\n).setInputCol(\"image\").setOutputCol(\"features\")\n\n# COMMAND ----------\n\ntest_data = val_transformer.transform(val_image)\n\n# COMMAND ----------\n\n# MAGIC %md ## Define Model\n\n# COMMAND ----------\n\n# Network Parameters\nn_classes = len(label_dict)# item_name categories\nmodel = Inception_v1(n_classes)\n\n# COMMAND ----------\n\n# Parameters\nlearning_rate = 0.2\n# parameters for \nbatch_size = 2 #depends on dataset\nno_epochs = 1 #stop when validation accuracy doesn't improve anymore\n\n# COMMAND ----------\n\ncriterion = ClassNLLCriterion()\nclassifier = NNClassifier(model, criterion, [3,IMAGE_SIZE,IMAGE_SIZE])\\\n .setBatchSize(batch_size)\\\n .setMaxEpoch(no_epochs)\\\n .setLearningRate(learning_rate)\nstart = time.time()\ntrained_model = classifier.fit(train_data)\nend = time.time()\nprint(\"Optimization Done.\")\nprint(\"Training time is: %s seconds\" % str(end-start))\n# + dt.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# COMMAND ----------\n\nthroughput = train_size * no_epochs / (end - start)\nprint(\"Average throughput is: %s\" % str(throughput))\n\n# COMMAND ----------\n\n#predict\npredict_model = trained_model.setBatchSize(batch_size)\npredictionDF = predict_model.transform(test_data)\npredictionDF.show()\n\n# COMMAND ----------\n\nnum_preds = 1\npreds = predictionDF.select(\"label\", \"prediction\").take(num_preds)\nfor idx in range(num_preds):\n# true_label = str(map_to_label(map_groundtruth_label(truth[idx].label)))\n true_label = preds[idx][0]\n pred_label = preds[idx][1]\n print(idx + 1, ')', 'Ground Truth label: ', true_label)\n print(idx + 1, ')', 'Predicted label: ', pred_label)\n print(\"correct\" if true_label == pred_label else \"wrong\")\n\n# COMMAND ----------\n\n'''\nMeasure Test Accuracy w/Test Set\n'''\nevaluator = MulticlassClassificationEvaluator(labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\")\naccuracy = evaluator.evaluate(predictionDF)\n# expected error should be less than 10%\nprint(\"Accuracy = %g \" % accuracy)\n"
] | [
[
"pandas.read_csv"
]
] |
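The Inception builders in the row above rely on the scala_T helper to turn plain Python lists into the 1-indexed dictionaries the BigDL layers expect. A standalone sketch of that helper (copied from the file above) with illustrative inputs:

# scala_T from the file above: lists become dicts whose real entries start at key 1.
def scala_T(input_T):
    if type(input_T) is list:
        temp = [0]
        temp.extend(input_T)
        return dict(enumerate(temp))
    return input_T  # dict inputs pass through unchanged

print(scala_T([96, 128]))  # {0: 0, 1: 96, 2: 128}
print(scala_T({1: 64}))    # {1: 64}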
TUM-AAS/motron | [
"2f8800d1d6e297fc4baab555ceb2d37f55841406"
] | [
"motion/components/structural.py"
] | [
"from typing import Tuple, Optional, List, Union\n\nimport torch\nfrom torch.nn import *\nimport math\n\ndef gmm(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:\n return torch.einsum('ndo,bnd->bno', w, x)\n\n\nclass GraphLinear(Module):\n def __init__(self, in_features: int, out_features: int):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n\n def reset_parameters(self) -> None:\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n #stdv = 1. / math.sqrt(self.weight.size(1))\n #self.weight.data.uniform_(-stdv, stdv)\n #if self.learn_influence:\n # self.G.data.uniform_(-stdv, stdv)\n if len(self.weight.shape) == 3:\n self.weight.data[1:] = self.weight.data[0]\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, input: torch.Tensor, g: Optional[torch.Tensor] = None) -> torch.Tensor:\n if g is None and self.learn_influence:\n g = torch.nn.functional.normalize(self.G, p=1., dim=1)\n #g = torch.softmax(self.G, dim=1)\n elif g is None:\n g = self.G\n w = self.weight[self.node_type_index]\n output = self.mm(input, w.transpose(-2, -1))\n if self.bias is not None:\n bias = self.bias[self.node_type_index]\n output += bias\n output = g.matmul(output)\n\n return output\n\n\nclass DynamicGraphLinear(GraphLinear):\n def __init__(self, num_node_types: int = 1, *args):\n super().__init__(*args)\n\n def forward(self, input: torch.Tensor, g: torch.Tensor = None, t: torch.Tensor = None) -> torch.Tensor:\n assert g is not None or t is not None, \"Either Graph Influence Matrix or Node Type Vector is needed\"\n if g is None:\n g = self.G[t][:, t]\n return super().forward(input, g)\n\n\n\nclass StaticGraphLinear(GraphLinear):\n def __init__(self, *args, bias: bool = True, num_nodes: int = None, graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_influence: bool = False, node_types: torch.Tensor = None, weights_per_type: bool = False):\n \"\"\"\n :param in_features: Size of each input sample\n :param out_features: Size of each output sample\n :param num_nodes: Number of nodes.\n :param graph_influence: Graph Influence Matrix\n :param learn_influence: If set to ``False``, the layer will not learn an the Graph Influence Matrix.\n :param node_types: List of Type for each node. 
All nodes of same type will share weights.\n Default: All nodes have unique types.\n :param weights_per_type: If set to ``False``, the layer will not learn weights for each node type.\n :param bias: If set to ``False``, the layer will not learn an additive bias.\n \"\"\"\n super().__init__(*args)\n\n self.learn_influence = learn_influence\n\n if graph_influence is not None:\n assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'\n num_nodes = graph_influence.shape[0]\n if type(graph_influence) is Parameter:\n assert learn_influence, \"Graph Influence Matrix is a Parameter, therefore it must be learnable.\"\n self.G = graph_influence\n elif learn_influence:\n self.G = Parameter(graph_influence)\n else:\n self.register_buffer('G', graph_influence)\n else:\n assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'\n eye_influence = torch.eye(num_nodes, num_nodes)\n if learn_influence:\n self.G = Parameter(eye_influence)\n else:\n self.register_buffer('G', eye_influence)\n\n if weights_per_type and node_types is None:\n node_types = torch.tensor([i for i in range(num_nodes)])\n if node_types is not None:\n num_node_types = node_types.max() + 1\n self.weight = Parameter(torch.Tensor(num_node_types, self.out_features, self.in_features))\n self.mm = gmm\n self.node_type_index = node_types\n else:\n self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))\n self.mm = torch.matmul\n self.node_type_index = None\n\n if bias:\n if node_types is not None:\n self.bias = Parameter(torch.Tensor(num_node_types, self.out_features))\n else:\n self.bias = Parameter(torch.Tensor(self.out_features))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n\n\nGraphLSTMState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]\n\nclass BN(Module):\n def __init__(self, num_nodes, num_features):\n super().__init__()\n self.num_nodes = num_nodes\n self.num_features = num_features\n self.bn = BatchNorm1d(num_nodes * num_features)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.bn(x.view(-1, self.num_nodes * self.num_features)).view(-1, self.num_nodes, self.num_features)\n\nclass LinearX(Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n return input\n\nclass StaticGraphLSTMCell_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,\n recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,\n weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):\n \"\"\"\n\n :param input_size: The number of expected features in the input `x`\n :param hidden_size: The number of features in the hidden state `h`\n :param num_nodes:\n :param dropout:\n :param recurrent_dropout:\n :param graph_influence:\n :param learn_influence:\n :param additive_graph_influence:\n :param learn_additive_graph_influence:\n :param node_types:\n :param weights_per_type:\n :param bias:\n \"\"\"\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.learn_influence = learn_influence\n self.learn_additive_graph_influence = learn_additive_graph_influence\n if graph_influence is not None:\n assert 
num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'\n num_nodes = graph_influence.shape[0]\n if type(graph_influence) is Parameter:\n assert learn_influence, \"Graph Influence Matrix is a Parameter, therefore it must be learnable.\"\n self.G = graph_influence\n elif learn_influence:\n self.G = Parameter(graph_influence)\n else:\n self.register_buffer('G', graph_influence)\n else:\n assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'\n eye_influence = torch.eye(num_nodes, num_nodes)\n if learn_influence:\n self.G = Parameter(eye_influence)\n else:\n self.register_buffer('G', eye_influence)\n\n if additive_graph_influence is not None:\n if type(additive_graph_influence) is Parameter:\n self.G_add = additive_graph_influence\n elif learn_additive_graph_influence:\n self.G_add = Parameter(additive_graph_influence)\n else:\n self.register_buffer('G_add', additive_graph_influence)\n else:\n if learn_additive_graph_influence:\n self.G_add = Parameter(torch.zeros_like(self.G))\n else:\n self.G_add = 0.\n\n if weights_per_type and node_types is None:\n node_types = torch.tensor([i for i in range(num_nodes)])\n if node_types is not None:\n num_node_types = node_types.max() + 1\n self.weight_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, hidden_size))\n self.mm = gmm\n self.register_buffer('node_type_index', node_types)\n else:\n self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))\n self.mm = torch.matmul\n self.register_buffer('node_type_index', None)\n\n if bias:\n if node_types is not None:\n self.bias_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))\n else:\n self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))\n else:\n self.bias_ih = None\n self.bias_hh = None\n\n self.clockwork = clockwork\n if clockwork:\n phase = torch.arange(0., hidden_size)\n phase = phase - phase.min()\n phase = (phase / phase.max()) * 8.\n phase += 1.\n phase = torch.floor(phase)\n self.register_buffer('phase', phase)\n else:\n phase = torch.ones(hidden_size)\n self.register_buffer('phase', phase)\n\n self.dropout = Dropout(dropout)\n self.r_dropout = Dropout(recurrent_dropout)\n\n self.num_nodes = num_nodes\n\n self.init_weights()\n\n def init_weights(self):\n stdv = 1.0 / math.sqrt(self.hidden_size)\n for weight in self.parameters():\n if weight is self.G:\n continue\n if weight is self.G_add:\n continue\n weight.data.uniform_(-stdv, stdv)\n if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:\n weight.data[1:] = weight.data[0]\n\n def forward(self, input: torch.Tensor, state: GraphLSTMState, t: int = 0) -> Tuple[torch.Tensor, GraphLSTMState]:\n hx, cx, gx = state\n if hx is None:\n hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)\n if cx is None:\n cx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)\n if gx is None and self.learn_influence:\n gx = torch.nn.functional.normalize(self.G, p=1., dim=1)\n #gx = torch.softmax(self.G, dim=1)\n elif gx is None:\n gx = self.G\n\n hx = self.r_dropout(hx)\n\n weight_ih = 
self.weight_ih[self.node_type_index]\n weight_hh = self.weight_hh[self.node_type_index]\n if self.bias_hh is not None:\n bias_hh = self.bias_hh[self.node_type_index]\n else:\n bias_hh = 0.\n\n c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(cx)\n\n gates = (self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) +\n self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh)\n gates = torch.matmul(gx, gates)\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 2)\n\n ingate = torch.sigmoid(ingate)\n forgetgate = torch.sigmoid(forgetgate)\n cellgate = torch.tanh(cellgate)\n outgate = torch.sigmoid(outgate)\n\n cy = c_mask * ((forgetgate * cx) + (ingate * cellgate)) + (1 - c_mask) * cx\n hy = outgate * torch.tanh(cy)\n\n gx = gx + self.G_add\n if self.learn_influence or self.learn_additive_graph_influence:\n gx = torch.nn.functional.normalize(gx, p=1., dim=1)\n #gx = torch.softmax(gx, dim=1)\n\n return hy, (hy, cy, gx)\n\n\nclass StaticGraphLSTM_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):\n super().__init__()\n self.layers = ModuleList([StaticGraphLSTMCell_(input_size, hidden_size, **kwargs)]\n + [StaticGraphLSTMCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])\n self.dropout = Dropout(layer_dropout)\n\n def forward(self, input: torch.Tensor, states: Optional[List[GraphLSTMState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphLSTMState]]:\n if states is None:\n n: Optional[torch.Tensor] = None\n states = [(n, n, n)] * len(self.layers)\n\n output_states: List[GraphLSTMState] = []\n output = input\n i = 0\n for rnn_layer in self.layers:\n state = states[i]\n inputs = output.unbind(1)\n outputs: List[torch.Tensor] = []\n for t, input in enumerate(inputs):\n out, state = rnn_layer(input, state, t_i+t)\n outputs += [out]\n output = torch.stack(outputs, dim=1)\n output = self.dropout(output)\n output_states += [state]\n i += 1\n return output, output_states\n\n\ndef StaticGraphLSTM(*args, **kwargs):\n return torch.jit.script(StaticGraphLSTM_(*args, **kwargs))\n\nGraphGRUState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]\n\n\nclass StaticGraphGRUCell_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,\n recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,\n learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,\n weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):\n \"\"\"\n\n :param input_size: The number of expected features in the input `x`\n :param hidden_size: The number of features in the hidden state `h`\n :param num_nodes:\n :param dropout:\n :param recurrent_dropout:\n :param graph_influence:\n :param learn_influence:\n :param additive_graph_influence:\n :param learn_additive_graph_influence:\n :param node_types:\n :param weights_per_type:\n :param bias:\n \"\"\"\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.learn_influence = learn_influence\n self.learn_additive_graph_influence = learn_additive_graph_influence\n if graph_influence is not None:\n assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'\n num_nodes = graph_influence.shape[0]\n if type(graph_influence) 
is Parameter:\n assert learn_influence, \"Graph Influence Matrix is a Parameter, therefore it must be learnable.\"\n self.G = graph_influence\n elif learn_influence:\n self.G = Parameter(graph_influence)\n else:\n self.register_buffer('G', graph_influence)\n else:\n assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'\n eye_influence = torch.eye(num_nodes, num_nodes)\n if learn_influence:\n self.G = Parameter(eye_influence)\n else:\n self.register_buffer('G', eye_influence)\n\n if additive_graph_influence is not None:\n if type(additive_graph_influence) is Parameter:\n self.G_add = additive_graph_influence\n elif learn_additive_graph_influence:\n self.G_add = Parameter(additive_graph_influence)\n else:\n self.register_buffer('G_add', additive_graph_influence)\n else:\n if learn_additive_graph_influence:\n self.G_add = Parameter(torch.zeros_like(self.G))\n else:\n self.G_add = 0.\n\n if weights_per_type and node_types is None:\n node_types = torch.tensor([i for i in range(num_nodes)])\n if node_types is not None:\n num_node_types = node_types.max() + 1\n self.weight_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, hidden_size))\n self.mm = gmm\n self.register_buffer('node_type_index', node_types)\n else:\n self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))\n self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size))\n self.mm = torch.matmul\n self.register_buffer('node_type_index', None)\n\n if bias:\n if node_types is not None:\n self.bias_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))\n else:\n self.bias_ih = Parameter(torch.Tensor(3 * hidden_size))\n self.bias_hh = Parameter(torch.Tensor(3 * hidden_size))\n else:\n self.bias_ih = None\n self.bias_hh = None\n\n self.clockwork = clockwork\n if clockwork:\n phase = torch.arange(0., hidden_size)\n phase = phase - phase.min()\n phase = (phase / phase.max()) * 8.\n phase += 1.\n phase = torch.floor(phase)\n self.register_buffer('phase', phase)\n else:\n phase = torch.ones(hidden_size)\n self.register_buffer('phase', phase)\n\n self.dropout = Dropout(dropout)\n self.r_dropout = Dropout(recurrent_dropout)\n\n self.num_nodes = num_nodes\n\n self.init_weights()\n\n def init_weights(self):\n stdv = 1.0 / math.sqrt(self.hidden_size)\n for weight in self.parameters():\n if weight is self.G:\n continue\n if weight is self.G_add:\n continue\n weight.data.uniform_(-stdv, stdv)\n #if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:\n # weight.data[1:] = weight.data[0]\n\n def forward(self, input: torch.Tensor, state: GraphGRUState, t: int = 0) -> Tuple[torch.Tensor, GraphGRUState]:\n hx, gx = state\n if hx is None:\n hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)\n if gx is None and self.learn_influence:\n gx = torch.nn.functional.normalize(self.G, p=1., dim=1)\n #gx = torch.softmax(self.G, dim=1)\n elif gx is None:\n gx = self.G\n\n hx = self.r_dropout(hx)\n\n weight_ih = self.weight_ih[self.node_type_index]\n weight_hh = self.weight_hh[self.node_type_index]\n if self.bias_hh is not None:\n bias_hh = self.bias_hh[self.node_type_index]\n else:\n bias_hh = 0.\n if self.bias_ih is not None:\n bias_ih = self.bias_ih[self.node_type_index]\n else:\n bias_ih = 0.\n\n c_mask = (torch.remainder(torch.tensor(t + 1., 
device=input.device), self.phase) < 0.01).type_as(hx)\n\n x_results = self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) + bias_ih\n h_results = self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh\n x_results = torch.matmul(gx, x_results)\n h_results = torch.matmul(gx, h_results)\n\n i_r, i_z, i_n = x_results.chunk(3, 2)\n h_r, h_z, h_n = h_results.chunk(3, 2)\n\n r = torch.sigmoid(i_r + h_r)\n z = torch.sigmoid(i_z + h_z)\n n = torch.tanh(i_n + r * h_n)\n\n hy = n - torch.mul(n, z) + torch.mul(z, hx)\n hy = c_mask * hy + (1 - c_mask) * hx\n\n gx = gx + self.G_add\n if self.learn_influence or self.learn_additive_graph_influence:\n gx = torch.nn.functional.normalize(gx, p=1., dim=1)\n #gx = torch.softmax(gx, dim=1)\n\n return hy, (hy, gx)\n\n\nclass StaticGraphGRU_(Module):\n def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):\n super().__init__()\n self.layers = ModuleList([StaticGraphGRUCell_(input_size, hidden_size, **kwargs)]\n + [StaticGraphGRUCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])\n self.dropout = Dropout(layer_dropout)\n\n def forward(self, input: torch.Tensor, states: Optional[List[GraphGRUState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphGRUState]]:\n if states is None:\n n: Optional[torch.Tensor] = None\n states = [(n, n)] * len(self.layers)\n\n output_states: List[GraphGRUState] = []\n output = input\n i = 0\n for rnn_layer in self.layers:\n state = states[i]\n inputs = output.unbind(1)\n outputs: List[torch.Tensor] = []\n for t, input in enumerate(inputs):\n out, state = rnn_layer(input, state, t_i+t)\n outputs += [out]\n output = torch.stack(outputs, dim=1)\n output = self.dropout(output)\n output_states += [state]\n i += 1\n return output, output_states\n\n\ndef StaticGraphGRU(*args, **kwargs):\n return torch.jit.script(StaticGraphGRU_(*args, **kwargs))\n"
] | [
[
"torch.ones",
"torch.stack",
"torch.Tensor",
"torch.nn.functional.normalize",
"torch.zeros_like",
"torch.tensor",
"torch.mul",
"torch.arange",
"torch.tanh",
"torch.zeros",
"torch.einsum",
"torch.sigmoid",
"torch.eye",
"torch.matmul",
"torch.floor"
]
] |
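The LSTM and GRU cells in the row above implement a clockwork update: each hidden unit is assigned an integer period, and c_mask only lets a unit update its state when the current step is a multiple of that period. A small PyTorch sketch of the phase construction and the mask, with a hypothetical hidden_size of 8:

# Clockwork gating as in the cells above (hidden_size is a hypothetical value).
import torch

hidden_size = 8
phase = torch.arange(0., hidden_size)
phase = phase - phase.min()
phase = (phase / phase.max()) * 8.
phase += 1.
phase = torch.floor(phase)               # integer period per hidden unit, here [1, 2, ..., 7, 9]

t = 5
c_mask = (torch.remainder(torch.tensor(t + 1.), phase) < 0.01).float()
print(phase, c_mask)                     # mask is 1 where (t + 1) is a multiple of the period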
EternalImmortal/Real-time-emotion-classifier-mini-Xception | [
"161f295d4be511f7e4cc700399ca37c48ea81f6a"
] | [
"src/utils/preprocessor.py"
] | [
"import numpy as np\n# from scipy.misc import imread, imresize\nfrom scipy import misc\n\n\ndef preprocess_input(x, v2=True):\n x = x.astype('float32')\n x = x / 255.0\n if v2:\n x = x - 0.5\n x = x * 2.0\n return x\n\n\ndef _imread(image_name):\n return misc.imread(image_name)\n\n\ndef _imresize(image_array, size):\n return misc.imresize(image_array, size)\n\n\ndef to_categorical(integer_classes, num_classes=2):\n integer_classes = np.asarray(integer_classes, dtype='int')\n num_samples = integer_classes.shape[0]\n categorical = np.zeros((num_samples, num_classes))\n categorical[np.arange(num_samples), integer_classes] = 1\n return categorical\n"
] | [
[
"numpy.zeros",
"scipy.misc.imresize",
"numpy.asarray",
"numpy.arange",
"scipy.misc.imread"
]
] |
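The preprocessor in the row above rescales uint8 pixels to [-1, 1] (when v2=True) and one-hot encodes integer class labels. A NumPy-only usage sketch on a random dummy batch:

# Same arithmetic as preprocess_input(..., v2=True) and to_categorical, on dummy data.
import numpy as np

faces = np.random.randint(0, 256, size=(4, 64, 64, 1)).astype('float32')
x = faces / 255.0
x = (x - 0.5) * 2.0                      # pixels now lie in [-1, 1]
print(x.min() >= -1.0, x.max() <= 1.0)   # True True

labels = np.array([0, 2, 1, 2])
categorical = np.zeros((labels.shape[0], 3))
categorical[np.arange(labels.shape[0]), labels] = 1
print(categorical)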
randommm/pytorch-lightning | [
"10e87b7b7acbbad8fc12ec5c07638ed093547ef8"
] | [
"pytorch_lightning/plugins/training_type/parallel.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, List, Optional\n\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.overrides.base import unwrap_lightning_module\nfrom pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment\nfrom pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin\nfrom pytorch_lightning.utilities import _XLA_AVAILABLE\nfrom pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, ReduceOp\n\n\nclass ParallelPlugin(TrainingTypePlugin, ABC):\n \"\"\" Plugin for training with multiple processes in parallel. \"\"\"\n\n def __init__(\n self,\n parallel_devices: Optional[List[torch.device]] = None,\n cluster_environment: Optional[ClusterEnvironment] = None,\n ):\n super().__init__()\n self.parallel_devices = parallel_devices\n self.cluster_environment = cluster_environment\n\n @property\n @abstractmethod\n def root_device(self) -> torch.device:\n raise NotImplementedError\n\n @property\n def on_gpu(self) -> bool:\n return self.root_device.type == \"cuda\" and torch.cuda.is_available()\n\n @property\n def on_tpu(self) -> bool:\n return self.root_device.type == \"xla\" and _XLA_AVAILABLE\n\n @property\n def lightning_module(self):\n return unwrap_lightning_module(self._model)\n\n @property\n def global_rank(self) -> int:\n return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0\n\n @property\n def local_rank(self) -> int:\n return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0\n\n @property\n def node_rank(self) -> int:\n return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0\n\n @property\n def world_size(self) -> int:\n return self.cluster_environment.world_size() if self.cluster_environment is not None else 1\n\n @property\n def is_global_zero(self) -> bool:\n return self.global_rank == 0\n\n @property\n def distributed_sampler_kwargs(self):\n distributed_sampler_kwargs = dict(num_replicas=len(self.parallel_devices), rank=self.global_rank)\n return distributed_sampler_kwargs\n\n def reconciliate_processes(self, trace: str):\n \"\"\"\n Function to re-conciliate processes on failure\n \"\"\"\n\n def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:\n \"\"\"Perform a all_gather on all processes \"\"\"\n return all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)\n\n def reduce_boolean_decision(self, decision: bool) -> bool:\n decision = torch.tensor(int(decision), device=self.lightning_module.device)\n decision = self.reduce(decision, reduce_op=ReduceOp.SUM)\n decision = bool(decision == self.world_size)\n return decision\n\n @property\n def torch_distributed_backend(self):\n 
torch_backend = os.getenv(\"PL_TORCH_DISTRIBUTED_BACKEND\")\n if torch_backend is None:\n torch_backend = \"nccl\" if self.on_gpu else \"gloo\"\n return torch_backend\n\n @staticmethod\n def configure_sync_batchnorm(model: 'pl.LightningModule') -> 'pl.LightningModule':\n \"\"\"\n Add global batchnorm for a model spread across multiple GPUs and nodes.\n\n Override to synchronize batchnorm between specific process groups instead\n of the whole world or use a different sync_bn like `apex`'s version.\n\n Args:\n model: pointer to current :class:`LightningModule`.\n\n Return:\n LightningModule with batchnorm layers synchronized between process groups\n \"\"\"\n return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n @contextmanager\n def block_backward_sync(self):\n \"\"\"\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n Returns: context manager with sync behaviour off\n \"\"\"\n if isinstance(self.model, DistributedDataParallel):\n with self.model.no_sync():\n yield None\n else:\n yield None\n\n def teardown(self) -> None:\n # Un-reference the wrapper if any was used.\n # todo (tchaton): Add support for all plugins.\n if isinstance(self.model, DistributedDataParallel):\n self.model = self.lightning_module\n\n if self.on_gpu:\n # GPU teardown\n self.lightning_module.cpu()\n # clean up memory\n torch.cuda.empty_cache()\n"
] | [
[
"torch.cuda.empty_cache",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.cuda.is_available"
]
] |
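reduce_boolean_decision in the row above turns the per-process bool into a tensor, sums it across ranks, and only returns True if every rank agreed. A local sketch of that logic without a process group, with the distributed sum simulated by stacking hypothetical per-rank flags:

# Boolean all-ranks-agree reduction, simulated locally (no distributed backend required).
import torch

world_size = 4
per_rank_decisions = [True, True, False, True]  # hypothetical per-rank flags
summed = torch.stack([torch.tensor(int(d)) for d in per_rank_decisions]).sum()  # stands in for ReduceOp.SUM
decision = bool(summed == world_size)
print(decision)                                  # False, because one rank disagreed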
cremerlab/useless_expression | [
"a6020674f0ae73b4cc6173de60a0ea93016ee562",
"a6020674f0ae73b4cc6173de60a0ea93016ee562",
"a6020674f0ae73b4cc6173de60a0ea93016ee562"
] | [
"code/processing/growth_rates/2021-08-14_r1_DoubleKO_acetate/analysis.py",
"code/processing/growth_rates/2021-08-12_r1_DoubleKO_acetate/processing.py",
"code/processing/growth_rates/2021-07-27_r2_SingleKO_acetate/analysis.py"
] | [
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.viz\nimport altair as alt\nimport altair_saver\nimport scipy.stats\ncolors, palette = futileprot.viz.altair_style()\n\n# Add metadata\nDATE = '2021-08-14'\nRUN_NO = 1\nSTRAINS = 'DoubleKO'\nMEDIUM = 'acetate'\n\n# Load the measurement data\ndata = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')\n\n# Perform a simplistic inference of the growth rate to get a sense of what\n# the result is.\n# data = data.groupby(['strain', 'elapsed_time_hr']).mean().reset_index()\ndata = data[['strain', 'elapsed_time_hr', 'od_600nm']]\n\n# For each strain, infer the growth rate and compute the fit\nlayout = False\nfor g, d in data.groupby(['strain']):\n time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)\n\n # Perform the regression\n popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))\n slope, intercept, err = popt[0], popt[1], popt[-1]\n print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')\n # Compute the fit\n fit = np.exp(intercept + slope * time_range)\n fit_df = pd.DataFrame([])\n fit_df['elapsed_time_hr'] = time_range\n fit_df['od_600nm'] = fit\n\n # Generate the plot\n points = alt.Chart(\n data=d, \n width=300, \n height=150\n ).mark_point(\n color=colors['primary_blue']\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u]',\n scale=alt.Scale(type='log'))\n )\n\n fit = alt.Chart(data=fit_df,\n title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'\n ).mark_line( \n color=colors['primary_blue']\n ).encode(\n x='elapsed_time_hr:Q',\n y='od_600nm:Q'\n )\n merge = points + fit\n if layout == False:\n layout = merge\n else: \n layout &= merge\n\naltair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',\n scale_factor=2)\n# %%\n",
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.io\nimport futileprot.viz\nimport altair as alt \nimport altair_saver\ncolors, palette = futileprot.viz.altair_style()\n\n# Define experiment parameters\nDATE = '2021-08-12'\nSTRAINS = 'DoubleKO'\nMEDIUM = 'acetate'\nRUN_NO = 1\nROOT = '../../../..'\nSKIPROWS = 36 \nOD_BOUNDS = [0.03, 0.18]\n\n# Add the well identifiers\nMAP = {'GC073': ['C3', 'D3', 'E3'],\n 'GC069': ['C4', 'D4', 'E4'],\n 'GC075': ['C5', 'D5', 'E5'],\n 'GC070': ['C6', 'D6', 'E6'],\n 'GC065': ['C7', 'D7', 'E7'],\n 'GC098': ['C8', 'D8', 'E8'],\n 'GC074': ['C9', 'D9', 'E9'],\n 'GC097': ['C10', 'D10' ,'E10'],\n 'GC084': ['F3', 'F4', 'F5'],\n 'GC106': ['F6', 'F7', 'F8'],\n 'GC100': ['F9', 'F10', 'F11']} \n\n# Generate a list of all valid wells\nwells = [f'{letter}{number}' for letter in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] for number in np.arange(1,13)]\n\n# Load the data\ndata = pd.read_csv(f'{ROOT}/data/growth_rates/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}/{DATE}_r{RUN_NO}.csv', \n skiprows=SKIPROWS)\n\n# Melt and drop unnecessary stuff\nmelted = data.melt(id_vars=['Time'], var_name='well', value_name='od_600nm')\nmelted = melted.loc[melted['well'].isin(wells)]\nmelted.dropna(inplace=True)\n\n# Add strain identifier and replicates\nmelted['strain'] = 'blank'\nmelted['replicate'] = 0\nfor strain, wells in MAP.items():\n for idx, well in enumerate(wells):\n melted.loc[melted['well']==well, 'strain'] = strain\n melted.loc[melted['well']==well, 'replicate'] = idx + 1\n\n# Add information regarding date and growth medium\nmelted['growth_medium'] = MEDIUM\nmelted['date'] = DATE\nmelted['run_number'] = RUN_NO\n\n# Convert time to elapsed time\nmelted['time_sec'] = pd.to_timedelta(melted['Time'].values)\nmelted['time_sec'] = melted['time_sec'].dt.total_seconds()\nmelted['elapsed_time_hr'] = (melted['time_sec'] - melted['time_sec'].min())/3600\n\n# Drop unnecessary Time columns\nmelted.drop(columns=['Time', 'time_sec'], inplace=True)\n\n\n# Reformat blank value as average eentry per time\nmeasurement = []\nfor g, d in melted.groupby(['elapsed_time_hr']):\n d = d.copy()\n avg_blank = d[d['strain']=='blank']\n meas = d[d['strain']!='blank']\n meas['avg_blank_value'] = avg_blank['od_600nm'].mean()\n measurement.append(meas)\nmeasurement = pd.concat(measurement, sort=False)\nmeasurement.rename(columns={'strain':'identifier'}, inplace=True)\n\n# Add shorthand strain information and class identifier\nstrain_shorthand, _, strain_class = futileprot.io.standardize_strains(measurement['identifier'].values)\nmeasurement['strain'] = strain_shorthand\nmeasurement['class'] = strain_class\n\n# measurement = pd.concat(measurements, sort=False)\n# Save to disk\nmeasurement.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_measurements.csv', index=False)\n\n#%%\n# Perform the blank subtraction\nmeasurement['od_600nm_subtracted'] = measurement['od_600nm'].values - measurement['avg_blank_value'].values\n\n# Given truncation, recalculated elapsed time and save truncated data\ntrunc = []\nfor g, d in measurement.groupby(['strain', 'replicate']):\n d = d.copy()\n d = d[(d['od_600nm_subtracted'] >= OD_BOUNDS[0]) & \n (d['od_600nm_subtracted'] <= OD_BOUNDS[1])]\n d['elapsed_time_hr'] -= d['elapsed_time_hr'].min()\n trunc.append(d)\ntrunc = pd.concat(trunc, sort=False)\ntrunc = trunc[['strain', 'elapsed_time_hr', \n 'od_600nm_subtracted', 'replicate', 'growth_medium', \n 'date', 'run_number', 'identifier', 'class']]\ntrunc.rename(columns={'od_600nm_subtracted':'od_600nm',\n 
'replicate':'technical_replicate'}, inplace=True)\ntrunc.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv', index=False)\n\n# %%\n# Generate a figure of all of the raw traces\nraw_traces = alt.Chart(\n data=measurement, \n width=400, \n height=200\n ).mark_line(\n point=True,\n opacity=0.75\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u.]'),\n color=alt.Color('replicate:N', title='technical replicate')\n ).facet(\n row='strain'\n )\naltair_saver.save(raw_traces, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_raw_traces.png',\n scale_factor=2)\n\n# %%\n",
"#%%\nimport numpy as np \nimport pandas as pd \nimport futileprot.viz\nimport altair as alt\nimport altair_saver\nimport scipy.stats\ncolors, palette = futileprot.viz.altair_style()\n\n# Add metadata\nDATE = '2021-07-27'\nRUN_NO = 2\nSTRAINS = 'SingleKO'\nMEDIUM = 'acetate'\n\n# Load the measurement data\ndata = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')\n\n# Perform a simplistic inference of the growth rate to get a sense of what\n# the result is.\n# data = data.groupby(['strain', 'elapsed_time_hr']).mean().reset_index()\ndata = data[['strain', 'elapsed_time_hr', 'od_600nm']]\n\n# For each strain, infer the growth rate and compute the fit\nlayout = False\nfor g, d in data.groupby(['strain']):\n time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)\n\n # Perform the regression\n popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))\n slope, intercept, err = popt[0], popt[1], popt[-1]\n print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')\n # Compute the fit\n fit = np.exp(intercept + slope * time_range)\n fit_df = pd.DataFrame([])\n fit_df['elapsed_time_hr'] = time_range\n fit_df['od_600nm'] = fit\n\n # Generate the plot\n points = alt.Chart(\n data=d, \n width=300, \n height=150\n ).mark_point(\n color=colors['primary_blue']\n ).encode(\n x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),\n y=alt.Y('od_600nm:Q', title='optical density [a.u]',\n scale=alt.Scale(type='log'))\n )\n\n fit = alt.Chart(data=fit_df,\n title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'\n ).mark_line( \n color=colors['primary_blue']\n ).encode(\n x='elapsed_time_hr:Q',\n y='od_600nm:Q'\n )\n merge = points + fit\n if layout == False:\n layout = merge\n else: \n layout &= merge\n\naltair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',\n scale_factor=2)\n# %%\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.log",
"numpy.exp"
],
[
"pandas.read_csv",
"pandas.to_timedelta",
"numpy.arange",
"pandas.concat"
],
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.log",
"numpy.exp"
]
] |
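The two analysis scripts in the row above estimate growth rates by regressing log(OD) against elapsed time, since exponential growth is linear in log space. A self-contained sketch of the same fit on synthetic data:

# Growth-rate fit as in the analysis scripts above, on a synthetic culture growing at 0.35 / hr.
import numpy as np
import scipy.stats

time_hr = np.linspace(0, 5, 20)
od_600nm = 0.04 * np.exp(0.35 * time_hr)

popt = scipy.stats.linregress(time_hr, np.log(od_600nm))
slope, intercept, err = popt[0], popt[1], popt[-1]
fit = np.exp(intercept + slope * time_hr)            # fitted OD curve, as plotted in the scripts
print(f'mu = {slope:0.3f} ± {err:0.3f} per hr.')     # ~0.350 ± 0.000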
Fred159/3D-Perception | [
"a23a42dc19d0a38e48beb5e7c0725e6d14c542f3"
] | [
"sensor_stick/src/sensor_stick/features.py"
] | [
"import matplotlib.colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pcl_helper import *\n\nprint('run features.py')\n\n\ndef rgb_to_hsv(rgb_list):\n rgb_normalized = [1.0 * rgb_list[0] / 255, 1.0 * rgb_list[1] / 255, 1.0 * rgb_list[2] / 255]\n hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]\n return hsv_normalized\n\n\ndef compute_color_histograms(cloud, using_hsv=False):\n # Compute histograms for the clusters\n point_colors_list = []\n\n # Step through each point in the point cloud\n for point in pc2.read_points(cloud, skip_nans=True):\n rgb_list = float_to_rgb(point[3])\n if using_hsv:\n point_colors_list.append(rgb_to_hsv(rgb_list) * 255)\n else:\n point_colors_list.append(rgb_list)\n\n # Populate lists with color values\n channel_1_vals = []\n channel_2_vals = []\n channel_3_vals = []\n\n for color in point_colors_list:\n channel_1_vals.append(color[0])\n channel_2_vals.append(color[1])\n channel_3_vals.append(color[2])\n\n # TODO: Compute histograms\n nbins = 32\n bins_range = (0, 256)\n # TODO: Concatenate and normalize the histograms\n channel_1_hist = np.histogram(channel_1_vals, bins=nbins, range=bins_range)\n channel_2_hist = np.histogram(channel_2_vals, bins=nbins, range=bins_range)\n channel_3_hist = np.histogram(channel_3_vals, bins=nbins, range=bins_range)\n hist_features = np.concatenate((channel_1_hist[0], channel_2_hist[0], channel_3_hist[0])).astype(np.float64)\n normed_features = hist_features / np.sum(hist_features)\n # Generate random features for demo mode.\n # Replace normed_features with your feature vectorl\n # normed_features = np.random.random(96)\n # print('run normed_features finished')\n return normed_features\n\n\ndef compute_normal_histograms(normal_cloud):\n norm_x_vals = []\n norm_y_vals = []\n norm_z_vals = []\n nbins = 32\n bins_range = (-1, 1)\n\n for norm_component in pc2.read_points(normal_cloud,\n field_names=('normal_x', 'normal_y', 'normal_z'),\n skip_nans=True):\n norm_x_vals.append(norm_component[0])\n norm_y_vals.append(norm_component[1])\n norm_z_vals.append(norm_component[2])\n\n # TODO: Compute histograms of normal values (just like with color)\n norm_x_hist = np.histogram(norm_x_vals, bins=nbins, range=bins_range)\n norm_y_hist = np.histogram(norm_y_vals, bins=nbins, range=bins_range)\n norm_z_hist = np.histogram(norm_z_vals, bins=nbins, range=bins_range)\n # TODO: Concatenate and normalize the histograms\n norm_hist_features = np.concatenate((norm_x_hist[0], norm_y_hist[0], norm_z_hist[0])).astype(np.float64)\n normed_features = norm_hist_features / np.sum(norm_hist_features)\n # Generate random features for demo mode.\n # Replace normed_features with your feature vector\n # normed_feature = np.random.random(96)\n # print('run compute_normal_histograms function finished')\n return normed_features\n\n"
] | [
[
"numpy.histogram",
"numpy.sum",
"numpy.concatenate"
]
] |
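In features.py above, color and surface-normal features follow the same recipe: histogram each channel over a fixed range, concatenate the bin counts, and divide by the total so the feature sums to one. A small standalone illustration (the helper name is hypothetical, not part of the repo):

# Minimal sketch: build a normalized feature vector from per-channel histograms,
# as compute_color_histograms / compute_normal_histograms do.
import numpy as np

def channel_histogram_feature(channels, nbins=32, bins_range=(0, 256)):
    hists = [np.histogram(c, bins=nbins, range=bins_range)[0] for c in channels]
    feature = np.concatenate(hists).astype(np.float64)
    return feature / np.sum(feature)  # L1-normalize so the bins sum to 1

rgb = np.random.randint(0, 256, size=(3, 500))  # fake colors for 500 points, 3 channels
print(channel_histogram_feature(rgb).shape)     # (96,) = 3 channels x 32 bins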
Complicateddd/Complicateddd-ROITransformer | [
"2adfbf98892d569c460d100c6e2169c5fa3a9b82"
] | [
"submit.py"
] | [
"from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections\nimport mmcv\nfrom mmcv import Config\nfrom mmdet.datasets import get_dataset\nimport cv2\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nimport DOTA_devkit.polyiou as polyiou\nimport math\nimport pdb\n\ndef py_cpu_nms_poly_fast_np(dets, thresh):\n obbs = dets[:, 0:-1]\n x1 = np.min(obbs[:, 0::2], axis=1)\n y1 = np.min(obbs[:, 1::2], axis=1)\n x2 = np.max(obbs[:, 0::2], axis=1)\n y2 = np.max(obbs[:, 1::2], axis=1)\n scores = dets[:, 8]\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n polys = []\n for i in range(len(dets)):\n tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],\n dets[i][2], dets[i][3],\n dets[i][4], dets[i][5],\n dets[i][6], dets[i][7]])\n polys.append(tm_polygon)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n ovr = []\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n hbb_inter = w * h\n hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)\n h_inds = np.where(hbb_ovr > 0)[0]\n tmp_order = order[h_inds + 1]\n for j in range(tmp_order.size):\n iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])\n hbb_ovr[h_inds[j]] = iou\n\n try:\n if math.isnan(ovr[0]):\n pdb.set_trace()\n except:\n pass\n inds = np.where(hbb_ovr <= thresh)[0]\n order = order[inds + 1]\n return keep\n\nclass DetectorModel():\n def __init__(self,\n config_file,\n checkpoint_file):\n # init RoITransformer\n self.config_file = config_file\n self.checkpoint_file = checkpoint_file\n self.cfg = Config.fromfile(self.config_file)\n self.data_test = self.cfg.data['test']\n self.dataset = get_dataset(self.data_test)\n # self.classnames = self.dataset.CLASSES\n self.classnames = ('1', '2', '3', '4', '5')\n\n self.model = init_detector(config_file, checkpoint_file, device='cuda:0')\n\n def inference_single(self, imagname):\n img = mmcv.imread(imagname)\n height, width, channel = img.shape\n # slide_h, slide_w = slide_size\n # hn, wn = chip_size\n # TODO: check the corner case\n # import pdb; pdb.set_trace()\n total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]\n # print(self.classnames)\n\n chip_detections = inference_detector(self.model, img)\n # nms\n for i in range(5):\n keep = py_cpu_nms_poly_fast_np(chip_detections[i], 0.1)\n chip_detections[i] = chip_detections[i][keep]\n return chip_detections\n\n def inference_single_vis(self, srcpath, dstpath):\n detections = self.inference_single(srcpath)\n print(detections)\n img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.3)\n cv2.imwrite(dstpath, img)\n\nif __name__ == '__main__':\n import tqdm\n roitransformer = DetectorModel(r'configs/Huojianjun/faster_rcnn_RoITrans_r101x_fpn_1x_anchors_augs_augfpn.py',\n r'work_dirs/faster_rcnn_RoITrans_r101_all_aug_rote_1333_crop_rote/epoch_278.pth')\n\n # roitransformer.inference_single_vis(r'demo/48.tif',\n # r'demo/48_out.tif',\n # (1024, 1024),\n # (1024, 1024))\n\n threshold=0.0001\n class_names=('1', '2', '3', '4', '5')\n import os\n path=\"/media/ubuntu/data/huojianjun/科目四/科目四/test2\"\n file_img_name=os.listdir(path)\n\n result_file=open(\"./科目四_莘莘学子.txt\",'w')\n\n # print(file_img_name)\n count=0\n def filer(x):\n x=int(x)\n if x>1024:\n return 1024\n if x<0:\n return 0\n else:\n return x\n\n for name in 
tqdm.tqdm(file_img_name):\n # count+=1\n path_img=os.path.join(path,name)\n detection_result=roitransformer.inference_single(path_img)\n for j, name_cls in enumerate(class_names):\n dets = detection_result[j]\n for det in dets:\n bbox = det[:8]\n score = round(det[-1],2)\n if score < threshold:\n continue\n bbox = list(map(filer, bbox))\n # print(bbox)\n # print(score)\n # print(name_cls)\n result_file.writelines(name+\" \"+str(name_cls)+\" \"+str(score)+\" \"\n +str(bbox[0])\n +\" \"+str(bbox[1])+\" \"+str(bbox[2])+\" \"+str(bbox[3])\n +\" \"+str(bbox[4])+\" \"+str(bbox[5])+\" \"+str(bbox[6])\n +\" \"+str(bbox[7]))\n result_file.writelines(\"\\n\")\n count+=1\n # if name==\"3.tif\":\n # print(count)\n # if count==3:\n\n # break\n\n # print(path_img)\n\n"
] | [
[
"numpy.zeros",
"numpy.max",
"numpy.min",
"numpy.maximum",
"numpy.where",
"numpy.minimum"
]
] |
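py_cpu_nms_poly_fast_np in submit.py keeps the polygon NMS fast by first computing IoU between the oriented boxes' axis-aligned bounding rectangles (HBB) and only calling the exact polyiou.iou_poly for pairs whose rectangles actually overlap. A standalone sketch of just that prefilter (assumed helper name, illustrative only):

# Minimal sketch: axis-aligned (HBB) IoU of one box against many, the cheap
# prefilter used before exact polygon IoU in the NMS above.
import numpy as np

def hbb_iou(box, boxes):
    # box: (4,) as x1, y1, x2, y2; boxes: (N, 4) in the same layout
    xx1 = np.maximum(box[0], boxes[:, 0])
    yy1 = np.maximum(box[1], boxes[:, 1])
    xx2 = np.minimum(box[2], boxes[:, 2])
    yy2 = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
    area = (box[2] - box[0]) * (box[3] - box[1])
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area + areas - inter)

print(hbb_iou(np.array([0.0, 0.0, 2.0, 2.0]), np.array([[1.0, 1.0, 3.0, 3.0]])))  # ~[0.143]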
Isaac-JenkinsRA/Stone-Soup | [
"54c9c7dca8162dadaa58e85933cf10a0f86ce1e1"
] | [
"stonesoup/predictor/tests/test_kalman.py"
] | [
"# coding: utf-8\nimport datetime\nimport pytest\nimport numpy as np\n\nfrom ...models.transition.linear import ConstantVelocity\nfrom ...predictor.kalman import (\n KalmanPredictor, ExtendedKalmanPredictor, UnscentedKalmanPredictor,\n SqrtKalmanPredictor)\nfrom ...types.prediction import GaussianStatePrediction\nfrom ...types.state import GaussianState, SqrtGaussianState\nfrom ...types.track import Track\n\n\n@pytest.mark.parametrize(\n \"PredictorClass, transition_model, prior_mean, prior_covar\",\n [\n ( # Standard Kalman\n KalmanPredictor,\n ConstantVelocity(noise_diff_coeff=0.1),\n np.array([[-6.45], [0.7]]),\n np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n ),\n ( # Extended Kalman\n ExtendedKalmanPredictor,\n ConstantVelocity(noise_diff_coeff=0.1),\n np.array([[-6.45], [0.7]]),\n np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n ),\n ( # Unscented Kalman\n UnscentedKalmanPredictor,\n ConstantVelocity(noise_diff_coeff=0.1),\n np.array([[-6.45], [0.7]]),\n np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n )\n ],\n ids=[\"standard\", \"extended\", \"unscented\"]\n)\ndef test_kalman(PredictorClass, transition_model,\n prior_mean, prior_covar):\n\n # Define time related variables\n timestamp = datetime.datetime.now()\n timediff = 2 # 2sec\n new_timestamp = timestamp + datetime.timedelta(seconds=timediff)\n time_interval = new_timestamp - timestamp\n\n # Define prior state\n prior = GaussianState(prior_mean,\n prior_covar,\n timestamp=timestamp)\n\n transition_model_matrix = transition_model.matrix(time_interval=time_interval)\n transition_model_covar = transition_model.covar(time_interval=time_interval)\n # Calculate evaluation variables\n eval_prediction = GaussianStatePrediction(\n transition_model_matrix @ prior.mean,\n transition_model_matrix@prior.covar@transition_model_matrix.T + transition_model_covar)\n\n # Initialise a kalman predictor\n predictor = PredictorClass(transition_model=transition_model)\n\n # Perform and assert state prediction\n prediction = predictor.predict(prior=prior,\n timestamp=new_timestamp)\n\n assert np.allclose(prediction.mean,\n eval_prediction.mean, 0, atol=1.e-14)\n assert np.allclose(prediction.covar,\n eval_prediction.covar, 0, atol=1.e-14)\n assert prediction.timestamp == new_timestamp\n\n # TODO: Test with Control Model\n\n\ndef test_lru_cache():\n predictor = KalmanPredictor(ConstantVelocity(noise_diff_coeff=0))\n\n timestamp = datetime.datetime.now()\n state = GaussianState([[0.], [1.]], np.diag([1., 1.]), timestamp)\n track = Track([state])\n\n prediction_time = timestamp + datetime.timedelta(seconds=1)\n prediction1 = predictor.predict(track, prediction_time)\n assert np.array_equal(prediction1.state_vector, np.array([[1.], [1.]]))\n\n prediction2 = predictor.predict(track, prediction_time)\n assert prediction2 is prediction1\n\n track.append(GaussianState([[1.], [1.]], np.diag([1., 1.]), prediction_time))\n prediction3 = predictor.predict(track, prediction_time)\n assert prediction3 is not prediction1\n\n\ndef test_sqrt_kalman():\n # Define time related variables\n timestamp = datetime.datetime.now()\n timediff = 2 # 2sec\n new_timestamp = timestamp + datetime.timedelta(seconds=timediff)\n\n # Define prior state\n prior_mean = np.array([[-6.45], [0.7]])\n prior_covar = np.array([[4.1123, 0.0013],\n [0.0013, 0.0365]])\n prior = GaussianState(prior_mean,\n prior_covar,\n timestamp=timestamp)\n sqrt_prior_covar = np.linalg.cholesky(prior_covar)\n sqrt_prior = SqrtGaussianState(prior_mean, sqrt_prior_covar,\n timestamp=timestamp)\n\n 
transition_model = ConstantVelocity(noise_diff_coeff=0.1)\n\n # Initialise a kalman predictor\n predictor = KalmanPredictor(transition_model=transition_model)\n sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model)\n # Can swap out this method\n sqrt_predictor = SqrtKalmanPredictor(transition_model=transition_model, qr_method=True)\n\n # Perform and assert state prediction\n prediction = predictor.predict(prior=prior, timestamp=new_timestamp)\n sqrt_prediction = sqrt_predictor.predict(prior=sqrt_prior,\n timestamp=new_timestamp)\n\n assert np.allclose(prediction.mean, sqrt_prediction.mean, 0, atol=1.e-14)\n assert np.allclose(prediction.covar,\n sqrt_prediction.sqrt_covar@sqrt_prediction.sqrt_covar.T, 0,\n atol=1.e-14)\n assert np.allclose(prediction.covar, sqrt_prediction.covar, 0, atol=1.e-14)\n assert prediction.timestamp == sqrt_prediction.timestamp\n"
] | [
[
"numpy.array",
"numpy.allclose",
"numpy.linalg.cholesky",
"numpy.diag"
]
] |
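test_kalman above constructs its expected answer straight from the textbook prediction step: predicted mean F x and predicted covariance F P F^T + Q, with F and Q taken from the constant-velocity transition model over the 2 s interval. A plain numpy sketch of that identity (the Q below is the usual white-noise-acceleration form and is an assumption here, not copied from Stone-Soup):

# Minimal numpy sketch of the Kalman prediction identity checked by the test.
import numpy as np

dt, q = 2.0, 0.1
F = np.array([[1.0, dt], [0.0, 1.0]])                        # state transition
Q = q * np.array([[dt**3 / 3, dt**2 / 2], [dt**2 / 2, dt]])  # assumed CV process noise
x = np.array([[-6.45], [0.7]])
P = np.array([[4.1123, 0.0013], [0.0013, 0.0365]])

x_pred = F @ x
P_pred = F @ P @ F.T + Q
print(x_pred.ravel())
print(P_pred)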
jials/CS4243-project | [
"100d7ed1cbd379de3b2e65c16e037bf4afec0fb1"
] | [
"changeDetection.py"
] | [
"import numpy as np\nimport cv2\nimport imageMarker\n\nlucas_kanade_params = dict(\n winSize= (4, 4),\n maxLevel= 3, #level of pyramids used\n criteria= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)\n)\n\ndef mark_features_on_all_images(images, features_coordinates):\n marked_images = []\n marked_frame_coordinates = []\n\n last_gs_img = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)\n\n p0 = []\n for coordinate in features_coordinates:\n p0.append([coordinate,])\n p0 = np.float32(p0)\n\n mask = np.zeros_like(images[0])\n status_arr = []\n for fr in range(1, len(images)):\n marked_coordinates = []\n if images[fr] is None:\n print('change detection problematic frame', fr)\n print('len of given images', len(images))\n frame = images[fr].copy()\n gs_img = cv2.cvtColor(images[fr], cv2.COLOR_BGR2GRAY)\n\n p1, st, err = cv2.calcOpticalFlowPyrLK(last_gs_img, gs_img, p0, None, **lucas_kanade_params)\n\n status_arr.append(st)\n\n if p1 is None:\n marked_images.append(frame)\n marked_frame_coordinates.append(features_coordinates if len(images) == 1 else marked_frame_coordinates[-1])\n continue\n\n new_points = []\n for index in range(len(p1)):\n if st[index] == 1:\n new_points.append(p1[index])\n else:\n new_points.append(p0[index])\n new_points = np.array(new_points)\n\n for index, point in enumerate(new_points):\n x, y = point.ravel()\n marked_coordinates.append([x,y])\n imageMarker.mark_image_at_point(frame, int(y), int(x), 9, imageMarker.colors[index])\n marked_frame_coordinates.append(marked_coordinates)\n\n img = cv2.add(frame,mask)\n marked_images.append(img)\n\n # update last frame and point\n last_gs_img = gs_img.copy()\n p0 = new_points.reshape(-1,1,2)\n\n return marked_images, marked_frame_coordinates, status_arr\n"
] | [
[
"numpy.array",
"numpy.zeros_like",
"numpy.float32"
]
] |
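changeDetection.py tracks the marked feature points with pyramidal Lucas-Kanade: both frames are converted to grayscale, the previous points go in as a float32 array of shape (N, 1, 2), and whenever the returned status flag is 0 the old coordinate is kept instead of the new one. A two-frame sketch with synthetic images and assumed parameters (winSize etc. are not the repo's values):

# Minimal sketch: track two points between a frame and a shifted copy of it,
# keeping the previous coordinate for any point the tracker loses.
import numpy as np
import cv2

prev = np.random.randint(0, 255, (240, 320), dtype=np.uint8)  # fake grayscale frame
curr = np.roll(prev, 2, axis=1)                               # same frame shifted 2 px right

p0 = np.float32([[[50.0, 60.0]], [[120.0, 80.0]]])            # shape (N, 1, 2)
p1, st, err = cv2.calcOpticalFlowPyrLK(prev, curr, p0, None,
                                       winSize=(15, 15), maxLevel=3)
tracked = np.where(st[:, :, None] == 1, p1, p0)               # fall back when status == 0
print(tracked.reshape(-1, 2))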
zmlabe/ModelBiasesANN | [
"df28842a8594870db3282682b1261af5058af832"
] | [
"Scripts/ANN_AllAnalysis_ClimateModels_v4-RandomNoise-TestWarmthGFDL.py"
] | [
"\"\"\"\nANN for evaluating model biases, differences, and other thresholds using \nexplainable AI (add warmth/cool GFDL-CM3 model only)\n\nReference : Barnes et al. [2020, JAMES]\nAuthor : Zachary M. Labe\nDate : 20 July 2021\nVersion : 4 - subsamples random weight class (#8) for mmmean\n\"\"\"\n\n### Import packages\nimport sys\nimport math\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Dense, Activation\nfrom keras import regularizers\nfrom keras import metrics\nfrom keras import optimizers\nfrom keras.models import Sequential\nimport tensorflow.keras as keras\nimport tensorflow as tf\nimport pandas as pd\nimport random\nimport scipy.stats as stats\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport palettable.cubehelix as cm\nimport cmocean as cmocean\nimport calc_Utilities as UT\nimport calc_dataFunctions as df\nimport calc_Stats as dSS\nimport calc_LRPclass as LRP\nimport innvestigate\nfrom sklearn.metrics import accuracy_score\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\n\n### Prevent tensorflow 2.+ deprecation warnings\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n### LRP param\nDEFAULT_NUM_BWO_ITERATIONS = 200\nDEFAULT_BWO_LEARNING_RATE = .001\n\n### Plotting defaults \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n###############################################################################\n###############################################################################\n###############################################################################\n### Data preliminaries \ndirectorydataLLL = '/Users/zlabe/Data/LENS/monthly'\ndirectorydataENS = '/Users/zlabe/Data/SMILE/'\ndirectorydataBB = '/Users/zlabe/Data/BEST/'\ndirectorydataEE = '/Users/zlabe/Data/ERA5/'\ndirectoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'\n###############################################################################\n###############################################################################\nmodelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',\n 'GFDL_CM3','GFDL_ESM2M','lens']\ndatasetsingle = ['SMILE']\ndataset_obs = 'ERA5BE'\nseasons = ['annual']\nvariq = 'T2M'\nreg_name = 'LowerArctic'\ntimeper = 'historical'\n###############################################################################\n###############################################################################\n# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',\n# 'GFDL_ESM2M','lens']\n# pickSMILE = ['CCCma_canesm2','MPI','lens']\npickSMILE = []\nif len(pickSMILE) >= 1:\n lenOfPicks = len(pickSMILE)\nelse:\n lenOfPicks = len(modelGCMs)\n###############################################################################\n###############################################################################\nland_only = False\nocean_only = False\nif land_only == True:\n maskNoiseClass = 'land'\nelif ocean_only == True:\n maskNoiseClass = 'ocean'\nelse:\n maskNoiseClass = 'none'\n\n###############################################################################\n###############################################################################\nrm_merid_mean = False\nrm_annual_mean = 
False\n###############################################################################\n###############################################################################\nrm_ensemble_mean = False\nrm_observational_mean = False\n###############################################################################\n###############################################################################\ncalculate_anomalies = False\nif calculate_anomalies == True:\n if timeper == 'historical': \n baseline = np.arange(1951,1980+1,1)\n elif timeper == 'future':\n baseline = np.arange(2021,2050+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n###############################################################################\n###############################################################################\nwindow = 0\nensTypeExperi = 'ENS'\n# shuffletype = 'TIMEENS'\n# shuffletype = 'ALLENSRAND'\n# shuffletype = 'ALLENSRANDrmmean'\nshuffletype = 'RANDGAUSS'\nsizeOfTwin = 4 # name of experiment for adding noise class #8\nif sizeOfTwin > 0:\n sizeOfTwinq = 1\nelse:\n sizeOfTwinq = sizeOfTwin\n###############################################################################\n###############################################################################\nfactorObs = 10 # factor to add to obs\n###############################################################################\n###############################################################################\nif ensTypeExperi == 'ENS':\n if window == 0:\n rm_standard_dev = False\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950+window,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020+window,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\nelif ensTypeExperi == 'GCM':\n if window == 0:\n rm_standard_dev = False\n yearsall = np.arange(1950,2019+1,1)\n ravel_modelens = False\n ravelmodeltime = False\n else:\n rm_standard_dev = True\n if timeper == 'historical': \n yearsall = np.arange(1950,2019+1,1)\n elif timeper == 'future':\n yearsall = np.arange(2020,2099+1,1)\n else:\n print(ValueError('WRONG TIMEPER!'))\n sys.exit()\n ravelmodeltime = False\n ravel_modelens = True\n###############################################################################\n###############################################################################\nnumOfEns = 16\nlensalso = True\nif len(pickSMILE) == 0:\n if modelGCMs[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nelif len(pickSMILE) != 0:\n if pickSMILE[-1] == 'RANDOM':\n randomalso = True\n else:\n randomalso = False\nlentime = len(yearsall)\n###############################################################################\n###############################################################################\nravelyearsbinary = False\nravelbinary = False\nnum_of_class = lenOfPicks + sizeOfTwinq\n###############################################################################\n###############################################################################\nlrpRule = 'z'\nnormLRP = 
True\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Picking experiment to save\ntypeOfAnalysis = 'issueWithExperiment'\n\n# Experiment #1\nif rm_ensemble_mean == True:\n if window > 1:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-1'\n# Experiment #2\nif rm_ensemble_mean == True:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-2'\n# Experiment #3 (raw data)\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-3'\n if variq == 'T2M':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 20 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 20 # random noise value to add/subtract from each grid point\n# Experiment #4\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-4'\n if variq == 'T2M':\n integer = 25 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 15 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #5\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-5'\n# Experiment #6\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == False:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == True:\n typeOfAnalysis = 'Experiment-6'\n# Experiment #7\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == True:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-7'\n# Experiment #8\nif rm_ensemble_mean == False:\n if window == 0:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-8'\n if variq == 'T2M':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'P':\n integer = 1 # random noise value to add/subtract from each grid point\n elif variq == 'SLP':\n integer = 5 # random noise value to add/subtract from each grid point\n# Experiment #9\nif rm_ensemble_mean == False:\n if window > 1:\n if calculate_anomalies == True:\n if rm_merid_mean == False:\n if rm_observational_mean == False:\n if rm_annual_mean == False:\n typeOfAnalysis = 'Experiment-9'\n \nprint('\\n<<<<<<<<<<<< Analysis == %s (%s) ! 
>>>>>>>>>>>>>>>\\n' % (typeOfAnalysis,timeper))\nif typeOfAnalysis == 'issueWithExperiment':\n sys.exit('Wrong parameters selected to analyze')\n \n### Select how to save files\nif land_only == True:\n saveData = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelif ocean_only == True:\n saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nelse:\n saveData = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi\nprint('*Filename == < %s >' % saveData) \n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Create sample class labels for each model for my own testing\n### Appends a twin set of classes for the random noise class \nif seasons != 'none':\n classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))\n for i in range(lenOfPicks):\n classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i) \n \n if sizeOfTwin > 0: \n ### Add random noise models\n randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)\n classesl = np.append(classesl,randomNoiseClass,axis=0)\n \n if ensTypeExperi == 'ENS':\n classeslnew = np.swapaxes(classesl,0,1)\n elif ensTypeExperi == 'GCM':\n classeslnew = classesl\n###############################################################################\n###############################################################################\n###############################################################################\n############################################################################### \n### Begin ANN and the entire script\nfor sis,singlesimulation in enumerate(datasetsingle):\n lrpsns = []\n for seas in range(len(seasons)):\n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### ANN preliminaries\n simuqq = datasetsingle[0]\n monthlychoice = seasons[seas]\n lat_bounds,lon_bounds = UT.regions(reg_name)\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',\n 'RMSE Train','RMSE Test',\n 'ridge penalty','zero mean',\n 'zero merid mean','land only?','ocean only?']) \n \n ### Define primary dataset to use\n dataset = singlesimulation\n modelType = dataset\n \n ### Whether to test and plot the results using obs data\n if dataset_obs == '20CRv3':\n year_obsall = np.arange(yearsall[sis].min(),2015+1,1)\n elif dataset_obs == 'ERA5':\n year_obsall = np.arange(1979+window,2019+1,1)\n if rm_standard_dev == False:\n year_obsall = np.arange(1979,2019+1,1)\n elif dataset_obs == 'ERA5BE':\n year_obsall = np.arange(1950+window,2019+1,1)\n if rm_standard_dev == 
False:\n year_obsall = np.arange(1950,2019+1,1)\n if monthlychoice == 'DJF':\n obsyearstart = year_obsall.min()+1\n year_obs = year_obsall[1:]\n else:\n obsyearstart = year_obsall.min()\n year_obs = year_obsall\n \n ### Remove the annual mean? True to subtract it from dataset ##########\n if rm_annual_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Rove the ensemble mean? True to subtract it from dataset ##########\n if rm_ensemble_mean == True:\n directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'\n \n ### Split the data into training and testing sets? value of 1 will use all \n ### data as training\n segment_data_factor = .75\n \n ### Hiddens corresponds to the number of hidden layers the nnet will use - 0 \n ### for linear model, or a list [10, 20, 5] for multiple layers of nodes \n ### (10 nodes in first layer, 20 in second, etc); The \"loop\" part \n ### allows you to loop through multiple architectures. For example, \n ### hiddens_loop = [[2,4],[0],[1 1 1]] would produce three separate NNs, the \n ### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,\n ### and the next would be 3 hidden layers of 1 node each.\n \n ### Set useGPU to True to use the GPU, but only if you selected the GPU \n ### Runtime in the menu at the top of this page\n useGPU = False\n \n ### Set Cascade to True to utilize the nnet's cascade function\n cascade = False\n \n ### Plot within the training loop - may want to set to False when testing out \n ### larget sets of parameters\n plot_in_train = False\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Read in model and observational/reanalysis data\n \n def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)\n print('\\nOur dataset: ',dataset,' is shaped',data.shape)\n return datar,lats,lons\n \n def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):\n data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)\n data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,\n lat_bounds,lon_bounds)\n \n print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)\n return data_obs,lats_obs,lons_obs\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Select data to test, train on \n def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):\n \n global random_segment_seed,trainIndices,testIndices\n if random_segment_seed == None:\n random_segment_seed = int(int(np.random.randint(1, 100000)))\n np.random.seed(random_segment_seed)\n\n############################################################################### 
\n############################################################################### \n############################################################################### \n ###################################################################\n ### Large Ensemble experiment\n if ensTypeExperi == 'ENS':\n \n ### Flip GCM and ensemble member axes\n datanew = np.swapaxes(data,0,1)\n classeslnew = np.swapaxes(classesl,0,1)\n \n if fac < 1 :\n nrows = datanew.shape[0]\n segment_train = int(np.round(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'ensembles, testing on',segment_test)\n \n ### Picking out random ensembles\n i = 0\n trainIndices = list()\n while i < segment_train:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n trainIndices.append(line)\n i += 1\n else:\n pass\n \n i = 0\n testIndices = list()\n while i < segment_test:\n line = np.random.randint(0, nrows)\n if line not in trainIndices:\n if line not in testIndices:\n testIndices.append(line)\n i += 1\n else:\n pass\n \n ### Training segment----------\n data_train = np.empty((len(trainIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(trainIndices):\n data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytrain[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on ensembles: ',trainIndices)\n print('Testing on ensembles: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((len(testIndices),datanew.shape[1],\n datanew.shape[2],datanew.shape[3],\n datanew.shape[4]))\n Ytest = np.empty((len(testIndices),classeslnew.shape[1],\n classeslnew.shape[2]))\n for index,ensemble in enumerate(testIndices):\n data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]\n Ytest[index,:,:] = classeslnew[ensemble,:,:]\n \n ### Random ensembles are picked\n if debug:\n print('Training on ensembles: %s' % len(trainIndices))\n print('Testing on ensembles: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n\n###############################################################################\n############################################################################### \n############################################################################### \n 
###################################################################\n ### GCM type experiments without ensembles\n elif ensTypeExperi == 'GCM':\n if data.ndim == 5:\n datanew = np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))\n classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))\n else:\n datanew = data\n classeslnew = classesl\n \n if fac < 1 :\n nrows = datanew.shape[1]\n segment_train = int(np.floor(nrows * fac))\n segment_test = nrows - segment_train\n print('Training on',segment_train,'years, testing on',segment_test)\n \n ### Picking out random ensembles\n firstyears = int(np.floor(segment_test/2))\n lastyears = -int(np.floor(segment_test/2))\n trainIndices = np.arange(firstyears,firstyears+segment_train,1)\n testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)\n \n ### Training segment----------\n data_train = np.empty((datanew.shape[0],len(trainIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))\n for index,ensemble in enumerate(trainIndices):\n data_train[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytrain[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('\\nTraining on years: ',trainIndices)\n print('Testing on years: ',testIndices)\n print('\\norg data - shape', datanew.shape)\n print('training data - shape', data_train.shape)\n \n ### Reshape into X and Y\n Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))\n Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))\n Xtrain_shape = (data_train.shape[0])\n \n ### Testing segment----------\n data_test = np.empty((datanew.shape[0],len(testIndices),\n datanew.shape[2],datanew.shape[3]))\n Ytest = np.empty((classeslnew.shape[0],len(testIndices)))\n for index,ensemble in enumerate(testIndices):\n data_test[:,index,:,:] = datanew[:,ensemble,:,:]\n Ytest[:,index] = classeslnew[:,ensemble]\n \n ### Random ensembles are picked\n if debug:\n print('Training on years: %s' % len(trainIndices))\n print('Testing on years: %s' % len(testIndices))\n print('\\norg data - shape', datanew.shape)\n print('testing data - shape', data_test.shape)\n \n ### Reshape into X and Y\n Xtest= data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))\n Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))\n Xtest_shape = (data_test.shape[0])\n \n Xtest_shape = (data_test.shape[0], data_test.shape[1])\n data_train_shape = data_train.shape[0]\n data_test_shape = data_test.shape[0]\n \n ### 'unlock' the random seed\n np.random.seed(None)\n \n ### One-hot vectors\n Ytrain = keras.utils.to_categorical(Ytrain)\n Ytest = keras.utils.to_categorical(Ytest) \n \n ### Class weights\n class_weight = class_weight_creator(Ytrain)\n \n else:\n print(ValueError('WRONG EXPERIMENT!'))\n return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Plotting functions \n def adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in 
spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n\n ###############################################################################\n ###############################################################################\n ############################################################################### \n ### Create a class weight dictionary to help if the classes are unbalanced\n def class_weight_creator(Y):\n class_dict = {}\n weights = np.max(np.sum(Y, axis=0)) / np.sum(Y, axis=0)\n for i in range( Y.shape[-1] ):\n class_dict[i] = weights[i] \n return class_dict\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Neural Network Creation & Training \n class TimeHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.times = []\n \n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_time_start = time.time()\n \n def on_epoch_end(self, epoch, logs={}):\n self.times.append(time.time() - self.epoch_time_start)\n \n def defineNN(hidden, input_shape, output_shape, ridgePenalty): \n \n model = Sequential()\n ### Initialize first layer\n ### Model is a single node with activation function\n model.add(Dense(hidden[0],input_shape=(input_shape,),\n activation=actFun, use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Initialize other layers\n for layer in hidden[1:]:\n model.add(Dense(layer,activation=actFun,\n use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n print('\\nTHIS IS AN ANN!\\n')\n \n #### Initialize output layer\n model.add(Dense(output_shape,activation=None,use_bias=True,\n kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),\n bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),\n kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))\n \n ### Add softmax layer at the end\n model.add(Activation('softmax'))\n \n return model\n \n def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):\n \n global lr_here, batch_size\n lr_here = 0.001\n model.compile(optimizer=optimizers.SGD(lr=lr_here,\n momentum=0.9,nesterov=True), \n loss = 'categorical_crossentropy',\n metrics=[metrics.categorical_accuracy])\n # model.compile(optimizer=optimizers.Nadam(lr=lr_here), \n # loss = 'categorical_crossentropy',\n # metrics=[metrics.categorical_accuracy])\n \n ### Declare the relevant model parameters\n batch_size = 24 \n \n print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----') \n \n ### Callbacks\n time_callback = TimeHistory()\n early_stopping = keras.callbacks.EarlyStopping(monitor='loss',\n patience=2,\n verbose=1,\n mode='auto')\n \n history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,\n shuffle=True,verbose=verbose,\n callbacks=[time_callback,early_stopping],\n validation_split=0.)\n print('******** done training ***********')\n \n 
return model, history\n \n def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):\n \"\"\"or loops to iterate through training iterations, ridge penalty, \n and hidden layer list\n \"\"\"\n results = {}\n global nnet,random_network_seed\n \n for niter in iterations:\n for penalty in ridge_penalty:\n for hidden in hiddens:\n \n ### Check / use random seed\n if random_network_seed == None:\n np.random.seed(None)\n random_network_seed = int(np.random.randint(1, 100000))\n np.random.seed(random_network_seed)\n random.seed(random_network_seed)\n tf.set_random_seed(0)\n \n ### Standardize the data\n Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)\n Xmean,Xstd = stdVals\n \n ### Define the model\n model = defineNN(hidden,\n input_shape=np.shape(Xtrain)[1],\n output_shape=np.shape(Ytrain)[1],\n ridgePenalty=penalty) \n \n ### Train the net\n model, history = trainNN(model,Xtrain,\n Ytrain,niter,class_weight,verbose=1)\n \n ### After training, use the network with training data to \n ### check that we don't have any errors and output RMSE\n rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))\n if type(Ytest) != bool:\n rmse_test = 0.\n rmse_test = dSS.rmse(Ytest,model.predict(Xtest))\n else:\n rmse_test = False\n \n this_result = {'iters': niter, \n 'hiddens' : hidden, \n 'RMSE Train' : rmse_train, \n 'RMSE Test' : rmse_test, \n 'ridge penalty': penalty, \n 'zero mean' : rm_annual_mean,\n 'zero merid mean' : rm_merid_mean,\n 'land only?' : land_only,\n 'ocean only?' : ocean_only,\n 'Segment Seed' : random_segment_seed,\n 'Network Seed' : random_network_seed }\n results.update(this_result)\n \n global experiment_result\n experiment_result = experiment_result.append(results,\n ignore_index=True)\n \n #if True to plot each iter's graphs.\n if plot_in_train == True:\n plt.figure()\n \n plt.subplot(1,1,1)\n plt.plot(history.history['loss'],label = 'training')\n plt.title(history.history['loss'][-1])\n plt.xlabel('epoch')\n plt.xlim(2,len(history.history['loss'])-1)\n plt.legend()\n \n plt.grid(True)\n plt.show()\n \n #'unlock' the random seed\n np.random.seed(None)\n random.seed(None)\n tf.set_random_seed(None)\n \n return experiment_result, model\n \n ###############################################################################\n ###############################################################################\n ###############################################################################\n ### Results\n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n \n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n ### Parameters\n debug = True\n NNType = 'ANN'\n avgHalfChunk = 0\n option4 = True\n biasBool = False\n hiddensList = [[10,10]]\n ridge_penalty = [0.1]\n # hiddensList = [[8,8]]\n # ridge_penalty = [0.2]\n actFun = 'relu'\n \n if any([maskNoiseClass=='land',maskNoiseClass=='ocean']):\n debug = True\n NNType = 'ANN'\n avgHalfChunk = 0\n option4 = True\n biasBool = False\n hiddensList = [[8,8]]\n ridge_penalty = [0.10]\n actFun = 'relu'\n \n expList = [(0)] # (0,1)\n expN = np.size(expList)\n \n iterations = [100] \n random_segment = True\n foldsN = 1\n \n for avgHalfChunk in (0,): \n session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n K.clear_session()\n \n for loop in ([0]): \n 
### Get info about the region\n lat_bounds,lon_bounds = UT.regions(reg_name)\n data_all,lats,lons = read_primary_dataset(variq,dataset,\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,\n dataset_obs,\n numOfEns,\n lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n\n###############################################################################\n###############################################################################\n###############################################################################\n for exp in expList: \n ### Get the data together\n data, data_obs, = data_all, data_obs_all,\n###############################################################################\n if len(pickSMILE) >= 1:\n data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)\n print('\\n*Pick models to analysis from %s*\\n' % pickSMILE)\n###############################################################################\n if calculate_anomalies == True:\n data, data_obs = dSS.calculate_anomalies(data,data_obs,\n lats,lons,baseline,yearsall)\n print('\\n*Calculate anomalies for %s-%s*\\n' % (baseline.min(),baseline.max()))\n############################################################################### \n if rm_annual_mean == True:\n data, data_obs = dSS.remove_annual_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed annual mean*\\n')\n############################################################################### \n if rm_merid_mean == True:\n data, data_obs = dSS.remove_merid_mean(data,data_obs,\n lats,lons,\n lats_obs,lons_obs)\n print('\\n*Removed meridional mean*\\n')\n############################################################################### \n if rm_ensemble_mean == True:\n data = dSS.remove_ensemble_mean(data,ravel_modelens,\n ravelmodeltime,\n rm_standard_dev,\n numOfEns)\n print('\\n*Removed ensemble mean*')\n############################################################################### \n if rm_standard_dev == True:\n data = dSS.rm_standard_dev(data,window,ravelmodeltime,\n numOfEns)\n print('\\n*Removed standard deviation*')\n############################################################################### \n if rm_observational_mean == True:\n data = dSS.remove_observations_mean(data,data_obs,lats,lons)\n print('\\n*Removed observational data*')\n############################################################################### \n if land_only == True:\n data, data_obs = dSS.remove_ocean(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed ocean data*')\n###############################################################################\n if ocean_only == True:\n data, data_obs = dSS.remove_land(data,data_obs,\n lat_bounds,\n lon_bounds) \n print('\\n*Removed land data*') \n###############################################################################\n ### Adding random data\n if sizeOfTwin > 0:\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n data = 
dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)\n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n### Modify the GFDL-CM3 model for warmth and cooling that model only\n print('\\n <<< FACTOR FOR OBS IS %s! >>>\\n' % factorObs)\n if factorObs == 0:\n data = data\n elif factorObs == 1: # warm its mean state\n GFDL = data[4,:,:,:,:]\n GFDLwarmer = GFDL + 3\n data[4,:,:,:,:] = GFDLwarmer\n elif factorObs == 2: # cool its mean state\n GFDL = data[4,:,:,:,:]\n GFDLcooler = GFDL - 3\n data[4,:,:,:,:] = GFDLcooler\n elif factorObs == 3: # warm recent 10 years\n GFDL = data[4,:,:,:,:] \n GFDLbefore = GFDL[:,:-10,:,:]\n GFDLafter = GFDL[:,-10:,:,:] + 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq\n elif factorObs == 4: # cool recent 10 years\n GFDL = data[4,:,:,:,:] \n GFDLbefore = GFDL[:,:-10,:,:]\n GFDLafter = GFDL[:,-10:,:,:] - 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq \n elif factorObs == 5: # warm the North Pole\n sizeofNP = 10\n GFDL = data[4,:,:,:,:] \n warmerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) + 5\n addtoclimoNP = GFDL[:,:,sizeofNP:,:] + warmerNP\n GFDL[:,:,sizeofNP:,:] = addtoclimoNP\n data[4,:,:,:,:] = GFDL\n elif factorObs == 6: # cool the North Pole\n sizeofNP = 10\n GFDL = data[4,:,:,:,:] \n coolerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) - 5\n addtoclimoNP = GFDL[:,:,sizeofNP:,:] + coolerNP\n GFDL[:,:,sizeofNP:,:] = addtoclimoNP\n data[4,:,:,:,:] = GFDL\n elif factorObs == 7: # warm the Lower Arctic\n sizeofLA = 5\n GFDL = data[4,:,:,:,:] \n warmerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) + 5\n addtoclimoLA = GFDL[:,:,:sizeofLA,:] + warmerLA\n GFDL[:,:,:sizeofLA,:] = addtoclimoLA\n data[4,:,:,:,:] = GFDL\n elif factorObs == 8: # cool the Lower Arctic\n sizeofLA = 5\n GFDL = data[4,:,:,:,:] \n coolerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) - 5\n addtoclimoLA = GFDL[:,:,:sizeofLA,:] + coolerLA\n GFDL[:,:,:sizeofLA,:] = addtoclimoLA\n data[4,:,:,:,:] = GFDL\n elif factorObs == 9: # warm early 50 years\n GFDL = data[4,:,:,:,:] \n GFDLafter = GFDL[:,50:,:,:]\n GFDLbefore = GFDL[:,:50,:,:] + 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq\n elif factorObs == 10: # cool early 50 years\n GFDL = data[4,:,:,:,:] \n GFDLafter = GFDL[:,50:,:,:]\n GFDLbefore = GFDL[:,:50,:,:] - 3\n GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)\n data[4,:,:,:,:] = GFDLq \n \n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n\n###############################################################################\n###############################################################################\n###############################################################################\n ### Loop over folds\n for loop in np.arange(0,foldsN): \n \n K.clear_session()\n #---------------------------\n # 
random_segment_seed = 34515\n random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))\n #---------------------------\n Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)\n \n YtrainClassMulti = Ytrain \n YtestClassMulti = Ytest \n \n # For use later\n XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)\n Xmean, Xstd = stdVals \n \n #---------------------------\n random_network_seed = 87750\n #---------------------------\n \n # Create and train network\n exp_result,model = test_train_loopClass(Xtrain,\n YtrainClassMulti,\n Xtest,\n YtestClassMulti,\n iterations=iterations,\n ridge_penalty=ridge_penalty,\n hiddens=hiddensList,class_weight=class_weight,\n plot_in_train = True)\n model.summary() \n \n ################################################################################################################################################ \n # save the model\n dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'\n savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed) \n savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)\n \n if(reg_name=='Globe'):\n regSave = ''\n else:\n regSave = '_' + reg_name\n \n if(rm_annual_mean==True):\n savename = savename + '_AnnualMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'\n if(rm_ensemble_mean==True):\n savename = savename + '_EnsembleMeanRemoved' \n savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'\n \n savename = savename + regSave \n # model.save(dirname + savename + '.h5')\n # np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)\n \n print('saving ' + savename)\n \n ###############################################################\n ### Make final plot\n ### Get obs\n dataOBSERVATIONS = data_obs\n latsOBSERVATIONS = lats_obs\n lonsOBSERVATIONS = lons_obs\n \n Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])\n \n annType = 'class'\n if monthlychoice == 'DJF':\n startYear = yearsall[sis].min()+1\n endYear = yearsall[sis].max()\n else:\n startYear = yearsall[sis].min()\n endYear = yearsall[sis].max()\n years = np.arange(startYear,endYear+1,1) \n Xmeanobs = np.nanmean(Xobs,axis=0)\n Xstdobs = np.nanstd(Xobs,axis=0) \n \n XobsS = (Xobs-Xmeanobs)/Xstdobs\n XobsS[np.isnan(XobsS)] = 0\n \n xtrainpred = (Xtrain-Xmean)/Xstd\n xtrainpred[np.isnan(xtrainpred)] = 0\n xtestpred = (Xtest-Xmean)/Xstd\n xtestpred[np.isnan(xtestpred)] = 0\n \n if(annType=='class'):\n YpredObs = model.predict(XobsS)\n YpredTrain = model.predict(xtrainpred)\n YpredTest = model.predict(xtestpred)\n \n #######################################################\n #######################################################\n #######################################################\n ### Check null hypothesis of random data!\n randarray,latsra,lonsra = 
read_primary_dataset(variq,'RANDOM',\n numOfEns,lensalso,\n randomalso,\n ravelyearsbinary,\n ravelbinary,\n shuffletype,\n lat_bounds,\n lon_bounds)\n randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])\n randarraymean = np.nanmean(randarrayn,axis=0)\n randarraystd = np.nanstd(randarrayn,axis=0)\n randarrayS = (randarrayn-randarraymean)/randarraystd\n \n ### Prediction on random data\n YpredRand = model.predict(randarrayS)\n #######################################################\n #######################################################\n #######################################################\n \n ### Get output from model\n trainingout = YpredTrain\n testingout = YpredTest\n \n if ensTypeExperi == 'ENS':\n classesltrain = classeslnew[trainIndices,:,:].ravel()\n classesltest = classeslnew[testIndices,:,:].ravel()\n elif ensTypeExperi == 'GCM':\n classesltrain = classeslnew[:,:,trainIndices].ravel()\n classesltest = classeslnew[:,:,testIndices].ravel()\n \n ### Random data tests\n randout = YpredRand\n labelsrand = np.argmax(randout,axis=1)\n uniquerand,countrand = np.unique(labelsrand,return_counts=True)\n np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)\n np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)\n \n ### Observations\n obsout = YpredObs\n labelsobs = np.argmax(obsout,axis=1)\n uniqueobs,countobs = np.unique(labelsobs,return_counts=True)\n print(labelsobs)\n np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)\n np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)\n \n def truelabel(data):\n \"\"\"\n Calculate argmax\n \"\"\"\n maxindexdata= np.argmax(data[:,:],axis=1) \n \n return maxindexdata\n \n def accuracyTotalTime(data_pred,data_true):\n \"\"\"\n Compute accuracy for the entire time series\n \"\"\"\n \n data_truer = data_true\n data_predr = data_pred\n accdata_pred = accuracy_score(data_truer,data_predr)\n \n return accdata_pred\n\n ##############################################################################\n ##############################################################################\n ############################################################################## \n indextrain = truelabel(trainingout)\n acctrain = accuracyTotalTime(indextrain,classesltrain)\n indextest = truelabel(testingout)\n acctest = accuracyTotalTime(indextest,classesltest)\n print('\\n\\nAccuracy Training == ',acctrain)\n print('Accuracy Testing == ',acctest)\n \n ## Save the output for plotting\n np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)\n np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)\n \n np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)\n np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)\n \n np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)\n np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)\n \n ### See more more details\n model.layers[0].get_config()\n \n ## Define variable for analysis\n print('\\n\\n------------------------')\n print(variq,'= Variable!')\n print(monthlychoice,'= Time!')\n print(reg_name,'= Region!')\n print(lat_bounds,lon_bounds)\n print(dataset,'= Model!')\n print(dataset_obs,'= Observations!\\n')\n print(rm_annual_mean,'= rm_annual_mean') \n print(rm_merid_mean,'= rm_merid_mean') \n print(rm_ensemble_mean,'= rm_ensemble_mean') 
\n print(land_only,'= land_only')\n print(ocean_only,'= ocean_only')\n \n ## Variables for plotting\n lons2,lats2 = np.meshgrid(lons,lats) \n observations = data_obs\n modeldata = data\n modeldatamean = np.nanmean(modeldata,axis=1)\n \n spatialmean_obs = UT.calc_weightedAve(observations,lats2)\n spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)\n spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)\n plt.figure()\n plt.plot(yearsall,spatialmean_modmean.transpose())\n plt.plot(yearsall,spatialmean_modmean.transpose()[:,4],linewidth=3,color='red',label=r'GFDL-CM3 - %s-Experiment' % factorObs)\n plt.xlabel('Years')\n plt.ylabel('Average Arctic Temperature')\n plt.legend()\n plt.ylim([-14.5,-1])\n plt.savefig('/Users/zlabe/Desktop/factor-%s.png' % factorObs,dpi=300)\n plt.figure()\n plt.plot(spatialmean_obs)\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n ## Visualizing through LRP\n numLats = lats.shape[0]\n numLons = lons.shape[0] \n numDim = 3\n\n ##############################################################################\n ##############################################################################\n ##############################################################################\n \n lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),\n np.append(Ytrain,Ytest,axis=0),\n biasBool,annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n meanlrp = np.nanmean(lrpall,axis=0)\n fig=plt.figure()\n plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)\n \n ### For training data only\n lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n ### For training data only\n lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,\n annType,num_of_class,\n yearsall,lrpRule,normLRP,\n numLats,numLons,numDim)\n \n \n ### For observations data only\n lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n\n ### For random data only\n lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,\n num_of_class,yearsall,lrpRule,\n normLRP,numLats,numLons,numDim)\n \n ##############################################################################\n ##############################################################################\n ##############################################################################\n def netcdfLRP(lats,lons,var,directory,typemodel,saveData):\n print('\\n>>> Using netcdfLRP function!')\n \n from netCDF4 import Dataset\n import numpy as np\n \n name = 'LRPMap' + typemodel + '_' + saveData + '.nc'\n filename = directory + name\n ncfile = Dataset(filename,'w',format='NETCDF4')\n ncfile.description = 'LRP maps for using selected seed' \n \n ### Dimensions\n ncfile.createDimension('years',var.shape[0])\n ncfile.createDimension('lat',var.shape[1])\n ncfile.createDimension('lon',var.shape[2])\n \n ### Variables\n years = ncfile.createVariable('years','f4',('years'))\n latitude = ncfile.createVariable('lat','f4',('lat'))\n longitude = ncfile.createVariable('lon','f4',('lon'))\n varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))\n \n ### Units\n varns.units = 'unitless relevance'\n ncfile.title = 'LRP relevance'\n ncfile.instituion = 'Colorado State University'\n ncfile.references = 
'Barnes et al. [2020]'\n \n ### Data\n years[:] = np.arange(var.shape[0])\n latitude[:] = lats\n longitude[:] = lons\n varns[:] = var\n \n ncfile.close()\n print('*Completed: Created netCDF4 File!')\n \n netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)\n netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)\n netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)\n netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)"
] | [
[
"numpy.sum",
"numpy.savetxt",
"numpy.random.seed",
"numpy.size",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.plot",
"numpy.meshgrid",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.append",
"numpy.nanmean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"numpy.genfromtxt",
"matplotlib.pyplot.title",
"tensorflow.keras.initializers.RandomNormal",
"numpy.isnan",
"numpy.unique",
"matplotlib.pyplot.rc",
"numpy.zeros",
"numpy.argmax",
"numpy.arange",
"sklearn.metrics.accuracy_score",
"tensorflow.set_random_seed",
"matplotlib.pyplot.ylim",
"tensorflow.ConfigProto",
"tensorflow.keras.utils.to_categorical",
"matplotlib.pyplot.legend",
"numpy.swapaxes",
"matplotlib.pyplot.grid",
"pandas.DataFrame",
"numpy.floor",
"numpy.nanstd",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.shape",
"tensorflow.get_default_graph",
"numpy.round",
"matplotlib.pyplot.contourf",
"numpy.random.randint",
"matplotlib.pyplot.xlabel"
]
] |
fengxia41103/stock | [
"1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1"
] | [
"backend/stock/workers/get_valuation_ratio.py"
] | [
"import logging\n\nimport pandas as pd\n\nfrom stock.models import MyStock\nfrom stock.models import ValuationRatio\nfrom yahooquery import Ticker\n\nlogger = logging.getLogger(\"stock\")\n\n\nclass MyValuationRatio:\n def __init__(self, symbol):\n self.stock = MyStock.objects.get(symbol=symbol)\n\n def get(self):\n s = Ticker(self.stock.symbol, timeout=15)\n\n # all numbers convert to million\n df = s.valuation_measures\n if \"unavailable\" in df or \"error\" in df:\n logger.error(\"{}: {}\".format(self.stock.symbol, df))\n return\n\n # DB doesn't like NaN\n df = df.where(pd.notnull(df), 0)\n\n mapping = {\n \"forward_pe\": \"ForwardPeRatio\",\n \"pb\": \"PbRatio\",\n \"pe\": \"PeRatio\",\n \"peg\": \"PegRatio\",\n \"ps\": \"PsRatio\",\n }\n\n # enumerate data frame\n for row in df.itertuples(index=False):\n i, created = ValuationRatio.objects.get_or_create(\n stock=self.stock, on=row.asOfDate.date()\n )\n\n for key, val in mapping.items():\n try:\n tmp = float(getattr(row, val))\n except AttributeError:\n tmp = 0\n\n # set value\n setattr(i, key, tmp)\n i.save()\n\n # if all values are 0, discard the record\n ValuationRatio.objects.filter(\n forward_pe=0, pb=0, pe=0, peg=0, ps=0\n ).delete()\n"
] | [
[
"pandas.notnull"
]
] |
ss18/shapenet | [
"5a605bee6b2750f3a586ca9a740165e66b5dd7d8"
] | [
"shapenet/networks/utils.py"
] | [
"# author: Justus Schock (justus.schock@rwth-aachen.de)\n\nimport torch\n\n\nclass CustomGroupNorm(torch.nn.Module):\n \"\"\"\n Custom Group Norm which adds n_groups=2 as default parameter\n \"\"\"\n\n def __init__(self, n_features, n_groups=2):\n \"\"\"\n\n Parameters\n ----------\n n_features : int\n number of input features\n n_groups : int\n number of normalization groups\n \"\"\"\n super().__init__()\n self.norm = torch.nn.GroupNorm(n_groups, n_features)\n\n def forward(self, x):\n \"\"\"\n Forward batch through network\n\n Parameters\n ----------\n x : :class:`torch.Tensor`\n batch to forward\n\n Returns\n -------\n :class:`torch.Tensor`\n normalized results\n\n \"\"\"\n return self.norm(x)\n"
] | [
[
"torch.nn.GroupNorm"
]
] |
DevD1092/Retinaface_DLIB | [
"455e393f1bd688cf2d1cc41960105af9ea8a26c6"
] | [
"test_widerface.py"
] | [
"from __future__ import print_function\nimport os\nimport sys\nimport argparse\nimport torch\nimport torch.backends.cudnn as cudnn\nimport numpy as np\nfrom data import cfg_mnet, cfg_re50\nfrom layers.functions.prior_box import PriorBox\nfrom utils.nms.py_cpu_nms import py_cpu_nms\nimport cv2\nfrom models.retinaface import RetinaFace\nfrom utils.box_utils import decode, decode_landm\nfrom utils.timer import Timer\n\n\nparser = argparse.ArgumentParser(description='Retinaface')\nparser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',\n type=str, help='Trained state_dict file path to open')\nparser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')\nparser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')\nparser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')\nparser.add_argument('--cpu', action=\"store_true\", default=False, help='Use cpu inference')\nparser.add_argument('--dataset_folder', default='./data/widerface/widerface/val/images/', type=str, help='dataset path')\nparser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')\nparser.add_argument('--top_k', default=5000, type=int, help='top_k')\nparser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')\nparser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')\nparser.add_argument('-s', '--save_image', action=\"store_true\", default=False, help='show detection results')\nparser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')\nargs = parser.parse_args()\n\n\ndef check_keys(model, pretrained_state_dict):\n ckpt_keys = set(pretrained_state_dict.keys())\n model_keys = set(model.state_dict().keys())\n used_pretrained_keys = model_keys & ckpt_keys\n unused_pretrained_keys = ckpt_keys - model_keys\n missing_keys = model_keys - ckpt_keys\n print('Missing keys:{}'.format(len(missing_keys)))\n print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))\n print('Used keys:{}'.format(len(used_pretrained_keys)))\n assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'\n return True\n\n\ndef remove_prefix(state_dict, prefix):\n ''' Old style model is stored with all names of parameters sharing common prefix 'module.' 
'''\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x\n return {f(key): value for key, value in state_dict.items()}\n\n\ndef load_model(model, pretrained_path, load_to_cpu):\n print('Loading pretrained model from {}'.format(pretrained_path))\n if load_to_cpu:\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)\n else:\n device = torch.cuda.current_device()\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))\n if \"state_dict\" in pretrained_dict.keys():\n pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')\n else:\n pretrained_dict = remove_prefix(pretrained_dict, 'module.')\n check_keys(model, pretrained_dict)\n model.load_state_dict(pretrained_dict, strict=False)\n return model\n\n\nif __name__ == '__main__':\n torch.set_grad_enabled(False)\n \n cfg = None\n if args.network == \"mobile0.25\":\n cfg = cfg_mnet\n elif args.network == \"resnet50\":\n cfg = cfg_re50\n # net and model\n net = RetinaFace(cfg=cfg, phase = 'test')\n net = load_model(net, args.trained_model, args.cpu)\n net.eval()\n print('Finished loading model!')\n print(net)\n cudnn.benchmark = True\n device = torch.device(\"cpu\" if args.cpu else \"cuda\")\n net = net.to(device)\n \n # testing dataset\n testset_folder = args.dataset_folder\n print (testset_folder)\n testset_list = args.dataset_folder + \"test_list.txt\"\n test_dataset = []\n #print (testset_list)\n\n with open(testset_list, 'r') as fr:\n content = fr.readlines()\n test_dataset = [line.strip() for line in content]\n num_images = len(test_dataset)\n print (num_images)\n\n _t = {'forward_pass': Timer(), 'misc': Timer()}\n\n # testing begin\n for i, img_name in enumerate(test_dataset):\n image_path = testset_folder + img_name\n print (image_path)\n img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = np.float32(img_raw)\n\n # testing scale\n target_size = 1600\n max_size = 2150\n im_shape = img.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n resize = float(target_size) / float(im_size_min)\n # prevent bigger axis from being more than max_size:\n if np.round(resize * im_size_max) > max_size:\n resize = float(max_size) / float(im_size_max)\n if args.origin_size:\n resize = 1\n\n if resize != 1:\n img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)\n im_height, im_width, _ = img.shape\n scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])\n img -= (104, 117, 123)\n img = img.transpose(2, 0, 1)\n img = torch.from_numpy(img).unsqueeze(0)\n img = img.to(device)\n scale = scale.to(device)\n\n _t['forward_pass'].tic()\n loc, conf, landms = net(img) # forward pass\n _t['forward_pass'].toc()\n _t['misc'].tic()\n priorbox = PriorBox(cfg, image_size=(im_height, im_width))\n priors = priorbox.forward()\n priors = priors.to(device)\n prior_data = priors.data\n boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])\n boxes = boxes * scale / resize\n boxes = boxes.cpu().numpy()\n scores = conf.squeeze(0).data.cpu().numpy()[:, 1]\n landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])\n scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],\n img.shape[3], img.shape[2], img.shape[3], img.shape[2],\n img.shape[3], img.shape[2]])\n scale1 = scale1.to(device)\n landms = landms * scale1 / resize\n landms = landms.cpu().numpy()\n\n # ignore low 
scores\n inds = np.where(scores > args.confidence_threshold)[0]\n boxes = boxes[inds]\n landms = landms[inds]\n scores = scores[inds]\n\n # keep top-K before NMS\n order = scores.argsort()[::-1]\n # order = scores.argsort()[::-1][:args.top_k]\n boxes = boxes[order]\n landms = landms[order]\n scores = scores[order]\n\n # do NMS\n dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)\n keep = py_cpu_nms(dets, args.nms_threshold)\n # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)\n dets = dets[keep, :]\n landms = landms[keep]\n\n # keep top-K faster NMS\n # dets = dets[:args.keep_top_k, :]\n # landms = landms[:args.keep_top_k, :]\n\n dets = np.concatenate((dets, landms), axis=1)\n _t['misc'].toc()\n\n # --------------------------------------------------------------------\n save_name = args.save_folder + img_name[:-4] + \".txt\"\n dirname = os.path.dirname(save_name)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(save_name, \"w\") as fd:\n bboxs = dets\n file_name = os.path.basename(save_name)[:-4] + \"\\n\"\n bboxs_num = str(len(bboxs)) + \"\\n\"\n fd.write(file_name)\n fd.write(bboxs_num)\n for box in bboxs:\n x = int(box[0])\n y = int(box[1])\n w = int(box[2]) - int(box[0])\n h = int(box[3]) - int(box[1])\n confidence = str(box[4])\n line = str(x) + \" \" + str(y) + \" \" + str(w) + \" \" + str(h) + \" \" + confidence + \" \\n\"\n fd.write(line)\n\n print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))\n\n # save image\n if args.save_image:\n for b in dets:\n if b[4] < args.vis_thres:\n continue\n text = \"{:.4f}\".format(b[4])\n b = list(map(int, b))\n cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)\n cx = b[0]\n cy = b[1] + 12\n cv2.putText(img_raw, text, (cx, cy),\n cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))\n\n # landms\n cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)\n cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)\n cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)\n cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)\n cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)\n # save image\n if not os.path.exists(\"./results_handtask/\"):\n os.makedirs(\"./results_handtask/\")\n name = \"./results_handtask/%05d.jpg\" % i\n cv2.imwrite(name, img_raw)\n\n"
] | [
[
"torch.load",
"torch.set_grad_enabled",
"numpy.float32",
"torch.cuda.current_device",
"numpy.where",
"numpy.hstack",
"numpy.max",
"torch.from_numpy",
"numpy.min",
"numpy.round",
"numpy.concatenate",
"torch.device",
"torch.Tensor"
]
] |
bluetyson/archai | [
"50f70ccccf536466cc0370c8a63401e05dec33fd"
] | [
"archai/datasets/providers/svhn_provider.py"
] | [
"# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT license.\r\n\r\nfrom typing import List, Tuple, Union, Optional\r\n\r\nfrom overrides import overrides, EnforceOverrides\r\nfrom torch.utils.data.dataset import Dataset\r\n\r\nimport torchvision\r\nfrom torchvision.transforms import transforms\r\nfrom torch.utils.data import ConcatDataset\r\n\r\nfrom archai.datasets.dataset_provider import DatasetProvider, register_dataset_provider, TrainTestDatasets\r\nfrom archai.common.config import Config\r\nfrom archai.common import utils\r\n\r\n\r\nclass SvhnProvider(DatasetProvider):\r\n def __init__(self, conf_dataset:Config):\r\n super().__init__(conf_dataset)\r\n self._dataroot = utils.full_path(conf_dataset['dataroot'])\r\n\r\n @overrides\r\n def get_datasets(self, load_train:bool, load_test:bool,\r\n transform_train, transform_test)->TrainTestDatasets:\r\n trainset, testset = None, None\r\n\r\n if load_train:\r\n trainset = torchvision.datasets.SVHN(root=self._dataroot, split='train',\r\n download=True, transform=transform_train)\r\n extraset = torchvision.datasets.SVHN(root=self._dataroot, split='extra',\r\n download=True, transform=transform_train)\r\n trainset = ConcatDataset([trainset, extraset])\r\n if load_test:\r\n testset = torchvision.datasets.SVHN(root=self._dataroot, split='test',\r\n download=True, transform=transform_test)\r\n\r\n return trainset, testset\r\n\r\n @overrides\r\n def get_transforms(self)->tuple:\r\n MEAN = [0.4914, 0.4822, 0.4465]\r\n STD = [0.2023, 0.1994, 0.20100]\r\n transf = [\r\n transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip()\r\n ]\r\n\r\n normalize = [\r\n transforms.ToTensor(),\r\n transforms.Normalize(MEAN, STD)\r\n ]\r\n\r\n train_transform = transforms.Compose(transf + normalize)\r\n test_transform = transforms.Compose(normalize)\r\n\r\n return train_transform, test_transform\r\n\r\nregister_dataset_provider('svhn', SvhnProvider)"
] | [
[
"torch.utils.data.ConcatDataset"
]
] |
shinh/dldt | [
"edd86d090592f7779f4dbb2681546e1f4e81284f",
"edd86d090592f7779f4dbb2681546e1f4e81284f"
] | [
"model-optimizer/extensions/middle/Reduce_test.py",
"model-optimizer/mo/front/caffe/extractors/inner_product_test.py"
] | [
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.middle.Reduce import ReduceReplacer\nfrom mo.middle.passes.eliminate_test import build_graph\nfrom mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs\n\n# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the\n# dictionary with node attributes.\nnodes_attributes = {\n # Placeholder layers\n 'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},\n 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n\n # Reshape layers\n 'reduce_1': {'type': 'Reduce', 'kind': 'op', 'op': 'Reduce'},\n 'reduce_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Reshape layers\n 'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},\n 'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n 'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},\n 'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Pooling\n 'pooling': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling'},\n 'pooling_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Power\n 'power': {'type': 'Power', 'kind': 'op', 'op': 'Power'},\n 'power_data': {'value': None, 'shape': None, 'kind': 'data'},\n\n # Concat\n 'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},\n}\n\n\nclass ReduceReplacerTest(unittest.TestCase):\n def test1(self):\n # Original graph\n # data(1,64,1)-->Reduce(axis=1,keep_dims=True)-->data(1,1,1)\n #\n # Reference graph\n # data(1,61,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reduce_1': {'axis': np.array([1]), 'keep_dims': True, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reshape_1': {'dim': np.array([1, 1, 64, 1])},\n 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},\n 'pooling': {'window': np.array([1, 1, 64, 1])},\n 'pooling_data': {'shape': np.array([1, 1, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 1, 1])},\n 
'reshape_2_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test2(self):\n # Original graph\n # data(1,3,64,64)-->Reduce(axis=2,keep_dims=True)-->data(1,3,1,64)\n #\n # Reference graph\n # data(1,3,64,64)->Reshape->Pool(1,3,1,64)->Reshape(1,3,1,64)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reduce_1': {'axis': np.array([2]), 'keep_dims': True, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([1, 3, 1, 64])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reshape_1': {'dim': np.array([1, 3, 64, 64])},\n 'reshape_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'pooling': {'window': np.array([1, 1, 64, 1])},\n 'pooling_data': {'shape': np.array([1, 3, 1, 64])},\n 'reshape_2': {'dim': np.array([1, 3, 1, 64])},\n 'reshape_2_data': {'shape': np.array([1, 3, 1, 64])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test3(self):\n # Original graph\n # data(1,3,64,64)-->Reduce(axis=[2,3],keep_dims=True)-->data(1,3,1,1)\n #\n # Reference graph\n # data(1,3,64,64)->Reshape->Pool(1,3,1,1)->Reshape(1,3,1,1)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reduce_1': {'axis': np.array([2, 3]), 'keep_dims': True, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([1, 3, 1, 1])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},\n 'reshape_1': {'dim': np.array([1, 3, 64 * 64, 1])},\n 'reshape_1_data': {'shape': np.array([1, 3, 64 * 64, 1])},\n 'pooling': {'window': np.array([1, 1, 64 * 64, 1])},\n 'pooling_data': {'shape': np.array([1, 3, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 3, 1, 1])},\n 'reshape_2_data': {'shape': np.array([1, 3, 1, 1])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test4(self):\n # Original graph\n # data(2,3,64,64)-->Reduce(axis=[1,2,3],keep_dims=False)-->data(2)\n #\n # Reference graph\n # data(2,3,64,64)->Reshape(2,1,3*64*64,1)->Pool(2,1,1,1)->Reshape(2)\n #\n graph = build_graph(nodes_attributes,\n 
[('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},\n 'reduce_1': {'axis': np.array([1, 2, 3]), 'keep_dims': False, 'reduce_type': 'Mean'},\n 'reduce_1_data': {'shape': np.array([2])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},\n 'reshape_1': {'dim': np.array([2, 1, 3 * 64 * 64, 1])},\n 'reshape_1_data': {'shape': np.array([2, 1, 3 * 64 * 64, 1])},\n 'pooling': {'window': np.array([1, 1, 3 * 64 * 64, 1])},\n 'pooling_data': {'shape': np.array([2, 1, 1, 1])},\n 'reshape_2': {'dim': np.array([2])},\n 'reshape_2_data': {'shape': np.array([2])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test5(self):\n # Original graph\n # data(1, 16, 64, 64, 64, 4)-->Reduce(axis=[5],keep_dims=False)-->data(1, 16, 64, 64, 64)\n #\n # Reference graph\n # data(1, 16, 64, 64, 64, 4)->Reshape(1*16*64*64, 64, 4, 1)->Pool(1, 1, 4, 1)->Reshape(1, 16, 64, 64, 64)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},\n 'reduce_1': {'axis': np.array([5]), 'keep_dims': False, 'reduce_type': 'max'},\n 'reduce_1_data': {'shape': np.array([1, 16, 64, 64, 64])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},\n 'reshape_1': {'dim': np.array([65536, 64, 4, 1])},\n 'reshape_1_data': {'shape': np.array([65536, 64, 4, 1])},\n 'pooling': {'window': np.array([1, 1, 4, 1])},\n 'pooling_data': {'shape': np.array([65536, 64, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 16, 64, 64, 64])},\n 'reshape_2_data': {'shape': np.array([1, 16, 64, 64, 64])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n def test6(self):\n # Original graph\n # data(1,64,1)-->Reduce(axis=-2,keep_dims=True, reduce_type=Sum)-->data(1,1,1)\n #\n # Reference graph\n # data(1,61,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)->Power(scale=64)\n #\n graph = build_graph(nodes_attributes,\n [('placeholder_1_data', 'reduce_1'),\n ('reduce_1', 'reduce_1_data'),\n ('reduce_1_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reduce_1': {'axis': np.array([-2]), 'keep_dims': True, 'reduce_type': 'Sum'},\n 'reduce_1_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n\n graph_ref = 
build_graph(nodes_attributes,\n [('placeholder_1_data', 'reshape_1'),\n ('reshape_1', 'reshape_1_data'),\n ('reshape_1_data', 'pooling'),\n ('pooling', 'pooling_data'),\n ('pooling_data', 'reshape_2'),\n ('reshape_2', 'reshape_2_data'),\n ('reshape_2_data', 'power'),\n ('power', 'power_data'),\n ('power_data', 'concat'),\n ],\n {'placeholder_1_data': {'shape': np.array([1, 64, 1])},\n 'reshape_1': {'dim': np.array([1, 1, 64, 1])},\n 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},\n 'pooling': {'window': np.array([1, 1, 64, 1])},\n 'pooling_data': {'shape': np.array([1, 1, 1, 1])},\n 'reshape_2': {'dim': np.array([1, 1, 1])},\n 'reshape_2_data': {'shape': np.array([1, 1, 1])},\n 'power': {'scale': 64.0},\n 'power_data': {'shape': np.array([1, 1, 1])},\n }, nodes_with_edges_only=True)\n\n pattern = ReduceReplacer()\n pattern.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)\n self.assertTrue(flag, resp)\n",
"\"\"\"\n Copyright (c) 2018-2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom mo.front.caffe.extractors.inner_product import inner_product_ext\nfrom mo.front.common.partial_infer.inner_product import caffe_inner_product\nfrom mo.utils.unittest.extractors import FakeMultiParam, FakeModelLayer\n\n\nclass FakeProtoLayer:\n def __init__(self, val):\n self.inner_product_param = val\n\n\nclass TestInnerProduct(unittest.TestCase):\n def test_inner_product_ext(self):\n params = {\n 'num_output': 10,\n 'bias_term': True\n }\n mean_blob = np.array([1., 2.])\n variance_blob = np.array([3., 4.])\n blobs = [mean_blob, variance_blob]\n res = inner_product_ext(FakeProtoLayer(FakeMultiParam(params)),\n FakeModelLayer(blobs))\n exp_res = {\n 'type': 'FullyConnected',\n 'out-size': 10,\n 'infer': caffe_inner_product,\n 'weights': mean_blob,\n 'biases': variance_blob,\n 'embedded_inputs': [\n (1, 'weights', {\n 'bin': 'weights'\n }),\n (2, 'biases', {\n 'bin': 'biases'\n })\n ]\n }\n for i in exp_res:\n if i in ('weights', 'biases'):\n np.testing.assert_array_equal(res[i], exp_res[i])\n else:\n self.assertEqual(res[i], exp_res[i])\n"
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.testing.assert_array_equal"
]
] |
Yfyangd/Computer_Vision_CS665 | [
"59dca3ce42f43b4aea446497a578f4a0eb93995d"
] | [
"Homography/hw2-2/homography.py"
] | [
"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\ndef get_homograph(u,v):\n A = np.array([[u[0][0], u[0][1], 1, 0, 0, 0, -1 * u[0][0] * v[0][0], -1 * u[0][1] * v[0][0]],\n [0, 0, 0, u[0][0], u[0][1], 1, -1 * u[0][0] * v[0][1], -1 * u[0][1] * v[0][1]],\n [u[1][0], u[1][1], 1, 0, 0, 0, -1 * u[1][0] * v[1][0], -1 * u[1][1] * v[1][0]],\n [0, 0, 0, u[1][0], u[1][1], 1, -1 * u[1][0] * v[1][1], -1 * u[1][1] * v[1][1]],\n [u[2][0], u[2][1], 1, 0, 0, 0, -1 * u[2][0] * v[2][0], -1 * u[2][1] * v[2][0]],\n [0, 0, 0, u[2][0], u[2][1], 1, -1 * u[2][0] * v[2][1], -1 * u[2][1] * v[2][1]],\n [u[3][0], u[3][1], 1, 0, 0, 0, -1 * u[3][0] * v[3][0], -1 * u[3][1] * v[3][0]],\n [0, 0, 0, u[3][0], u[3][1], 1, -1 * u[3][0] * v[3][1], -1 * u[3][1] * v[3][1]]\n ])\n b = np.array([[v[0][0]],\n [v[0][1]],\n [v[1][0]],\n [v[1][1]],\n [v[2][0]],\n [v[2][1]],\n [v[3][0]],\n [v[3][1]]\n ])\n tmp = np.dot(np.linalg.inv(A), b)\n H = np.array([[tmp[0][0], tmp[1][0], tmp[2][0]],\n [tmp[3][0], tmp[4][0], tmp[5][0]],\n [tmp[6][0], tmp[7][0], 1]\n ])\n return H\n\n\ndef interpolation(img, new_x, new_y):\n fx = round(new_x - int(new_x), 2)\n fy = round(new_y - int(new_y), 2)\n p = np.zeros((3,))\n p += (1 - fx) * (1 - fy) * img[int(new_y), int(new_x)]\n p += (1 - fx) * fy * img[int(new_y) + 1, int(new_x)]\n p += fx * (1 - fy) * img[int(new_y), int(new_x) + 1]\n p += fx * fy * img[int(new_y) + 1, int(new_x) + 1]\n return p\n\ndef forward_warping(u,v,input_image,canvas):\n matrix = get_homograph(u,v)\n i0_max = u[0:4,0:1].max()\n i0_min = u[0:4,0:1].min()\n i1_max = u[0:4,1:2].max()\n i1_min = u[0:4,1:2].min()\n i0_range = i0_max-i0_min\n i1_range = i1_max-i1_min\n \n for i in range(i1_range):\n for j in range(i0_range):\n tmp2 = np.dot(matrix, np.array([[j+i0_min, i+i1_min, 1]]).T)\n x, y = int(tmp2[0][0] / tmp2[2][0]), int(tmp2[1][0] / tmp2[2][0])\n canvas[y][x] = input_image[i+i1_min][j+i0_min]\n return canvas\n\ndef backward_warping(u,v,input_image,canvas):\n matrix = get_homograph(u,v) # v: output, u: input\n i0_max = u[0:4,0:1].max()\n i0_min = u[0:4,0:1].min()\n i1_max = u[0:4,1:2].max()\n i1_min = u[0:4,1:2].min()\n i0_range = i0_max-i0_min\n i1_range = i1_max-i1_min\n for j in range(i1_range):\n for i in range(i0_range):\n new_pos = np.dot(matrix, np.array([[i+i0_min, j+i1_min, 1]]).T)\n new_x, new_y = new_pos[0][0] / new_pos[2][0], new_pos[1][0] / new_pos[2][0]\n res = interpolation(input_image, new_x, new_y)\n canvas[j+i1_min][i+i0_min] = res\n return canvas"
] | [
[
"numpy.array",
"numpy.linalg.inv",
"numpy.zeros"
]
] |
The-SocialLion/Speech-Emotion-Recognition-using-MLP-Classifier | [
"5c4101ebbe2b43db28dbb97f94dc3001bdf56ff8"
] | [
"sp.py"
] | [
"import librosa\r\nimport soundfile\r\nimport os, glob, pickle\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ndef extract_feature(file_name, mfcc, chroma, mel):\r\n with soundfile.SoundFile(file_name) as sound_file:\r\n X = sound_file.read(dtype=\"float32\")\r\n sample_rate=sound_file.samplerate\r\n if chroma:\r\n stft=np.abs(librosa.stft(X))\r\n result=np.array([])\r\n if mfcc:\r\n mfccs=np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)\r\n result=np.hstack((result, mfccs))\r\n if chroma:\r\n chroma=np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)\r\n result=np.hstack((result, chroma))\r\n if mel:\r\n mel=np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)\r\n result=np.hstack((result, mel))\r\n return result\r\n\r\nemotions={\r\n '01':'neutral',\r\n '02':'calm',\r\n '03':'happy',\r\n '04':'sad',\r\n '05':'angry',\r\n '06':'fearful',\r\n '07':'disgust',\r\n '08':'surprised'\r\n}\r\n#DataFlair - Emotions to observe\r\nobserved_emotions=['calm', 'happy', 'fearful', 'disgust'] \r\n\r\ndef load_data(ts):\r\n tr=abs(1-ts)\r\n x,y=[],[]\r\n for file in glob.glob(\"D:\\\\python\\\\dl programs\\\\SP\\\\DATA\\\\Actor_*\\\\*.wav\"):\r\n file_name=os.path.basename(file)\r\n emotion=emotions[file_name.split(\"-\")[2]]\r\n print(emotion)\r\n if emotion not in observed_emotions:\r\n continue\r\n feature=extract_feature(file, mfcc=True, chroma=True, mel=True)\r\n x.append(feature)\r\n y.append(emotion)\r\n return train_test_split(np.array(x), y, test_size=ts, train_size=tr ,random_state=9)\r\nts=0.25\r\nload_data(ts)\r\nx_train,x_test,y_train,y_test=load_data(ts)\r\nprint((x_train.shape[0], x_test.shape[0]))\r\nprint(f'Features extracted: {x_train.shape[1]}')\r\n#DataFlair - Initialize the Multi Layer Perceptron Classifier\r\nmodel=MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)\r\nmodel.fit(x_train,y_train)\r\ny_pred=model.predict(x_test)\r\naccuracy=accuracy_score(y_true=y_test, y_pred=y_pred)\r\n#DataFlair - Print the accuracy\r\nprint(\"Accuracy: {:.2f}%\".format(accuracy*100))\r\n"
] | [
[
"sklearn.metrics.accuracy_score",
"sklearn.neural_network.MLPClassifier",
"numpy.hstack",
"numpy.array"
]
] |
brohrer/nn_methods | [
"acf3d1369e240971e5ee05696610c59c4c993a30"
] | [
"cottonwood/core/layers/dense.py"
] | [
"import numpy as np\nfrom cottonwood.core.activation import Tanh\nfrom cottonwood.core.initializers import LSUV\nfrom cottonwood.core.layers.generic_layer import GenericLayer\nfrom cottonwood.core.optimizers import SGD\nimport cottonwood.core.toolbox as tb\n\n\nclass Dense(GenericLayer):\n def __init__(\n self,\n n_outputs,\n m_inputs=None,\n activation_function=None,\n dropout_rate=0,\n initializer=None,\n previous_layer=None,\n optimizer=None,\n ):\n self.previous_layer = previous_layer\n if m_inputs is not None:\n self.m_inputs = m_inputs\n else:\n self.m_inputs = self.previous_layer.y.size\n self.n_outputs = int(n_outputs)\n self.activation_function = activation_function\n self.dropout_rate = dropout_rate\n\n if activation_function is None:\n self.activation_function = Tanh()\n else:\n self.activation_function = activation_function\n\n if initializer is None:\n self.initializer = LSUV()\n else:\n self.initializer = initializer\n\n if optimizer is None:\n self.optimizer = SGD()\n else:\n self.optimizer = optimizer\n\n # Choose random weights.\n # Inputs match to rows. Outputs match to columns.\n # Add one to m_inputs to account for the bias term.\n self.weights = self.initializer.initialize(\n self.m_inputs + 1, self.n_outputs)\n\n self.reset()\n self.regularizers = []\n\n def __str__(self):\n \"\"\"\n Make a descriptive, human-readable string for this layer.\n \"\"\"\n str_parts = [\n \"fully connected\",\n f\"number of inputs: {self.m_inputs}\",\n f\"number of outputs: {self.n_outputs}\",\n \"activation function:\" + tb.indent(\n self.activation_function.__str__()),\n \"initialization:\" + tb.indent(self.initializer.__str__()),\n \"optimizer:\" + tb.indent(self.optimizer.__str__()),\n ]\n for regularizer in self.regularizers:\n str_parts.append(\n \"regularizer:\" + tb.indent(regularizer.__str__()))\n return \"\\n\".join(str_parts)\n\n def add_regularizer(self, new_regularizer):\n self.regularizers.append(new_regularizer)\n\n def reset(self):\n self.x = np.zeros((1, self.m_inputs))\n self.y = np.zeros((1, self.n_outputs))\n self.de_dx = np.zeros((1, self.m_inputs))\n self.de_dy = np.zeros((1, self.n_outputs))\n\n def forward_pass(self, evaluating=False, **kwargs):\n \"\"\"\n Propagate the inputs forward through the network.\n\n evaluating: boolean\n Is this part of a training run or an evaluation run?\n \"\"\"\n if self.previous_layer is not None:\n self.x += self.previous_layer.y\n # Apply dropout only during training runs.\n if evaluating:\n dropout_rate = 0\n else:\n dropout_rate = self.dropout_rate\n\n if dropout_rate > 0:\n self.i_dropout = np.zeros(self.x.size, dtype=bool)\n self.i_dropout[np.where(\n np.random.uniform(size=self.x.size) < dropout_rate)] = True\n self.x[:, self.i_dropout] = 0\n self.x[:, np.logical_not(self.i_dropout)] *= 1 / (1 - dropout_rate)\n else:\n self.i_dropout = None\n\n bias = np.ones((1, 1))\n x_w_bias = np.concatenate((self.x, bias), axis=1)\n v = x_w_bias @ self.weights\n self.y = self.activation_function.calc(v)\n\n def backward_pass(self):\n \"\"\"\n Propagate the outputs back through the layer.\n \"\"\"\n bias = np.ones((1, 1))\n x_w_bias = np.concatenate((self.x, bias), axis=1)\n\n dy_dv = self.activation_function.calc_d(self.y)\n # v = self.x @ self.weights\n dv_dw = x_w_bias.transpose()\n dv_dx = self.weights.transpose()\n\n dy_dw = dv_dw @ dy_dv\n self.de_dw = self.de_dy * dy_dw\n\n for regularizer in self.regularizers:\n regularizer.pre_optim_update(self)\n\n self.optimizer.update(self)\n\n for regularizer in self.regularizers:\n 
regularizer.post_optim_update(self)\n\n self.de_dx = (self.de_dy * dy_dv) @ dv_dx\n\n # Remove the dropped-out inputs from this run.\n de_dx_no_bias = self.de_dx[:, :-1]\n\n if self.i_dropout is not None:\n de_dx_no_bias[:, self.i_dropout] = 0\n\n # Remove the bias node from the gradient vector.\n self.previous_layer.de_dy += de_dx_no_bias\n"
] | [
[
"numpy.random.uniform",
"numpy.ones",
"numpy.zeros",
"numpy.logical_not",
"numpy.concatenate"
]
] |
TinghuiWang/pyActLearn | [
"d858136e86324fac51b0943765ef60bd405e31d1"
] | [
"pyActLearn/sensors/sensor2vec.py"
] | [
"import math\nimport numpy as np\nimport tensorflow as tf\nfrom ..learning.nn.injectors import SkipGramInjector\n\n\ndef sensor2vec(num_sensors, sensor_event_list, embedding_size=20,\n batch_size=128, num_skips=8, skip_window=5,\n num_neg_samples=64, learning_rate=1.0):\n \"\"\"Sensor to Vector\n \"\"\"\n if num_neg_samples > num_sensors:\n num_neg_samples = num_sensors\n # Initialize a SkipGram Injector\n injector = SkipGramInjector(sensor_event_list, batch_size, num_skips, skip_window)\n # Build Training Model\n graph = tf.Graph()\n with graph.as_default():\n # Input Place Holder\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n # As we normally do not have too many sensors - it is OK to use all of them\n valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)\n # Only CPU supports NCE loss\n with tf.device('/cpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))\n embed = tf.nn.embedding_lookup(embeddings, train_inputs)\n\n # Construct the variables for the NCE loss\n nce_weights = tf.Variable(\n tf.truncated_normal([num_sensors, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([num_sensors]))\n\n # Compute the average NCE loss for the batch.\n # tf.nce_loss automatically draws a new sample of the negative labels each\n # time we evaluate the loss.\n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=train_labels,\n inputs=embed,\n num_sampled=num_neg_samples,\n num_classes=num_sensors))\n\n # Construct the Optimizer\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n\n # Add variable initializer.\n init = tf.initialize_all_variables()\n\n # Begin training.\n num_steps = 100001\n\n with tf.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n print(\"Initialized\")\n\n average_loss = 0\n for step in range(num_steps):\n batch_inputs, batch_labels = injector.next_batch()\n feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % 2000 == 0:\n if step > 0:\n average_loss /= 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n\n final_embeddings = normalized_embeddings.eval()\n final_similarity = 1 - similarity.eval()\n distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:, None]\n return final_embeddings, distance_matrix\n\n\n\ndef sensor2vec_data(sensor_list, event_list, embedding_size=20,\n batch_size=128, num_skips=8, skip_window=5,\n num_neg_samples=64, learning_rate=1.0, ignore_off=True):\n \"\"\"Transform sensor to high dimensional space\n\n Similar to word embedding used in natural language processing 
system, we want\n to represent sensors using in a synthesized vector space as well, instead of\n using an arbitrary labels for each sensors without any useful information.\n\n The methods used to find word embeddings can be classified into two categories:\n count-based methods (Latent Semantic Analysis) and predictive models.\n In this implementation for mapping sensor into high dimension vector space, we\n use skip-gram negative sampling models.\n\n Args:\n sensor_list (:obj:`list` of :obj:`dict`): List of dictionary containing\n sensor information.\n event_list (:obj:`list` of :obj:`dict`): List of events.\n embedding_size (:obj:`int`): The size of embedding vector.\n batch_size (:obj:`int`): The number of batch used in training\n num_skips (:obj:`int`): How many times to re-use an input to generate a label\n in skip-gram model.\n skip_window (:obj:`int`): How many items to consider left or right in skip-gram\n model.\n num_neg_samples (:obj:`int`): Number of negative samples to draw from the vocabulary.\n ignore_off (:obj:`bool`): Ignore motion-sensor with ``Off`` state in event.rst list.\n\n Please refer to :func:`sensor_distance` for an example of ``sensor_list``.\n Please refer to :func:`sensor_mi_distance` for an example of ``event_list``.\n \"\"\"\n # Put sensor in hash table for fast fetch of index\n num_sensors = len(sensor_list)\n # Negative samples cannot exceed sensor numbers\n if num_neg_samples > num_sensors:\n num_neg_samples = num_sensors\n # Store sensor ID in hash table for faster access\n sensor_dict = {}\n for i in range(num_sensors):\n sensor_dict[sensor_list[i]['name']] = i\n # Generate event.rst sensor list\n event_sensor_list = []\n for event_entry in event_list:\n if ignore_off and event_entry['sensor_status'].upper() == \"OFF\":\n continue\n event_sensor_list.append(sensor_dict[event_entry['sensor_id']])\n # Initialize a SkipGram Injector\n injector = SkipGramInjector(event_sensor_list, batch_size, num_skips, skip_window)\n # Build Training Model\n graph = tf.Graph()\n with graph.as_default():\n # Input Place Holder\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n # As we normally do not have too many sensors - it is OK to use all of them\n valid_dataset = tf.constant([i for i in range(num_sensors)], dtype=tf.int32)\n # Only CPU supports NCE loss\n with tf.device('/cpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([num_sensors, embedding_size], -1.0, 1.0))\n embed = tf.nn.embedding_lookup(embeddings, train_inputs)\n\n # Construct the variables for the NCE loss\n nce_weights = tf.Variable(\n tf.truncated_normal([num_sensors, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([num_sensors]))\n\n # Compute the average NCE loss for the batch.\n # tf.nce_loss automatically draws a new sample of the negative labels each\n # time we evaluate the loss.\n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=train_labels,\n inputs=embed,\n num_sampled=num_neg_samples,\n num_classes=num_sensors))\n\n # Construct the Optimizer\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, 
valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n\n # Add variable initializer.\n init = tf.initialize_all_variables()\n\n # Begin training.\n num_steps = 100001\n\n with tf.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n print(\"Initialized\")\n\n average_loss = 0\n for step in range(num_steps):\n batch_inputs, batch_labels = injector.next_batch()\n feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % 2000 == 0:\n if step > 0:\n average_loss /= 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print(\"Average loss at step \", step, \": \", average_loss)\n average_loss = 0\n\n # Note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % 10000 == 0:\n sim = similarity.eval()\n for i in range(num_sensors):\n valid_sensor = sensor_list[i]['name']\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k + 1]\n log_str = \"Nearest to %s:\" % valid_sensor\n for k in range(top_k):\n close_sensor = sensor_list[nearest[k]]['name']\n log_str = \"%s %s,\" % (log_str, close_sensor)\n print(log_str)\n final_embeddings = normalized_embeddings.eval()\n final_similarity = 1 - similarity.eval()\n distance_matrix = final_similarity / np.max(final_similarity, axis=1)[:,None]\n\n # try:\n # from sklearn.manifold import TSNE\n # import matplotlib.pyplot as plt\n #\n # tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\n # low_dim_embs = tsne.fit_transform(final_embeddings)\n # labels = [sensor_list[i]['name'] for i in range(num_sensors)]\n #\n # assert low_dim_embs.shape[0] >= len(labels), \"More labels than embeddings\"\n # plt.figure(figsize=(18, 18)) # in inches\n # for i, label in enumerate(labels):\n # x, y = low_dim_embs[i, :]\n # plt.scatter(x, y)\n # plt.annotate(label,\n # xy=(x, y),\n # xytext=(5, 2),\n # textcoords='offset points',\n # ha='right',\n # va='bottom')\n # plt.show()\n # except ImportError:\n # print(\"Please install sklearn, matplotlib, and scipy to visualize embeddings.\")\n\n return final_embeddings, distance_matrix\n"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.nn.nce_loss",
"tensorflow.device",
"tensorflow.matmul",
"tensorflow.Graph",
"tensorflow.random_uniform",
"numpy.max",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.nn.embedding_lookup"
]
] |
Yelloooowww/Deep-Reinforcement-Learning-Hands-On | [
"d1a3a1272d7ceff8796fe412deb4e4d5bd6665a5"
] | [
"Chapter03/03_atari_gan.py"
] | [
"#!/usr/bin/env python\nimport random\nimport argparse\nimport cv2\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nimport torchvision.utils as vutils\n\nimport gym\nimport gym.spaces\n\nimport numpy as np\n\nlog = gym.logger\nlog.set_level(gym.logger.INFO)\n\nLATENT_VECTOR_SIZE = 100\nDISCR_FILTERS = 64\nGENER_FILTERS = 64\nBATCH_SIZE = 16\n\n# dimension input image will be rescaled\nIMAGE_SIZE = 64\n\nLEARNING_RATE = 0.0001\nREPORT_EVERY_ITER = 25\nSAVE_IMAGE_EVERY_ITER = 1000\n\n\nclass InputWrapper(gym.ObservationWrapper):\n \"\"\"\n Preprocessing of input numpy array:\n 1. resize image into predefined size\n 2. move color channel axis to a first place\n \"\"\"\n def __init__(self, *args):\n super(InputWrapper, self).__init__(*args)\n assert isinstance(self.observation_space, gym.spaces.Box)\n old_space = self.observation_space\n self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high),\n dtype=np.float32)\n\n def observation(self, observation):\n # resize image\n new_obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))\n # transform (210, 160, 3) -> (3, 210, 160)\n new_obs = np.moveaxis(new_obs, 2, 0)\n return new_obs.astype(np.float32)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_shape):\n super(Discriminator, self).__init__()\n # this pipe converges image into the single number\n self.conv_pipe = nn.Sequential(\n nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,\n kernel_size=4, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(DISCR_FILTERS*2),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(DISCR_FILTERS * 4),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(DISCR_FILTERS * 8),\n nn.ReLU(),\n nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,\n kernel_size=4, stride=1, padding=0),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n conv_out = self.conv_pipe(x)\n return conv_out.view(-1, 1).squeeze(dim=1)\n\n\nclass Generator(nn.Module):\n def __init__(self, output_shape):\n super(Generator, self).__init__()\n # pipe deconvolves input vector into (3, 64, 64) image\n self.pipe = nn.Sequential(\n nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,\n kernel_size=4, stride=1, padding=0),\n nn.BatchNorm2d(GENER_FILTERS * 8),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(GENER_FILTERS * 4),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(GENER_FILTERS * 2),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,\n kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(GENER_FILTERS),\n nn.ReLU(),\n nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],\n kernel_size=4, stride=2, padding=1),\n nn.Tanh()\n )\n\n def forward(self, x):\n return self.pipe(x)\n\n\ndef iterate_batches(envs, batch_size=BATCH_SIZE):\n batch = [e.reset() for e in envs]\n env_gen = iter(lambda: random.choice(envs), None)\n\n while True:\n e = next(env_gen)\n obs, reward, 
is_done, _ = e.step(e.action_space.sample())\n if np.mean(obs) > 0.01:\n batch.append(obs)\n if len(batch) == batch_size:\n # Normalising input between -1 to 1\n batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0\n yield torch.tensor(batch_np)\n batch.clear()\n if is_done:\n e.reset()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # parser.add_argument(\"--cuda\", default=False, action='store_true', help=\"Enable cuda computation\")\n parser.add_argument(\"--cuda\", default=True, action='store_true', help=\"Enable cuda computation\")\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]\n input_shape = envs[0].observation_space.shape\n\n net_discr = Discriminator(input_shape=input_shape).to(device)\n net_gener = Generator(output_shape=input_shape).to(device)\n\n objective = nn.BCELoss()\n gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\n dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\n writer = SummaryWriter()\n\n gen_losses = []\n dis_losses = []\n iter_no = 0\n\n true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)\n fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)\n\n for batch_v in iterate_batches(envs):\n # generate extra fake samples, input is 4D: batch, filters, x, y\n gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)\n batch_v = batch_v.to(device)\n gen_output_v = net_gener(gen_input_v)\n\n # train discriminator\n dis_optimizer.zero_grad()\n dis_output_true_v = net_discr(batch_v)\n dis_output_fake_v = net_discr(gen_output_v.detach())\n dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)\n dis_loss.backward()\n dis_optimizer.step()\n dis_losses.append(dis_loss.item())\n\n # train generator\n gen_optimizer.zero_grad()\n dis_output_v = net_discr(gen_output_v)\n gen_loss_v = objective(dis_output_v, true_labels_v)\n gen_loss_v.backward()\n gen_optimizer.step()\n gen_losses.append(gen_loss_v.item())\n\n iter_no += 1\n if iter_no % REPORT_EVERY_ITER == 0:\n log.info(\"Iter %d: gen_loss=%.3e, dis_loss=%.3e\", iter_no, np.mean(gen_losses), np.mean(dis_losses))\n writer.add_scalar(\"gen_loss\", np.mean(gen_losses), iter_no)\n writer.add_scalar(\"dis_loss\", np.mean(dis_losses), iter_no)\n gen_losses = []\n dis_losses = []\n if iter_no % SAVE_IMAGE_EVERY_ITER == 0:\n writer.add_image(\"fake\", vutils.make_grid(gen_output_v.data[:64], normalize=True), iter_no)\n writer.add_image(\"real\", vutils.make_grid(batch_v.data[:64], normalize=True), iter_no)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.FloatTensor",
"numpy.array",
"numpy.moveaxis",
"torch.tensor",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.zeros",
"torch.nn.BCELoss",
"torch.nn.Sigmoid",
"torch.device",
"numpy.mean",
"torch.nn.ConvTranspose2d"
]
] |
csjtx1021/CAGG | [
"67fde2f1488ee6e2ff137e87860b5243c5b5fe7c"
] | [
"CAGG-NAS/tools/nn/nn_visualise.py"
] | [
"\"\"\"\n Harness for visualising a neural network.\n -- kandasamy@cs.cmu.edu\n\"\"\"\n\n# pylint: disable=invalid-name\n\nimport functools\nimport graphviz as gv\nimport os\nimport networkx as nx\nimport numpy as np\n\n# Parameters for plotting\n_SAVE_FORMAT = 'eps'\n# _SAVE_FORMAT = 'png'\n_LAYER_SHAPE = 'rectangle'\n_IPOP_SHAPE = 'circle'\n_LAYER_FONT = 'DejaVuSans'\n_IPOP_FONT = 'Helvetica'\n_LAYER_FONTSIZE = '16'\n_FILLCOLOR = 'transparent'\n_IPOP_FONTSIZE = '12'\n_IPOP_FILLCOLOR = '#ffc0cb'\n_DECISION_FILLCOLOR = '#98fb98'\n_GRAPH_STYLES = {\n 'graph': {\n 'fontsize': _LAYER_FONTSIZE,\n 'rankdir': 'TB',\n 'label': None,\n },\n 'nodes': {\n },\n 'edges': {\n 'arrowhead': 'open',\n 'fontsize': '12',\n }\n}\n\nGV_GRAPH = functools.partial(gv.Graph, format=_SAVE_FORMAT)\nGV_DIGRAPH = functools.partial(gv.Digraph, format=_SAVE_FORMAT)\n\n# Utilities for adding nodes, edges and styles -------------------------------------------\ndef add_nodes(graph, nodes):\n \"\"\" Adds nodes to the graph. \"\"\"\n for n in nodes:\n if isinstance(n, tuple):\n graph.node(n[0], **n[1])\n else:\n graph.node(n)\n return graph\n\ndef add_edges(graph, edges):\n \"\"\" Adds edges to the graph. \"\"\"\n # pylint: disable=star-args\n for e in edges:\n if isinstance(e[0], tuple):\n graph.edge(*e[0], **e[1])\n else:\n graph.edge(*e)\n return graph\n\ndef apply_styles(graph, styles):\n \"\"\" Applies styles to the graph. \"\"\"\n graph.graph_attr.update(\n ('graph' in styles and styles['graph']) or {}\n )\n graph.node_attr.update(\n ('nodes' in styles and styles['nodes']) or {}\n )\n graph.edge_attr.update(\n ('edges' in styles and styles['edges']) or {}\n )\n return graph\n\n# Wrappers for tedious routines ----------------------------------------------------------\ndef _get_ip_layer(layer_idx):\n \"\"\" Returns a tuple representing the input layer. \"\"\"\n return (str(layer_idx), {'label': 'i/p', 'shape': 'circle', 'style': 'filled',\n 'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,\n 'fontname': _IPOP_FONT})\n\ndef _get_op_layer(layer_idx):\n \"\"\" Returns a tuple representing the output layer. \"\"\"\n return (str(layer_idx), {'label': 'o/p', 'shape': 'circle', 'style': 'filled',\n 'fillcolor': _IPOP_FILLCOLOR, 'fontsize': _IPOP_FONTSIZE,\n 'fontname': _IPOP_FONT})\n\ndef _get_layer(layer_idx, nn, for_pres):\n \"\"\" Returns a tuple representing the layer label. \"\"\"\n if nn.layer_labels[layer_idx] in ['ip', 'op']:\n fill_colour = _IPOP_FILLCOLOR\n elif nn.layer_labels[layer_idx] in ['softmax', 'linear']:\n fill_colour = _DECISION_FILLCOLOR\n else:\n fill_colour = _FILLCOLOR\n label = nn.get_layer_descr(layer_idx, for_pres)\n return (str(layer_idx), {'label': label, 'shape': 'rectangle', 'fillcolor': fill_colour,\n 'style': 'filled', 'fontname': _LAYER_FONT}),((layer_idx), nn.layer_labels[layer_idx],(nn.num_units_in_each_layer[layer_idx]))\n\n\n\n\n\ndef _get_edge(layer_idx_start, layer_idx_end):\n \"\"\" Returns a tuple which is an edge. \"\"\"\n return (str(layer_idx_start), str(layer_idx_end))\n\ndef _get_edges(conn_mat):\n \"\"\" Returns all edges. \"\"\"\n starts, ends = conn_mat.nonzero()\n return [_get_edge(starts[i], ends[i]) for i in range(len(starts))]\n\n# Main API ------------------------------------------------------------------------------\ndef visualise_nn(nn, save_file_prefix, fig_label=None, for_pres=True):\n \"\"\" The main API which will be used to visualise the network. 
\"\"\"\n # First create nodes in the order\n nodes = [_get_layer(i, nn, for_pres)[0] for i in range(nn.num_layers)]\n nodes_my = [_get_layer(i, nn, for_pres)[1] for i in range(nn.num_layers)]\n #print(\"nodes_my=\",nodes_my)\n edges = _get_edges(nn.conn_mat)\n edges_my = [(int(s),int(t)) for s,t in edges]\n #print(\"edges_my=\",edges_my)\n nn_graph = GV_DIGRAPH()\n add_nodes(nn_graph, nodes)\n add_edges(nn_graph, edges)\n graph_styles = _GRAPH_STYLES\n graph_styles['graph']['label'] = fig_label\n apply_styles(nn_graph, graph_styles)\n nn_graph.render(save_file_prefix)\n \n if os.path.exists(save_file_prefix):\n # graphviz also creates another file in the name of the prefix. delete it.\n os.remove(save_file_prefix)\n\n return tonxgraph(nodes_my,edges_my)\n\nNODE_TYPES = ['ip', 'op', 'linear']\nhidden_list = [8,16,32,64,128,256,512,1024]\nfor i in hidden_list:\n NODE_TYPES.append(\"relu-%s\"%i)\n NODE_TYPES.append(\"crelu-%s\"%i)\n NODE_TYPES.append(\"leaky-relu-%s\"%i)\n NODE_TYPES.append(\"softplus-%s\"%i)\n NODE_TYPES.append(\"elu-%s\"%i)\n NODE_TYPES.append(\"logistic-%s\"%i)\n NODE_TYPES.append(\"tanh-%s\"%i)\n\n\ndef tonxgraph(nodes_my,edges_my):\n g = {\"x\":[],\"edge_index\":[],\"edge_attr\":[]}\n \n for n_idx, type, num_hidden in nodes_my:\n n_idx = int(n_idx)\n if type=='ip' or type=='op' or type=='linear':\n g[\"x\"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index(type)])\n else:\n num_hidden = np.random.choice(hidden_list)\n g[\"x\"].append(np.eye(len(NODE_TYPES))[NODE_TYPES.index(\"%s-%s\"%(type,num_hidden))])\n row = []\n col = []\n for s, t in edges_my:\n row.append(s)\n col.append(t)\n g[\"edge_attr\"].append(np.ones(1))\n g[\"edge_index\"].append(row)\n g[\"edge_index\"].append(col)\n\n g[\"x\"]=np.array(g[\"x\"])\n g[\"edge_attr\"]=np.array(g[\"edge_attr\"])\n\n print(\"+\",g[\"x\"].shape)\n assert g[\"x\"].shape[0] <= 20\n \n return g\n\n\n\n\n #g_nx = nx.nx_agraph.from_agraph(nn_graph)\n #A = nx.nx_agraph.to_agraph(g_nx) # convert to a graphviz graph\n #A.layout() # neato layout\n #A.draw(\"a.ps\")\n\ndef visualise_list_of_nns(list_of_nns, save_dir, fig_labels=None, fig_file_names=None,\n for_pres=False):\n \"\"\" Visualises a list of neural networks. \"\"\"\n g_list = []\n if fig_labels is None:\n fig_labels = [None] * len(list_of_nns)\n if fig_file_names is None:\n fig_file_names = [str(idx) for idx in range(len(list_of_nns))]\n for idx, nn in enumerate(list_of_nns):\n save_file_prefix = os.path.join(save_dir, fig_file_names[idx])\n g = visualise_nn(nn, save_file_prefix, fig_labels[idx], for_pres)\n g_list.append(g)\n return g_list\n\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.random.choice"
]
] |
songzy12/MatchZoo | [
"a43dc3b1d43b3f2a1b43b11d3fc4009616507e23"
] | [
"matchzoo/layers/matching_layer.py"
] | [
"\"\"\"An implementation of Matching Layer.\"\"\"\nimport typing\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n\nclass MatchingLayer(layers.Layer):\n \"\"\"\n Layer that computes a matching matrix between samples in two tensors.\n\n :param normalize: Whether to L2-normalize samples along the\n dot product axis before taking the dot product.\n If set to True, then the output of the dot product\n is the cosine proximity between the two samples.\n :param matching_type: the similarity function for matching\n :param kwargs: Standard layer keyword arguments.\n\n Examples:\n >>> import matchzoo as mz\n >>> layer = mz.layers.MatchingLayer(matching_type='dot',\n ... normalize=True)\n >>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10\n >>> layer.build([[num_batch, left_len, num_dim],\n ... [num_batch, right_len, num_dim]])\n\n \"\"\"\n\n def __init__(self, normalize: bool = False,\n matching_type: str = 'dot', **kwargs):\n \"\"\":class:`MatchingLayer` constructor.\"\"\"\n super().__init__(**kwargs)\n self._normalize = normalize\n self._validate_matching_type(matching_type)\n self._matching_type = matching_type\n self._shape1 = None\n self._shape2 = None\n\n @classmethod\n def _validate_matching_type(cls, matching_type: str = 'dot'):\n valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']\n if matching_type not in valid_matching_type:\n raise ValueError(f\"{matching_type} is not a valid matching type, \"\n f\"{valid_matching_type} expected.\")\n\n def build(self, input_shape: list):\n \"\"\"\n Build the layer.\n\n :param input_shape: the shapes of the input tensors,\n for MatchingLayer we need tow input tensors.\n \"\"\"\n # Used purely for shape validation.\n if not isinstance(input_shape, list) or len(input_shape) != 2:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on a list of 2 inputs.')\n self._shape1 = input_shape[0]\n self._shape2 = input_shape[1]\n for idx in 0, 2:\n if self._shape1[idx] != self._shape2[idx]:\n raise ValueError(\n 'Incompatible dimensions: '\n f'{self._shape1[idx]} != {self._shape2[idx]}.'\n f'Layer shapes: {self._shape1}, {self._shape2}.'\n )\n\n def call(self, inputs: list, **kwargs) -> typing.Any:\n \"\"\"\n The computation logic of MatchingLayer.\n\n :param inputs: two input tensors.\n \"\"\"\n x1 = inputs[0]\n x2 = inputs[1]\n if self._matching_type == 'dot':\n if self._normalize:\n x1 = tf.math.l2_normalize(x1, axis=2)\n x2 = tf.math.l2_normalize(x2, axis=2)\n return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)\n else:\n if self._matching_type == 'mul':\n def func(x, y):\n return x * y\n elif self._matching_type == 'plus':\n def func(x, y):\n return x + y\n elif self._matching_type == 'minus':\n def func(x, y):\n return x - y\n elif self._matching_type == 'concat':\n def func(x, y):\n return tf.concat([x, y], axis=3)\n else:\n raise ValueError(f\"Invalid matching type.\"\n f\"{self._matching_type} received.\"\n f\"Mut be in `dot`, `mul`, `plus`, \"\n f\"`minus` and `concat`.\")\n x1_exp = tf.stack([x1] * self._shape2[1], 2)\n x2_exp = tf.stack([x2] * self._shape1[1], 1)\n return func(x1_exp, x2_exp)\n\n def compute_output_shape(self, input_shape: list) -> tuple:\n \"\"\"\n Calculate the layer output shape.\n\n :param input_shape: the shapes of the input tensors,\n for MatchingLayer we need tow input tensors.\n \"\"\"\n if not isinstance(input_shape, list) or len(input_shape) != 2:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on a list of 2 inputs.')\n shape1 = list(input_shape[0])\n 
shape2 = list(input_shape[1])\n if len(shape1) != 3 or len(shape2) != 3:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on 2 inputs with 3 dimensions.')\n if shape1[0] != shape2[0] or shape1[2] != shape2[2]:\n raise ValueError('A `MatchingLayer` layer should be called '\n 'on 2 inputs with same 0,2 dimensions.')\n\n if self._matching_type in ['mul', 'plus', 'minus']:\n return shape1[0], shape1[1], shape2[1], shape1[2]\n elif self._matching_type == 'dot':\n return shape1[0], shape1[1], shape2[1], 1\n elif self._matching_type == 'concat':\n return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]\n else:\n raise ValueError(f\"Invalid `matching_type`.\"\n f\"{self._matching_type} received.\"\n f\"Must be in `mul`, `plus`, `minus` \"\n f\"`dot` and `concat`.\")\n\n def get_config(self) -> dict:\n \"\"\"Get the config dict of MatchingLayer.\"\"\"\n config = {\n 'normalize': self._normalize,\n 'matching_type': self._matching_type,\n }\n base_config = super(MatchingLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] | [
[
"tensorflow.math.l2_normalize",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.einsum"
]
] |
738844605/DualResidualNetworks | [
"6d025e074d4c914fae86f51cd8b93569a2c05335",
"6d025e074d4c914fae86f51cd8b93569a2c05335"
] | [
"test/noise.py",
"train/haze.py"
] | [
"# python 2.7, pytorch 0.3.1\n\nimport os, sys\nsys.path.insert(1, '../')\nimport torch\nimport cv2\nimport shutil\nimport torchvision\nimport numpy as np\nimport itertools\nimport subprocess\nimport random\n\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\n\nfrom pietorch import data_convertors\nfrom pietorch.DuRN_P import cleaner as cleaner\nfrom pietorch.DuRN_P_no_norm import cleaner as cleaner_no_norm\nfrom pietorch.pytorch_ssim import ssim as ssim\nfrom skimage.measure import compare_psnr as psnr\nfrom skimage.measure import compare_ssim as ski_ssim\n\n#------ Options -------\ntag = 'DuRN_P_no_norm' # 'DuRN_P' or 'DuRN_P_no_norm' for gaussion or real-world noise removal\ndata_name = 'RealNoiseHKPoly' # 'BSD_gray' or 'RealNoiseHKPoly'\n\n# Gaussian noise level. Comment it if you set data_name = 'RealNoiseHKPoly'.\n#noise_level = 70 # choose one from [30, 50, 70]\n#----------------------\n\nif data_name == 'BSD_gray': \n testroot = \"../data/\"+data_name+\"/test/\"\n test_list_pth = '../lists/'+data_name+'/testlist.txt'\nelse:\n testroot = \"../data/\"+data_name+\"/test1/\"\n test_list_pth = '../lists/'+data_name+'/test1_list.txt'\n\nPretrained = '../trainedmodels/'+data_name+'/'+tag+'_model.pt' \nshow_dst = '../cleaned_images/'+data_name+'/'+tag+'/'\nsubprocess.check_output(['mkdir', '-p', show_dst])\n\n# Make the transformer and the network\nif data_name == 'BSD_gray':\n transform = [transforms.ToTensor(), noise_level] \n cleaner = cleaner().cuda()\nelse:\n transform = transforms.ToTensor() \n cleaner = cleaner_no_norm().cuda()\n \ncleaner.load_state_dict(torch.load(Pretrained))\ncleaner.eval()\n\n# Make the dataloader\nconvertor = data_convertors.ConvertImageSet(testroot, test_list_pth, data_name,\n transform=transform)\ndataloader = DataLoader(convertor, batch_size=1, shuffle=False, num_workers=1)\n\nave_psnr = 0 \nave_ssim = 0 \nct_num = 0\nfor i, data in enumerate(dataloader):\n ct_num+= 1.0\n im_input, label, im_name = data \n im_input = Variable(im_input, requires_grad=False).cuda()\n res = cleaner(im_input)\n res = res.data.cpu().numpy()\n res[res>1] = 1\n res[res<0] = 0\n res*= 255\n if data_name == 'BSD_gray':\n res = res.astype(np.uint8)[0,0]\n label = label.numpy()[0,0]\n label*= 255\n label = label.astype(np.uint8) \n cv2.imwrite(show_dst+im_name[0].split('.')[0]+'_'+str(noise_level)+'.png', res)\n ave_psnr+= psnr(res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=False)\n \n elif data_name == 'RealNoiseHKPoly':\n res = res.astype(np.uint8)[0]\n res = res.transpose((1,2,0))\n label = label.numpy()[0].transpose((1,2,0))\n label*= 255\n label = label.astype(np.uint8) \n Image.fromarray(res).save(show_dst+im_name[0].split('real')[0]+'.png')\n ave_psnr+= psnr(res, label, data_range=255)\n ave_ssim+= ski_ssim(res, label, data_range=255, multichannel=True)\n \n else:\n print('Unknown dataset name.')\n \nprint('psnr: '+str(ave_psnr/ct_num))\nprint('ssim: '+str(ave_ssim/ct_num))\nprint('Test done.')\n",
"import os, sys\nsys.path.insert(1, '../')\nimport torch\nimport cv2\nimport shutil\nimport torchvision\nimport numpy as np\nimport itertools\nimport subprocess\nimport random\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nfrom pietorch import data_convertors\nfrom pietorch.DuRN_US import cleaner as cleaner\nfrom pietorch.pytorch_ssim import ssim as ssim\n\n#------ Options -------\ntag = 'DuRN_US'\ndata_name = 'RESIDE'\nbch_size = 40\nbase_lr = 0.0001\nepoch_size = 3000\ngpus = 1\ncrop_size = 256\n\nssim_weight = 1.1\nl1_loss_weight = 0.75\nwith_data_aug = False\n#----------------------\n\n# Set pathes\ndata_root = '../data/' +data_name+'/indoor_train/'\nimlist_pth = '../lists/'+data_name+'_indoor/train_list.txt'\n\n# dstroot for saving models. \n# logroot for writting some log(s), if is needed.\ndstroot = './trainedmodels/'+data_name+'/'+tag+'/'\nlogroot = './logs/'+data_name+'/'+tag+'/'\nsubprocess.check_output(['mkdir', '-p', dstroot])\nsubprocess.check_output(['mkdir', '-p', logroot])\n\n# Transform\ntransform = transforms.ToTensor()\n# Dataloader\nconvertor = data_convertors.ConvertImageSet(data_root, imlist_pth, data_name,\n transform=transform, is_train=True,\n with_aug=with_data_aug, crop_size=crop_size)\ndataloader = DataLoader(convertor, batch_size=bch_size, shuffle=False, num_workers=5)\n\n# Make network\ncleaner = cleaner().cuda()\ncleaner.train()\n\n# Optimizer and Loss\noptimizer = optim.Adam(cleaner.parameters(), lr=base_lr)\nL1_loss = nn.L1Loss()\n\n# Start training\nprint('Start training...')\nfor epoch in range(epoch_size): \n for iteration, data in enumerate(dataloader):\n img, label, _ = data\n img_var = Variable(img, requires_grad=False).cuda()\n label_var = Variable(label, requires_grad=False).cuda()\n\n # Cleaning noisy images\n cleaned = cleaner(img_var)\n\n # Compute ssim loss (not used)\n ssim_loss = -ssim(cleaned, label_var)\n ssim_loss = ssim_loss*ssim_weight\n\n # Compute L1 loss (not used)\n l1_loss = L1_loss(cleaned, label_var)\n l1_loss = l1_loss*l1_loss_weight\n\n loss = ssim_loss + l1_loss\n # Backward and update params \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Check \n# torchvision.utils.save_image(img[:16], logroot+'input_images.png')\n# torchvision.utils.save_image(label[:16], logroot+'label_images.png')\n# torchvision.utils.save_image(cleaned[:16], logroot+'temp_res.png' )\n print('Epoch('+str(epoch+1)+'), iteration('+str(iteration+1)+'): '+str(loss.item()))\n\n if epoch%10 == 9:\n if gpus == 1: \n torch.save(cleaner.state_dict(), dstroot+'epoch_'+str(epoch+1)+'_model.pt')\n else:\n torch.save(cleaner.module.state_dict(), dstroot+'epoch_'+str(epoch+1)+'_model.pt') \n\n if epoch in [700, 1400]:\n for param_group in optimizer.param_groups:\n param_group['lr']*= 0.1 \n\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"torch.load"
],
[
"torch.utils.data.DataLoader",
"torch.nn.L1Loss",
"torch.autograd.Variable"
]
] |
sayabiws/simple-image-recommender | [
"27162c544fc08b5774049039694f0fa7c7faac3f"
] | [
"main.py"
] | [
"# Simple image recommender\n#\n# required:\n# data/images: a folder containing your images dataset\n# data/users: can be empty, but the folder needs to exist (for now ?)\n# \n# optional:\n# data/tags.csv: a comma-separated list containing the names of your \n# images and the corresponding semicolon-separated tags\n# (eg. \"37.png,sky;blue;cliff\")\n\n# Libraries import\nfrom PIL import Image\nfrom sklearn.cluster import MiniBatchKMeans\nfrom operator import itemgetter\nimport pandas\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\nimport pandas as pd\nimport json\nimport math\nimport os\nimport json\nimport csv\n\n\n# User data gathering\ndef user_data_gathering():\n\tname = input(\"Please enter your username: \")\n\tuser_favs = []\n\tuser_dislikes = []\n\ttry:\n\t\twith open(\"data/users/\" + name + \".txt\", \"r\") as userfile:\n\t\t\tuser_favs = userfile.readline().rstrip().split(\",\")\n\t\t\tuser_dislikes = userfile.readline().rstrip().split(\",\")\n\texcept FileNotFoundError:\n\t\tprint(\"This user doesn't exist. Creating it...\")\n\tif not user_favs:\n\t\tprint(\"No favourite images defined!\")\n\tif not user_dislikes:\n\t\tprint(\"No disliked images defined!\")\n\tdo_fav = input(\"Would you like to define your favourite images? ([y]es/[n]o/[a]dd): \")\n\tif do_fav == \"y\":\n\t\tuser_favs = input(\"Please enter your favourite images, separated by a comma: \").split(\",\")\n\telif do_fav == \"a\":\n\t\tuser_favs += input(\"Please enter the images you want to add, separated by a comma: \").split(\",\")\n\telif do_fav == \"n\":\n\t\tpass\n\telse:\n\t\tprint(\"Incorrect choice. Exiting\")\n\t\texit()\n\n\tdo_dislike = input(\"Would you like to define your disliked images? ([y]es/[n]o/[a]dd): \")\n\tif do_dislike == \"y\":\n\t\tuser_dislikes = input(\"Please enter your disliked images, separated by a comma: \").split(\",\")\n\telif do_dislike == \"a\":\n\t\tuser_dislikes += input(\"Please enter the images you want to add, separated by a comma: \").split(\",\")\n\telif do_dislike == \"n\":\n\t\tpass\n\telse:\n\t\tprint(\"Incorrect choice. 
Exiting\")\n\t\texit()\n\tuserfile = open(\"data/users/\" + name + \".txt\", \"w+\")\n\tuserfile.write(\",\".join(user_favs) + \"\\n\")\n\tuserfile.write(\",\".join(user_dislikes) + \"\\n\")\n\tuserfile.close()\n\n\treturn user_favs,user_dislikes\n\n# Get all images filenames in data/images/\ndef get_image_list():\n\timagelist = []\n\tfor file in os.listdir(\"data/images\"):\n\t\tif file.endswith(\".png\") or file.endswith(\".jpg\") or file.endswith(\".gif\") or file.endswith(\".tif\") or file.endswith(\".bmp\"):\n\t\t\timagelist.append(file)\n\treturn imagelist\n\n# Get color clusters per image\ndef get_clusters(filename, n_clusters):\n\timgfile = Image.open(\"data/images/\" + filename).convert('RGBA')\n\tnumarray = np.array(imgfile.getdata(), np.uint8)\n\n\tclusters = MiniBatchKMeans(n_clusters=n_clusters)\n\tclusters.fit(numarray)\n\n\tnpbins = np.arange(0, n_clusters + 1)\n\thistogram = np.histogram(clusters.labels_, bins=npbins)\n\n\t# Sort histogram\n\tpairs = sorted(zip(histogram[0], histogram[1]), key=itemgetter(0))\n\thistogram = (np.array([v for v, i in pairs]),\n\t\t\t\t np.array([i for v, i in pairs]))\n\n\tcolors = []\n\n\tfor i in range(n_clusters):\n\t\tj = histogram[1][i]\n\t\tcolors.append(\n\t\t\t(\n\t\t\t\tmath.ceil(clusters.cluster_centers_[j][0]),\n\t\t\t\tmath.ceil(clusters.cluster_centers_[j][1]),\n\t\t\t\tmath.ceil(clusters.cluster_centers_[j][2])\n\t\t\t)\n\t\t)\n\n\treturn colors\n\n# Returns a pandas dataframe with the tags info\ndef get_tags(filename):\n\ttry:\n\t\ttags_df = pd.read_csv(filename)\n\texcept FileNotFoundError:\n\t\tprint(\"No tags have been defined. Ignoring tags.\")\n\n\ttags_df[\"tags\"] = tags_df.tags.str.split(\";\")\n\treturn tags_df\n\n# Clean the clusters data\ndef clean_data(clusters):\n\tfor image in clusters:\n\t\ttmp = []\n\t\tfor color in image[\"colors\"]: \n\t\t\ttmp.append(((color[0])<<16)|((color[1])<<8)|(color[2]))\n\t\timage[\"colors\"] = tmp\n\t\ttmp = []\n\n\treturn clusters\n\n# The actual prediction algorithm\ndef predict(clusters, user_fav, user_dislikes):\n\timages = sorted(clusters, key=lambda x: x['name'])\n\tcolor_clusters = [image[\"colors\"] for image in images]\n\n\t# Build training data\n\ttraining_data = color_clusters\n\tresult_data = [(image['name'] in user_fav) for image in images]\n\t\n\t# Build dataframes\n\ttraining_df = pandas.DataFrame(training_data, columns=['color1', 'color2', 'color3'])\n\tresult_df = pandas.DataFrame(result_data, columns=['favorite'])\n\n\t# Train decision tree\n\tclassifier = RandomForestClassifier(n_estimators=10, max_depth=10)\n\tclassifier = classifier.fit(training_df, result_df.values.ravel())\n\n\tpredicted = classifier.predict(list(map(lambda x: x['colors'], images)))\n\n\tprint(\"# Predicted as favorites\")\n\n\tfor index, favorite in enumerate(predicted):\n\t\tname = images[index]['name']\n\t\t# Only print new images\n\t\tif favorite and name not in user_fav and name not in user_dislikes:\n\t\t\tprint(name)\n\n# Main function\ndef main():\n\tprint(\"Loading...\")\n\tprint(\" -- Looking up images...\")\n\timagelist = get_image_list()\n\tprint(\" -- Calculating color clusters (this can take some time if it has never been done before)...\")\n\tn_clusters = 3\n\n\ttry:\n\t\tclustersData = open(\"data/clusters.json\", \"r\")\n\t\tclusters = json.load(clustersData)\n\texcept:\n\t\tclusters = [{\"name\":filename, \"colors\":get_clusters(filename, n_clusters)} for filename in imagelist]\n\t\tr = json.dumps(clusters)\n\t\tclusersfile = open(\"data/clusters.json\", 
\"w\")\n\t\tclusersfile.write(r)\n\t\tclusersfile.close()\n\n\tprint(\" -- Extracting tags...\")\n\ttags = get_tags(\"data/tags.csv\")\n\tprint(\"Loading done!\")\n\n\t# Gathering user data\n\tprint(\"Gathering user data...\")\n\t(user_favs, user_dislikes) = user_data_gathering()\n\n\t# Recommendation system\n\tprint(\"Computing recommendation...\")\n\tcleanedclusters = clean_data(clusters)\n\tpredict(cleanedclusters, user_favs, user_dislikes)\n\nif __name__ == \"__main__\":\n\tmain()"
] | [
[
"numpy.histogram",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.arange",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"sklearn.cluster.MiniBatchKMeans"
]
] |
Nickmeagan70/tensorflow | [
"6bfedde8466daced9f40a0e11840f5ce274abc7d"
] | [
"tensorflow/python/pywrap_tensorflow.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"A Python wrapper that loads _pywrap_tensorflow_internal.so.\"\"\"\n\nimport ctypes\nimport sys\nimport traceback\n\nfrom tensorflow.python.platform import self_check\n\n# TODO(mdan): Cleanup antipattern: import for side effects.\n\n# Perform pre-load sanity checks in order to produce a more actionable error.\nself_check.preload_check()\n\n# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long\n\ntry:\n # This import is expected to fail if there is an explicit shared object\n # dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.\n from tensorflow.python import pywrap_dlopen_global_flags\n _use_dlopen_global_flags = True\nexcept ImportError:\n _use_dlopen_global_flags = False\n\n# On UNIX-based platforms, pywrap_tensorflow is a python library that\n# dynamically loads _pywrap_tensorflow.so.\n_can_set_rtld_local = (\n hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'))\nif _can_set_rtld_local:\n _default_dlopen_flags = sys.getdlopenflags()\n\ntry:\n if _use_dlopen_global_flags:\n pywrap_dlopen_global_flags.set_dlopen_flags()\n elif _can_set_rtld_local:\n # Ensure RTLD_LOCAL behavior for platforms where it isn't the default\n # (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not\n # override an RTLD_GLOBAL in _default_dlopen_flags).\n sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)\n\n # Python2.7 does not have a ModuleNotFoundError.\n try:\n ModuleNotFoundError\n except NameError:\n ModuleNotFoundError = ImportError # pylint: disable=redefined-builtin\n\n # pylint: disable=wildcard-import,g-import-not-at-top,line-too-long,undefined-variable\n try:\n from tensorflow.python._pywrap_tensorflow_internal import *\n # This try catch logic is because there is no bazel equivalent for py_extension.\n # Externally in opensource we must enable exceptions to load the shared object\n # by exposing the PyInit symbols with pybind. This error will only be\n # caught internally or if someone changes the name of the target _pywrap_tensorflow_internal.\n\n # This logic is used in other internal projects using py_extension.\n except ModuleNotFoundError:\n pass\n\n if _use_dlopen_global_flags:\n pywrap_dlopen_global_flags.reset_dlopen_flags()\n elif _can_set_rtld_local:\n sys.setdlopenflags(_default_dlopen_flags)\nexcept ImportError:\n raise ImportError(\n f'{traceback.format_exc()}'\n f'\\n\\nFailed to load the native TensorFlow runtime.\\n'\n f'See https://www.tensorflow.org/install/errors '\n f'for some common causes and solutions.\\n'\n f'If you need help, create an issue '\n f'at https://github.com/tensorflow/tensorflow/issues '\n f'and include the entire stack trace above this error message.')\n\n# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long\n"
] | [
[
"tensorflow.python.pywrap_dlopen_global_flags.reset_dlopen_flags",
"tensorflow.python.pywrap_dlopen_global_flags.set_dlopen_flags",
"tensorflow.python.platform.self_check.preload_check"
]
] |
nvaytet/scipp | [
"f14f56ed19cccb4162d55b1123df7225eeedb395"
] | [
"src/scipp/plotting/tools.py"
] | [
"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)\n# @author Neil Vaytet\n\nfrom .. import config\nfrom ..core import concatenate, values, dtype, units, nanmin, nanmax, histogram, \\\n full_like\nfrom ..core import Variable, DataArray\nfrom ..core import abs as abs_\nimport numpy as np\nfrom copy import copy\nimport io\n\n\ndef get_line_param(name=None, index=None):\n \"\"\"\n Get the default line parameter from the config.\n If an index is supplied, return the i-th item in the list.\n \"\"\"\n param = getattr(config.plot, name)\n return param[index % len(param)]\n\n\ndef to_bin_centers(x, dim):\n \"\"\"\n Convert array edges to centers\n \"\"\"\n return 0.5 * (x[dim, 1:] + x[dim, :-1])\n\n\ndef to_bin_edges(x, dim):\n \"\"\"\n Convert array centers to edges\n \"\"\"\n idim = x.dims.index(dim)\n if x.shape[idim] < 2:\n one = 1.0 * x.unit\n return concatenate(x[dim, 0:1] - one, x[dim, 0:1] + one, dim)\n else:\n center = to_bin_centers(x, dim)\n # Note: use range of 0:1 to keep dimension dim in the slice to avoid\n # switching round dimension order in concatenate step.\n left = center[dim, 0:1] - (x[dim, 1] - x[dim, 0])\n right = center[dim, -1] + (x[dim, -1] - x[dim, -2])\n return concatenate(concatenate(left, center, dim), right, dim)\n\n\ndef parse_params(params=None, defaults=None, globs=None, array=None):\n \"\"\"\n Construct the colorbar settings using default and input values\n \"\"\"\n from matplotlib.colors import Normalize, LogNorm, LinearSegmentedColormap\n from matplotlib import cm\n\n parsed = dict(config.plot.params)\n if defaults is not None:\n for key, val in defaults.items():\n parsed[key] = val\n if globs is not None:\n for key, val in globs.items():\n # Global parameters need special treatment because by default they\n # are set to None, and we don't want to overwrite the defaults.\n if val is not None:\n parsed[key] = val\n if params is not None:\n if isinstance(params, bool):\n params = {\"show\": params}\n for key, val in params.items():\n parsed[key] = val\n\n if parsed[\"norm\"] == \"log\":\n norm = LogNorm\n elif parsed[\"norm\"] == \"linear\":\n norm = Normalize\n else:\n raise RuntimeError(\"Unknown norm. Expected 'linear' or 'log', \"\n \"got {}.\".format(parsed[\"norm\"]))\n vmin = parsed[\"vmin\"]\n vmax = parsed[\"vmax\"]\n parsed[\"norm\"] = norm(vmin=vmin.value if vmin is not None else None,\n vmax=vmax.value if vmax is not None else None)\n\n # Convert color into custom colormap\n if parsed[\"color\"] is not None:\n parsed[\"cmap\"] = LinearSegmentedColormap.from_list(\n \"tmp\", [parsed[\"color\"], parsed[\"color\"]])\n else:\n parsed[\"cmap\"] = copy(cm.get_cmap(parsed[\"cmap\"]))\n\n if parsed[\"under_color\"] is None:\n parsed[\"cmap\"].set_under(parsed[\"cmap\"](0.0))\n else:\n parsed[\"cmap\"].set_under(parsed[\"under_color\"])\n if parsed[\"over_color\"] is None:\n parsed[\"cmap\"].set_over(parsed[\"cmap\"](1.0))\n else:\n parsed[\"cmap\"].set_over(parsed[\"over_color\"])\n\n return parsed\n\n\ndef vars_to_err(v):\n \"\"\"\n Convert variances to errors.\n \"\"\"\n with np.errstate(invalid=\"ignore\"):\n v = np.sqrt(v)\n np.nan_to_num(v, copy=False)\n return v\n\n\ndef find_log_limits(x):\n \"\"\"\n To find log scale limits, we histogram the data between 1.0-30\n and 1.0e+30 and include only bins that are non-zero.\n \"\"\"\n from .. 
import flatten, ones\n volume = np.product(x.shape)\n pixel = flatten(values(x.astype(dtype.float64)), to='pixel')\n weights = ones(dims=['pixel'], shape=[volume], unit='counts')\n hist = histogram(DataArray(data=weights, coords={'order': pixel}),\n bins=Variable(dims=['order'],\n values=np.geomspace(1e-30, 1e30, num=61),\n unit=x.unit))\n # Find the first and the last non-zero bins\n inds = np.nonzero((hist.data > 0.0 * units.counts).values)\n ar = np.arange(hist.data.shape[0])[inds]\n # Safety check in case there are no values in range 1.0e-30:1.0e+30:\n # fall back to the linear method and replace with arbitrary values if the\n # limits are negative.\n if len(ar) == 0:\n [vmin, vmax] = find_linear_limits(x)\n if vmin.value <= 0.0:\n if vmax.value <= 0.0:\n vmin = full_like(vmin, 0.1)\n vmax = full_like(vmax, 1.0)\n else:\n vmin = 1.0e-3 * vmax\n else:\n vmin = hist.coords['order']['order', ar.min()]\n vmax = hist.coords['order']['order', ar.max() + 1]\n return [vmin, vmax]\n\n\ndef find_linear_limits(x):\n \"\"\"\n Find variable min and max.\n \"\"\"\n return [\n values(nanmin(x).astype(dtype.float64)),\n values(nanmax(x).astype(dtype.float64))\n ]\n\n\ndef find_limits(x, scale=None, flip=False):\n \"\"\"\n Find sensible limits, depending on linear or log scale.\n \"\"\"\n if scale is not None:\n if scale == \"log\":\n lims = {\"log\": find_log_limits(x)}\n else:\n lims = {\"linear\": find_linear_limits(x)}\n else:\n lims = {\"log\": find_log_limits(x), \"linear\": find_linear_limits(x)}\n if flip:\n for key in lims:\n lims[key] = np.flip(lims[key]).copy()\n return lims\n\n\ndef fix_empty_range(lims, replacement=None):\n \"\"\"\n Range correction in case xmin == xmax\n \"\"\"\n dx = 0.0 * lims[0].unit\n if lims[0].value == lims[1].value:\n if replacement is not None:\n dx = 0.5 * replacement\n elif lims[0].value == 0.0:\n dx = 0.5 * lims[0].unit\n else:\n dx = 0.5 * abs_(lims[0])\n return [lims[0] - dx, lims[1] + dx]\n\n\ndef fig_to_pngbytes(fig):\n \"\"\"\n Convert figure to png image bytes.\n We also close the figure to prevent it from showing up again in\n cells further down the notebook.\n \"\"\"\n import matplotlib.pyplot as plt\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n plt.close(fig)\n buf.seek(0)\n return buf.getvalue()\n\n\ndef to_dict(meta):\n \"\"\"\n Convert a coords, meta, attrs or masks object to a python dict.\n \"\"\"\n return {name: var for name, var in meta.items()}\n"
] | [
[
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.errstate",
"numpy.arange",
"matplotlib.cm.get_cmap",
"numpy.geomspace",
"numpy.product",
"matplotlib.pyplot.close",
"numpy.flip",
"numpy.sqrt",
"numpy.nan_to_num",
"numpy.nonzero"
]
] |
alishameli/CS231n-Sample-Code-1 | [
"e47e593026c80530f7c387c4feca24f88c1618a2"
] | [
"tensorflow/predict.py"
] | [
"import argparse\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nimport models\n\ndef predict(model_data_path, image_path):\n\n # Default input size\n height = 228\n width = 304\n channels = 3\n batch_size = 1\n \n # Read image\n img = Image.open(image_path)\n img = img.resize([width,height], Image.ANTIALIAS)\n img = np.array(img).astype('float32')\n img = np.expand_dims(np.asarray(img), axis = 0)\n \n # Create a placeholder for the input image\n input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))\n \n # Construct the network\n net = models.ResNet50UpProj({'data': input_node}, batch_size)\n \n with tf.Session() as sess:\n\n # Load the converted parameters\n print('Loading the model')\n net.load(model_data_path, sess) \n \n uninitialized_vars = []\n for var in tf.global_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError: \n uninitialized_vars.append(var)\n\n init_new_vars_op = tf.variables_initializer(uninitialized_vars)\n sess.run(init_new_vars_op)\n \n # Evalute the network for the given image\n pred = sess.run(net.get_output(), feed_dict={input_node: img})\n \n # Plot result\n fig = plt.figure()\n ii = plt.imshow(pred[0,:,:,0], interpolation='nearest')\n fig.colorbar(ii)\n plt.show()\n\n return pred\n \n \ndef main():\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('model_path', help='Converted parameters for the model')\n parser.add_argument('image_paths', help='Directory of images to predict')\n args = parser.parse_args()\n\n # Predict the image\n pred = predict(args.model_path, args.image_paths)\n \n os._exit(0)\n\nif __name__ == '__main__':\n main()\n\n \n\n\n\n"
] | [
[
"tensorflow.placeholder",
"matplotlib.pyplot.figure",
"numpy.asarray",
"tensorflow.global_variables",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"tensorflow.Session",
"numpy.array",
"tensorflow.variables_initializer"
]
] |
dbusbridge/spektral | [
"a95807603c2bb96c80f34d326f663273c72ca3fc"
] | [
"spektral/datasets/delaunay.py"
] | [
"from __future__ import absolute_import\n\nimport numpy as np\nfrom scipy.spatial import Delaunay\n\nfrom spektral.utils import label_to_one_hot, numpy_to_nx\n\nRETURN_TYPES = {'numpy', 'networkx'}\nMAX_K = 7 # Maximum number of nodes in a graph\n\n\ndef generate_data(return_type='networkx', classes=0, n_samples_in_class=1000,\n n_nodes=7, support_low=0., support_high=10., drift_amount=1.0,\n one_hot_labels=True, support=None, seed=None):\n \"\"\"\n Generates a dataset of Delaunay triangulations as described by\n [Zambon et al. (2017)](https://arxiv.org/abs/1706.06941).\n Note that this function is basically deprecated and will change soon.\n \n :param return_type: `'networkx'` or `'numpy'`, data format to return;\n :param classes: indices of the classes to load (integer, or list of integers\n between 0 and 20);\n :param n_samples_in_class: number of generated samples per class;\n :param n_nodes: number of nodes in a graph;\n :param support_low: lower bound of the uniform distribution from which the \n support is generated;\n :param support_high: upper bound of the uniform distribution from which the \n support is generated;\n :param drift_amount: coefficient to control the amount of change between \n classes;\n :param one_hot_labels: one-hot encode dataset labels;\n :param support: custom support to use instead of generating it randomly; \n :param seed: random numpy seed;\n :return: if `return_type='networkx'`, a list of graphs in Networkx format, \n and an array containing labels; if `return_type='numpy'`, the adjacency \n matrix, node features, and an array containing labels.\n \"\"\"\n if return_type not in RETURN_TYPES:\n raise ValueError('Possible return_type: {}'.format(RETURN_TYPES))\n\n if isinstance(classes, int):\n classes = [classes]\n\n if max(classes) > 20 or min(classes) < 0:\n raise ValueError('Class indices must be between 0 and 20')\n\n r_classes = list(reversed(classes))\n if r_classes[-1] == 0:\n r_classes.insert(0, r_classes.pop(-1))\n\n # Support points\n np.random.seed(seed)\n if support is None:\n support = np.random.uniform(support_low, support_high, (1, n_nodes, 2))\n else:\n try:\n assert support.shape == (1, n_nodes, 2)\n except AssertionError:\n print('The given support doesn\\'t have shape (1, n_nodes, 2) as'\n 'expected. Attempting to reshape.')\n support = support.reshape(1, n_nodes, 2)\n\n # Compute node features\n node_features = []\n # Other node features\n for idx, i in enumerate(r_classes):\n if i == 0:\n concept_0 = np.repeat(support, n_samples_in_class, 0)\n noise_0 = np.random.normal(0, 1, (n_samples_in_class, n_nodes, 2))\n class_0 = concept_0 + noise_0\n node_features.append(class_0)\n else:\n radius = 10. * ((2./3.) 
** (drift_amount * (i - 1)))\n phase = np.random.uniform(0, 2 * np.pi, (n_nodes, 1))\n perturb_i_x = radius * np.cos(phase)\n perturb_i_y = radius * np.sin(phase)\n perturb_i = np.concatenate((perturb_i_x, perturb_i_y), axis=-1)\n support_i = support + perturb_i\n concept_i = np.repeat(support_i, n_samples_in_class, 0)\n noise_i = np.random.normal(0, 1, (n_samples_in_class, n_nodes, 2))\n class_i = concept_i + noise_i\n node_features.append(class_i)\n node_features = np.array(node_features).reshape((-1, n_nodes, 2))\n\n # Compute adjacency matrices\n adjacency = []\n for nf in node_features:\n adj = compute_adj(nf)\n adjacency.append(adj)\n adjacency = np.array(adjacency)\n\n # Compute labels\n labels = np.repeat(classes, n_samples_in_class)\n if one_hot_labels:\n labels = label_to_one_hot(labels, labels=classes)\n\n if return_type is 'numpy':\n return adjacency, node_features, labels\n elif return_type is 'networkx':\n graphs = numpy_to_nx(adjacency, node_features=node_features, nf_name='coords')\n return graphs, labels\n else:\n raise NotImplementedError\n\n\ndef compute_adj(x):\n \"\"\"\n Computes the Delaunay triangulation of the given points\n :param x: array of shape (num_nodes, 2)\n :return: the computed adjacency matrix\n \"\"\"\n tri = Delaunay(x)\n edges_explicit = np.concatenate((tri.vertices[:, :2],\n tri.vertices[:, 1:],\n tri.vertices[:, ::2]), axis=0)\n adj = np.zeros((x.shape[0], x.shape[0]))\n adj[edges_explicit[:, 0], edges_explicit[:, 1]] = 1.\n return np.clip(adj + adj.T, 0, 1)\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros",
"scipy.spatial.Delaunay",
"numpy.random.seed",
"numpy.repeat",
"numpy.random.normal",
"numpy.cos",
"numpy.clip",
"numpy.array",
"numpy.sin",
"numpy.concatenate"
]
] |
finagle29/PypeIt | [
"418d6d24d24054ad590d2f06c0b4688ea18f492e"
] | [
"pypeit/scripts/flux_setup.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport os,time\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom pypeit import msgs\nfrom pypeit.par.util import make_pypeit_file\n\n\nclass SmartFormatter(argparse.HelpFormatter):\n\n def _split_lines(self, text, width):\n if text.startswith('R|'):\n return text[2:].splitlines()\n # this is the RawTextHelpFormatter._split_lines\n return argparse.HelpFormatter._split_lines(self, text, width)\n\ndef parser(options=None):\n parser = argparse.ArgumentParser(description='Parse', formatter_class=SmartFormatter)\n parser.add_argument(\"sci_path\", type=str, help=\"Path for Science folder\")\n parser.add_argument(\"--objmodel\", type=str, default='qso', choices=['qso', 'star', 'poly'],\n help=\"R|Science object model used in the telluric fitting.\\n\"\n \"The options are:\\n\"\n \"\\n\"\n \" qso = For quasars. You might need to set redshift, bal_wv_min_mx in the tell file.\\n\"\n \"\\n\"\n \" star = For stars. You need to set star_type, star_ra, star_dec, and star_mag in the tell_file.\\n\"\n \"\\n\"\n \" poly = For other type object, You might need to set fit_wv_min_mx, \\n\"\n \" and norder in the tell_file.\"\n )\n\n if options is None:\n args = parser.parse_args()\n else:\n args = parser.parse_args(options)\n return args\n\n\ndef main(args):\n \"\"\"\n This setups PypeIt files for fluxing, coadding and telluric corrections.\n It will produce three files named as your_spectragraph.flux, your_spectragraph.coadd1d,\n and your_spectragraph.tell\n \"\"\"\n allfiles = os.listdir(args.sci_path)\n allfiles = np.sort(allfiles)\n spec1dfiles = []\n spec2dfiles = []\n spec1dinfos = []\n for ifile in allfiles:\n if ('spec1d' in ifile) and ('.fits' in ifile):\n spec1dfiles.append(ifile)\n elif ('spec2d' in ifile) and ('.fits' in ifile):\n spec2dfiles.append(ifile)\n elif ('spec1d' in ifile) and ('.txt' in ifile):\n spec1dinfos.append(ifile)\n else:\n msgs.warn('{:} is not a standard PypeIt output.'.format(ifile))\n if len(spec2dfiles) > len(spec1dfiles):\n msgs.warn('The following exposures do not have 1D extractions:')\n for ii in range(len(spec2dfiles)):\n if not os.path.exists(os.path.join(args.sci_path, spec2dfiles[ii].replace('spec2d','spec1d'))):\n msgs.info('\\t {:}'.format(spec2dfiles[ii]))\n\n if len(spec1dfiles) > 0:\n par = fits.open(os.path.join(args.sci_path, spec1dfiles[0]))\n\n ## fluxing pypeit file\n spectrograph = par[0].header['PYP_SPEC']\n pypeline = par[0].header['PYPELINE']\n flux_file = '{:}.flux'.format(spectrograph)\n cfg_lines = ['[fluxcalib]']\n cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC derived with the UVIS algorithm\\n']\n cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']\n make_pypeit_file(flux_file, spectrograph, spec1dfiles, cfg_lines=cfg_lines, setup_mode=True)\n fin = open(flux_file, \"rt\")\n data = fin.read()\n data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))\n data = data.replace('data', 'flux')\n fin.close()\n fin = open(flux_file, \"wt\")\n fin.write(data)\n fin.close()\n\n ## coadd1d pypeit file\n coadd1d_file = '{:}.coadd1d'.format(spectrograph)\n cfg_lines = ['[coadd1d]']\n cfg_lines += [' coaddfile = YOUR_OUTPUT_FILE_NAME # Please set your output file name']\n cfg_lines += [' sensfuncfile = YOUR_SENSFUNC_FILE # Please set your SENSFUNC file name']\n if pypeline == 'Echelle':\n cfg_lines += [' wave_method = velocity # creates a uniformly space grid in log10(lambda)\\n']\n else:\n cfg_lines += [' 
wave_method = linear # creates a uniformly space grid in lambda\\n']\n\n cfg_lines += ['# This file includes all extracted objects. You need to figure out which object you want to \\n'+\\\n '# coadd before running pypeit_coadd_1dspec!!!']\n spec1d_info = []\n for ii in range(len(spec1dfiles)):\n meta_tbl = Table.read(os.path.join(args.sci_path, spec1dfiles[ii]).replace('.fits', '.txt'),\n format='ascii.fixed_width')\n _, indx = np.unique(meta_tbl['name'],return_index=True)\n objects = meta_tbl[indx]\n for jj in range(len(objects)):\n spec1d_info.append(spec1dfiles[ii] + ' '+ objects['name'][jj])\n make_pypeit_file(coadd1d_file, spectrograph, spec1d_info, cfg_lines=cfg_lines, setup_mode=True)\n fin = open(coadd1d_file, \"rt\")\n data = fin.read()\n data = data.replace('spec1d_', os.path.join(args.sci_path,'spec1d_'))\n data = data.replace('data', 'coadd1d')\n fin.close()\n fin = open(coadd1d_file, \"wt\")\n fin.write(data)\n fin.close()\n\n ## tellfit pypeit file\n tellfit_file = '{:}.tell'.format(spectrograph)\n cfg_lines = ['[tellfit]']\n if args.objmodel == 'qso':\n cfg_lines += [' objmodel = qso']\n cfg_lines += [' redshift = 0.0']\n cfg_lines += [' bal_wv_min_max = 10000.,11000.']\n elif args.objmodel == 'star':\n cfg_lines += [' objmodel = star']\n cfg_lines += [' star_type = A0']\n cfg_lines += [' star_mag = 0.0']\n elif args.objmodel == 'poly':\n cfg_lines += [' objmodel = poly']\n cfg_lines += [' polyorder = 5']\n cfg_lines += [' fit_wv_min_max = 17000.0,22000.0']\n\n with open(tellfit_file, 'w') as f:\n f.write('# Auto-generated PypeIt file\\n')\n f.write('# {0}\\n'.format(time.strftime(\"%a %d %b %Y %H:%M:%S\", time.localtime())))\n f.write(\"\\n\")\n f.write(\"# User-defined execution parameters\\n\")\n f.write(\"# This is only an example. Make sure to change the following parameters accordingly.\\n\")\n f.write('\\n'.join(cfg_lines))\n f.write('\\n')\n f.write('\\n')\n msgs.info('PypeIt file written to: {0}'.format(tellfit_file))\n\n\n"
] | [
[
"numpy.sort",
"numpy.unique"
]
] |
LamannaLeonardo/OLAM | [
"7a6611912ebb40d39a934dd454efec4cbb7913d3"
] | [
"Util/Latex_generator.py"
] | [
"# Copyright (c) 2022, Leonardo Lamanna\n# All rights reserved.\n# This source code is licensed under the MIT-style license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport pandas as pd\nimport os\n\npd.options.display.max_colwidth = 100\n\ndef generate_latex_table(data_file, labels, tab_name, caption, header):\n\n with open(tab_name + \".tex\", \"w\") as f:\n df = pd.read_excel(data_file, sheet_name=\"Summary\")\n df_restricted = df[labels]\n f.write(df_restricted.to_latex(index=False, escape=False,\n label=\"tab:{}\".format(tab_name),\n caption= caption,\n header = header))\n\n\ndef generate_comparison_latex_table():\n labels = [\"Domain\", \"Neg precision A\", \"Neg recall A\", \"Overall precision A\", \"Overall recall A\",\n \"Neg precision B\", \"Neg recall B\", \"Overall precision B\", \"Overall recall B\"]\n header = [\"Domain\", \"$P_{\\\\eff^{-}}$\", \"$R_{\\\\eff^{-}}$\", \"$P$\", \"$R$\",\n \"$P_{\\\\eff^{-}}$\", \"$R_{\\\\eff^{-}}$\", \"$P$\", \"$R$\"]\n caption = \"For each domain:statistics on final metrics of the last instance grouped by \" \\\n \"negative effects.\"\n tab_name = \"comparison_summary_uncertain\"\n file_path = os.path.join(\"comparison_summary_uncertain.xlsx\")\n\n generate_latex_table(file_path, labels, tab_name, caption, header)\n\n\ndef generate_comparison_latex_table_fama():\n labels = [\"Domain\", \"Tot time\", \"Overall precision\", \"Overall recall\", \"FAMA tot time\",\n \"FAMA precision\", \"FAMA recall\", \"Delta act\"]\n header = [\"Domain\", \"$t$\", \"$P$\", \"$R$\", \"$t$\", \"$P$\", \"$R$\", \"$\\delta_{A}$\"]\n caption = \"Comparison among OLAM and FAMA with full observability. FAMA is run with all plan traces \" \\\n \"provided in \\protect\\cite{aineto_AIJ2019}. MODEL WITH UNCERTAIN NEGATIVE EFFECTS AND STRIPS ASSUMPTION.\"\n tab_name = \"comparison_fama\"\n file_path = os.path.join(\"comparison_fama.xlsx\")\n\n generate_latex_table(file_path, labels, tab_name, caption, header)\n\n\ndef generate_summary_latex_table():\n # labels = [\"Domain\", \"Instances\", \"Precs precision\", \"Precs recall\",\"Pos precision\", \"Pos recall\",\n # \"Neg precision\", \"Neg recall\", \"Overall precision\", \"Overall recall\"]\n labels = [\"Domain\", \"Instances\", \"Precs precision\", \"Precs recall\",\"Pos precision\", \"Pos recall\",\n \"Neg precision\", \"Neg recall\", \"Average precision\", \"Average recall\"]\n header = [\"Domain\", \"$I$\", \"$P_{\\\\prec}$\", \"$R_{\\\\prec}$\", \"$P_{\\\\eff^{+}}$\", \"$R_{\\\\eff^{+}}$\", \"$P_{\\\\eff^{-}}$\",\n \"$R_{\\\\eff^{-}}$\", \"$P$\", \"$R$\"]\n caption = \"For each domain:statistics on final metrics of the last instance grouped by \" \\\n \"preconditions, positive effects and negative ones.\"\n tab_name = \"overall_summary_certain_nostripsass\"\n\n folder = \"../Analysis/IJCAI_Results/Results_certain_NOnegeff_assumption\"\n file_path = os.path.join(folder, \"overall_summary.xlsx\")\n\n generate_latex_table(file_path, labels, tab_name, caption, header)\n\n\ndef generate_domain_objects_table():\n\n header = [\"Domain\", \"Objects\"]\n caption = \"For each domain, problem objects of all problems in the generated set.\"\n tab_name = \"all_problem_objects\"\n\n df = pd.DataFrame({\n \"Domain\":[],\n \"Objects\":[]\n })\n # df.set_index('Domain', inplace=True)\n\n domain_dataframes = [name for name in os.listdir(os.path.join(\"..\", \"Analysis\", \"Results_cert\"))\n if not name.startswith(\"overall\")]\n\n for domain_dataframe in domain_dataframes:\n domain = 
domain_dataframe.split(\"_\")[0]\n df_domain = pd.read_excel(os.path.join(\"..\", \"Analysis\", \"Results_cert\", domain_dataframe),\n sheet_name=\"Objects\")\n domain_obj_types = [key.strip().lower() for key in list(df_domain) if key.strip().lower() != \"total objs\"]\n\n for i, row in df_domain.iterrows():\n problem_objs = []\n for k in domain_obj_types:\n problem_objs.append(\"{} {}\".format(k,row[\"\\t\" + k]))\n\n eval = {\n \"Domain\":domain,\n \"Objects\":\", \".join(problem_objs)\n }\n\n\n df = df.append(eval, ignore_index=True)\n\n\n\n\n with open(tab_name + \".tex\", \"w\") as f:\n f.write(df.to_latex(index=False,\n label=\"tab:{}\".format(tab_name),\n caption= caption,\n header = header))\n\n\n\nif __name__ == \"__main__\":\n\n generate_summary_latex_table()\n #\n # generate_domain_objects_table()\n"
] | [
[
"pandas.DataFrame",
"pandas.read_excel"
]
] |
RichardoLuo/ColossalAI | [
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba",
"797a9dc5a9e801d7499b8667c3ef039a38aa15ba"
] | [
"tests/components_to_test/repeated_computed_layer.py",
"tests/test_tensor/test_tensor.py"
] | [
"#!/usr/bin/env python\n\nimport torch\nimport torch.nn as nn\nfrom colossalai.nn import CheckpointModule\nfrom .utils.dummy_data_generator import DummyDataGenerator\nfrom .registry import non_distributed_component_funcs\n\n\nclass NetWithRepeatedlyComputedLayers(CheckpointModule):\n \"\"\"\n This model is to test with layers which go through forward pass multiple times.\n In this model, the fc1 and fc2 call forward twice\n \"\"\"\n\n def __init__(self, checkpoint=False) -> None:\n super().__init__(checkpoint=checkpoint)\n self.fc1 = nn.Linear(5, 5)\n self.fc2 = nn.Linear(5, 5)\n self.fc3 = nn.Linear(5, 2)\n self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3]\n\n def forward(self, x):\n for layer in self.layers:\n x = layer(x)\n return x\n\n\nclass DummyDataLoader(DummyDataGenerator):\n\n def generate(self):\n data = torch.rand(16, 5)\n label = torch.randint(low=0, high=2, size=(16,))\n return data, label\n\n\n@non_distributed_component_funcs.register(name='repeated_computed_layers')\ndef get_training_components():\n\n def model_builder(checkpoint=True):\n return NetWithRepeatedlyComputedLayers(checkpoint)\n\n trainloader = DummyDataLoader()\n testloader = DummyDataLoader()\n\n criterion = torch.nn.CrossEntropyLoss()\n return model_builder, trainloader, testloader, torch.optim.Adam, criterion\n",
"import torch\nfrom colossalai.tensor import ColoTensor\nfrom numpy import allclose\n\n\ndef test_tensor_indexing():\n torch_t = torch.randn(2, 3)\n colo_t = ColoTensor.init_from_torch_tensor(torch_t)\n assert allclose(torch_t[:, 1], colo_t[:, 1].torch_tensor())\n\n\ndef test_lazy_init_tensor():\n lazy_t = ColoTensor(2, 3, dtype=torch.float32, requires_grad=True)\n assert lazy_t._torch_tensor.numel() == 0\n assert lazy_t.numel() == 6 == lazy_t.torch_tensor().numel()\n\n\ndef test_wrapped_tensor_func():\n t_ref = torch.randn(4, 5)\n t = ColoTensor.init_from_torch_tensor(t_ref.clone())\n\n # non-func attr\n assert t.is_cuda == t_ref.is_cuda\n\n # TODO I don't find out a tensor function which returns None.\n\n # return 1 torch.Tensor\n t_abs = t.abs()\n assert isinstance(t_abs, ColoTensor) and torch.equal(t_abs.torch_tensor(), t_ref.abs())\n\n # return 1 non-torch.Tensor\n assert t.dim() == t_ref.dim()\n\n # return >1 torch.Tensor\n t_split1, t_split2 = t.split(2)\n assert isinstance(t_split1, ColoTensor) and isinstance(t_split2, ColoTensor)\n\n\ndef test_operand():\n t_ref = torch.randn(4, 5)\n t = ColoTensor.init_from_torch_tensor(t_ref.clone())\n\n t_ref_res = t_ref + t_ref\n t_res = t + t\n assert torch.allclose(t_ref_res, t_res)\n"
] | [
[
"torch.rand",
"torch.nn.Linear",
"torch.randint",
"torch.nn.CrossEntropyLoss"
],
[
"torch.randn",
"torch.allclose"
]
] |
wlm2019/Neural-Arithmetic-Units | [
"f9de9d004bb2dc2ee28577cd1760d0a00c185836"
] | [
"stable_nalu/layer/hard_softmax_nac.py"
] | [
"\nimport math\nimport torch\n\nfrom ..abstract import ExtendedTorchModule\nfrom ..functional import sparsity_error\nfrom ._abstract_recurrent_cell import AbstractRecurrentCell\n\nclass HardSoftmaxNACLayer(ExtendedTorchModule):\n \"\"\"Implements the NAC (Neural Accumulator)\n\n Arguments:\n in_features: number of ingoing features\n out_features: number of outgoing features\n \"\"\"\n\n def __init__(self, in_features, out_features, **kwargs):\n super().__init__('nac', **kwargs)\n self.in_features = in_features\n self.out_features = out_features\n\n # Define the target weights. Also, put 0 last such that p1 = p2 = 0\n # corresponds to p3 = 1 => w = 0.\n self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))\n\n # Initialize a tensor, that will be the placeholder for the hard samples\n self.register_buffer('sample', torch.LongTensor(out_features, in_features))\n\n # We will only two parameters per weight, this is to prevent the redundancy\n # there would otherwise exist. This also makes it much more comparable with\n # NAC.\n self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features, 2))\n self.register_buffer('W_hat_k', torch.Tensor(out_features, in_features, 1))\n\n self.register_parameter('bias', None)\n\n def reset_parameters(self):\n # Use a gain of sqrt(0.5). Lets assume that softmax'(0) ~ 1, because this\n # holds for sigmoid. Then:\n # Var[W] = 1 * Var[S_1] - 1 * Var[S_2] + 0 * Var[S_3] = 2 / (fan[in] + fan[out])\n # Var[W] = 2 * Var[S_i] = 2 / (fan[in] + fan[out])\n # Var[S_i] = 1/2 * 2 / (fan[in] + fan[out])\n # sqrt(Var[S_i]) = sqrt(1/2) * sqrt(2 / (fan[in] + fan[out]))\n # This is not exactly true, because S_1, S_2, and S_3 are not enterily uncorrelated.\n torch.nn.init.xavier_uniform_(self.W_hat, gain=math.sqrt(0.5))\n torch.nn.init.constant_(self.W_hat_k, 0)\n\n def forward(self, input, reuse=False):\n # Concat trainable and non-trainable weights\n W_hat_full = torch.cat((self.W_hat, self.W_hat_k), dim=-1) # size = [out, in, 3]\n\n # Compute W_soft\n pi = torch.nn.functional.softmax(W_hat_full, dim=-1)\n W_soft = pi @ self.target_weights\n\n # Compute W_hard\n if not reuse:\n torch.multinomial(pi.view(-1, 3), 1, True, out=self.sample.view(-1))\n W_hard = self.target_weights[self.sample]\n\n # Use W_hard in the forward pass, but use W_soft for the gradients.\n # This implementation trick comes from torch.nn.functional.gumble_softmax(hard=True)\n W = W_hard - W_soft.detach() + W_soft\n\n # Compute the linear multiplication as usual\n self.writer.add_histogram('W', W)\n self.writer.add_tensor('W', W)\n self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)\n\n return torch.nn.functional.linear(input, W, self.bias)\n\n def extra_repr(self):\n return 'in_features={}, out_features={}'.format(\n self.in_features, self.out_features\n )\n\nclass HardSoftmaxNACCell(AbstractRecurrentCell):\n \"\"\"Implements the Gumbel NAC (Gumbel Neural Accumulator) as a recurrent cell\n\n Arguments:\n input_size: number of ingoing features\n hidden_size: number of outgoing features\n \"\"\"\n def __init__(self, input_size, hidden_size, **kwargs):\n super().__init__(HardSoftmaxNACLayer, input_size, hidden_size, **kwargs)\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.functional.linear",
"torch.nn.functional.softmax",
"torch.tensor",
"torch.LongTensor",
"torch.cat",
"torch.Tensor"
]
] |
ZhaoJ9014/Multi-Human-Parsing-MHP- | [
"a24eae67e9b4e730c75bcd8aec3e2ed06cb4b046"
] | [
"Nested_Adversarial_Networks/NAN_rework/modeleag.py"
] | [
"# Rework of model.py\n# https://github.com/ddddwee1/sul\n# This wrap-up is targeted for better touching low-level implementations \nimport layers2 as L \nimport tensorflow as tf \nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\ntf.enable_eager_execution(config=config)\nimport numpy as np \nimport os \nimport random\nimport time\n\nPARAM_RELU = 0\nPARAM_LRELU = 1\nPARAM_ELU = 2\nPARAM_TANH = 3\nPARAM_MFM = 4\nPARAM_MFM_FC = 5\nPARAM_SIGMOID = 6\n\n######## util functions ###########\ndef accuracy(pred,y,name='acc', one_hot=True):\n\twith tf.variable_scope(name):\n\t\tif one_hot:\n\t\t\tcorrect = tf.equal(tf.cast(tf.argmax(pred,-1),tf.int64),tf.cast(tf.argmax(y,-1),tf.int64))\n\t\telse:\n\t\t\tcorrect = tf.equal(tf.cast(tf.argmax(pred,-1),tf.int64),tf.cast(y,tf.int64))\n\t\tacc = tf.reduce_mean(tf.cast(correct,tf.float32))\n\treturn acc\n\n##########################\n# ETA class. I want to see the ETA. It's too boring to wait here.\nclass ETA():\n\tdef __init__(self,max_value):\n\t\tself.start_time = time.time()\n\t\tself.max_value = max_value\n\t\tself.current = 0\n\n\tdef start(self):\n\t\tself.start_time = time.time()\n\t\tself.current = 0\n\n\tdef sec2hms(self,sec):\n\t\thm = sec//60\n\t\ts = sec%60\n\t\th = hm//60\n\t\tm = hm%60\n\t\treturn h,m,s\n\n\tdef get_ETA(self,current,is_string=True):\n\t\tself.current = current\n\t\ttime_div = time.time() - self.start_time\n\t\ttime_remain = time_div * float(self.max_value - self.current) / float(self.current + 1)\n\t\th,m,s = self.sec2hms(int(time_remain))\n\t\tif is_string:\n\t\t\treturn '%d:%d:%d'%(h,m,s)\n\t\telse:\n\t\t\treturn h,m,s\n\n########### universal model class ##########\nclass Model(tf.contrib.checkpoint.Checkpointable):\n\tdef __init__(self,*args,**kwargs):\n\t\tself.initialized = False\n\t\tself.variables = []\n\t\tself.initialize(*args,**kwargs)\n\n\tdef initialize(self,*args,**kwargs):\n\t\tpass\n\n\tdef _gather_variables(self):\n\t\tself.variables = []\n\t\tatrs = dir(self)\n\t\tfor i in atrs:\n\t\t\tif i[0] == '_':\n\t\t\t\tcontinue\n\t\t\tobj = getattr(self, i)\n\t\t\tself.variables += self._gather_variables_recursive(obj)\n\n\tdef _gather_variables_recursive(self, obj):\n\t\tresult = []\n\t\tif isinstance(obj, list) or isinstance(obj, tuple):\n\t\t\tfor sub_obj in obj:\n\t\t\t\tresult += self._gather_variables_recursive(sub_obj)\n\t\telif isinstance(obj, Model) or isinstance(obj, L.Layer):\n\t\t\tresult += obj.variables\n\t\treturn result\n\n\tdef get_variables(self, layers=None):\n\t\tif layers is None:\n\t\t\treturn self.variables\n\t\telse:\n\t\t\tres = []\n\t\t\tfor l in layers:\n\t\t\t\tres += l.variables\n\t\t\treturn res \n\n\tdef set_bn_training(self, is_training):\n\t\tatrs = dir(self)\n\t\t# print(atrs)\n\t\tfor i in atrs:\n\t\t\tif i[0] == '_':\n\t\t\t\tcontinue\n\t\t\tobj = getattr(self, i)\n\t\t\tself._set_bn_training_recursive(obj, is_training)\n\n\tdef _set_bn_training_recursive(self, obj, is_training):\n\t\tif isinstance(obj, list):\n\t\t\tfor sub_obj in obj:\n\t\t\t\tself._set_bn_training_recursive(sub_obj, is_training)\n\t\tif isinstance(obj, Model) and obj!=self:\n\t\t\tobj.set_bn_training(is_training)\n\t\tif isinstance(obj, L.batch_norm):\n\t\t\tobj.is_training = is_training\n\n\tdef set_bn_epsilon(self, epsilon):\n\t\tatrs = dir(self)\n\t\t# print(atrs)\n\t\tfor i in atrs:\n\t\t\tif i[0] == '_':\n\t\t\t\tcontinue\n\t\t\tobj = getattr(self, i)\n\t\t\tself._set_bn_epsilon_recursive(obj, epsilon)\n\n\tdef _set_bn_epsilon_recursive(self, obj, epsilon):\n\t\tif isinstance(obj, 
list):\n\t\t\tfor sub_obj in obj:\n\t\t\t\tself._set_bn_training_recursive(sub_obj, epsilon)\n\t\tif isinstance(obj, Model) and obj!=self:\n\t\t\tobj.set_bn_training(epsilon)\n\t\tif isinstance(obj, L.batch_norm):\n\t\t\tobj.epsilon = epsilon\n\n\tdef __call__(self, x, *args, **kwargs):\n\t\tx = tf.convert_to_tensor(x, preferred_dtype=tf.float32)\n\t\tres = self.forward(x, *args, **kwargs)\n\t\tif not self.initialized:\n\t\t\tself._gather_variables()\n\t\t\tself.initialized = True\n\t\treturn res \n\n########### universal layer classes ##########\nclass ConvLayer(Model):\n\tdef initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):\n\t\tself.conv = L.conv2D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\tdef forward(self,x):\n\t\tx = self.conv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass ConvLayer1D(Model):\n\tdef initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):\n\t\tself.conv = L.conv1D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\tdef forward(self,x):\n\t\tx = self.conv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass ConvLayer3D(Model):\n\tdef initialize(self, size, outchn, dilation_rate=1, stride=1,pad='SAME',activation=-1,batch_norm=False, usebias=True,kernel_data=None,bias_data=None,weight_norm=False):\n\t\tself.conv = L.conv3D(size,outchn,stride=stride,pad=pad,usebias=usebias,kernel_data=kernel_data,bias_data=bias_data,dilation_rate=dilation_rate,weight_norm=weight_norm)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\tdef forward(self,x):\n\t\tx = self.conv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass DeconvLayer(Model):\n\tdef initialize(self, size, outchn, activation=-1, stride=1, usebias=True, pad='SAME', batch_norm=False):\n\t\tself.deconv = L.deconv2D(size,outchn,stride=stride,usebias=usebias,pad=pad, name=None)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self,x):\n\t\tx = self.deconv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass DeconvLayer3D(Model):\n\tdef initialize(self, size, outchn, activation=-1, stride=1, usebias=True, pad='SAME', batch_norm=False):\n\t\tself.deconv = 
L.deconv3D(size,outchn,stride=stride,usebias=usebias,pad=pad, name=None)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self,x):\n\t\tx = self.deconv(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass Dense(Model):\n\tdef initialize(self, outsize, usebias=True, batch_norm=False, activation=-1):\n\t\tself.fclayer = L.fcLayer(outsize,usebias=usebias)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self,x):\n\t\tx = self.fclayer(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\nclass GraphConvLayer(Model):\n\tdef initialize(self, outsize, adj_mtx=None, adj_fn=None, usebias=True, activation=-1, batch_norm=False):\n\t\tself.GCL = L.graphConvLayer(outsize, adj_mtx=adj_mtx, adj_fn=adj_fn, usebias=usebias)\n\t\tself.batch_norm = batch_norm\n\t\tself.activation_ = activation\n\t\tif batch_norm:\n\t\t\tself.bn = L.batch_norm()\n\t\tif activation!=-1:\n\t\t\tself.activation = L.activation(activation)\n\n\tdef forward(self, x):\n\t\tx = self.GCL(x)\n\t\tif self.batch_norm:\n\t\t\tx = self.bn(x)\n\t\tif self.activation_!=-1:\n\t\t\tx = self.activation(x)\n\t\treturn x \n\n\nflatten = L.flatten()\nmaxPool = L.maxpoolLayer\navgPool = L.avgpoolLayer\n\n########### higher wrapped block ##########\n\nclass ResBlock(Model):\n\tdef initialize(self, outchn, stride=1, ratio=4, activation=PARAM_RELU):\n\t\tself.outchn = outchn\n\t\t# self.stride = stride\n\t\tself.activ = L.activation(activation)\n\t\tself.bn = L.batch_norm()\n\t\tself.l1 = ConvLayer(1, outchn//ratio, activation=PARAM_RELU, batch_norm=True)\n\t\tself.l2 = ConvLayer(3, outchn//ratio, activation=PARAM_RELU, batch_norm=True, stride=stride)\n\t\tself.l3 = ConvLayer(1, outchn)\n\t\tself.shortcut_conv = ConvLayer(1, outchn, activation=PARAM_RELU, stride=stride)\n\t\tself.shortcut_pool = L.maxpoolLayer(stride)\n\n\tdef forward(self, x):\n\t\tinshape = x.get_shape().as_list()[-1]\n\t\tif inshape==self.outchn:\n\t\t\tshort = self.shortcut_pool(x)\n\t\telse:\n\t\t\tshort = self.shortcut_conv(x)\n\n\t\tbranch = self.bn(x)\n\t\tbranch = self.activ(branch)\n\t\tbranch = self.l1(branch)\n\t\tbranch = self.l2(branch)\n\t\tbranch = self.l3(branch)\n\n\t\treturn branch + short\n\nclass Sequential(Model):\n\tdef initialize(self, modules):\n\t\tself.modules = modules\n\n\tdef forward(self, x):\n\t\tfor m in self.modules:\n\t\t\tx = m(x)\n\t\treturn x\n\n########### saver ##########\nclass Saver():\n\tdef __init__(self, model, optim=None):\n\t\tself.mod = model\n\n\t\tself.obj = tf.contrib.checkpoint.Checkpointable()\n\t\tself.obj.m = self.mod\n\t\tself.optim = optim \n\t\tif optim is None:\n\t\t\tself.ckpt = tf.train.Checkpoint(model=self.obj, optimizer_step=tf.train.get_or_create_global_step())\n\t\telse:\n\t\t\tself.ckpt = tf.train.Checkpoint(optimizer=optim, model=self.obj, optimizer_step=tf.train.get_or_create_global_step())\n\t\n\tdef save(self, path):\n\t\tprint('Saving model to path:',path)\n\t\thead, tail = os.path.split(path)\n\t\tif not os.path.exists(head):\n\t\t\tos.makedirs(head)\n\t\tself.ckpt.save(path)\n\t\tprint('Model saved to path:',path)\n\n\tdef restore(self, 
path, ptype='folder'):\n\t\tprint('Load from:', path)\n\t\ttry:\n\t\t\tif ptype=='folder':\n\t\t\t\tlast_ckpt = tf.train.latest_checkpoint(path)\n\t\t\t\tprint('Checkpoint:', last_ckpt)\n\t\t\t\tif last_ckpt is None:\n\t\t\t\t\tprint('No model found in checkpoint.')\n\t\t\t\t\tprint('Model will auto-initialize after first iteration.')\n\t\t\t\tself.ckpt.restore(last_ckpt)\n\t\t\telse:\n\t\t\t\tself.ckpt.restore(path)\n\t\t\tprint('Finish loading.')\n\t\texcept Exception as e:\n\t\t\tprint('Model restore failed, Exception:',e)\n\t\t\tprint('Model will auto-initialize after first iteration.')\n\n######### Gradient accumulator #########\nclass GradAccumulator():\n\tdef __init__(self):\n\t\tself.steps = 0\n\t\tself.grads = []\n\n\tdef accumulate(self, grads):\n\t\tif len(grads) == 0:\n\t\t\tself.grads = grads\n\t\telse:\n\t\t\tfor old_g, new_g in zip(self.grads, grads):\n\t\t\t\told_g.assign_add(new_g)\n\t\tself.steps += 1\n\n\tdef get_gradient(self):\n\t\tres = [i/self.steps for i in self.grads]\n\t\tself.grads = []\n\t\tself.steps = 0\n\t\treturn res\n\n\tdef get_step(self):\n\t\treturn self.steps\n\n######### Data Reader Template (serial) ##########\nclass DataReaderSerial():\n\tdef __init__(self, one_hot=None):\n\t\tself.data_pos = 0\n\t\tself.val_pos = 0\n\t\tself.data = []\n\t\tself.val = []\n\t\tself.one_hot = False\n\t\tif one_hot is not None:\n\t\t\tself.one_hot = True\n\t\t\tself.eye = np.eye(one_hot)\n\t\tself.load_data()\n\t\t\n\tdef get_next_batch(self,BSIZE):\n\t\tif self.data_pos + BSIZE > len(self.data):\n\t\t\trandom.shuffle(self.data)\n\t\t\tself.data_pos = 0\n\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = self.eye[np.array(y)]\n\t\tself.data_pos += BSIZE\n\t\treturn x,y\n\n\tdef get_val_next_batch(self, BSIZE):\n\t\tif self.val_pos + BSIZE >= len(self.val):\n\t\t\tbatch = self.val[self.val_pos:]\n\t\t\trandom.shuffle(self.val)\n\t\t\tself.val_pos = 0\n\t\t\tis_end = True\n\t\telse:\n\t\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\t\tis_end = False\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = self.eye[np.array(y)]\n\t\tself.val_pos += BSIZE\n\t\treturn x,y, is_end\n\n\tdef get_train_iter(self, BSIZE):\n\t\treturn len(self.data)//BSIZE\n\n\tdef get_val_iter(self, BSIZE):\n\t\treturn len(self.val)//BSIZE + 1\n\nclass ListReader():\n\tdef __init__(self, one_hot=None):\n\t\tself.data_pos = 0\n\t\tself.val_pos = 0\n\t\tself.data = []\n\t\tself.val = []\n\t\tself.one_hot = False\n\t\tif one_hot is not None:\n\t\t\tself.one_hot = True\n\t\t\tself.eye = np.eye(one_hot)\n\t\tself.load_data()\n\t\t\n\tdef get_next_batch(self,BSIZE):\n\t\tif self.data_pos + BSIZE > len(self.data):\n\t\t\trandom.shuffle(self.data)\n\t\t\tself.data_pos = 0\n\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = self.eye[np.array(y)]\n\t\tself.data_pos += BSIZE\n\n\t\tx = [self.process_img(i) for i in x]\n\t\treturn x,y\n\n\tdef get_val_next_batch(self, BSIZE):\n\t\tif self.val_pos + BSIZE >= len(self.val):\n\t\t\tbatch = self.val[self.val_pos:]\n\t\t\trandom.shuffle(self.val)\n\t\t\tself.val_pos = 0\n\t\t\tis_end = True\n\t\telse:\n\t\t\tbatch = self.data[self.data_pos : self.data_pos+BSIZE]\n\t\t\tis_end = False\n\t\tx = [i[0] for i in batch]\n\t\ty = [i[1] for i in batch]\n\t\tif self.one_hot:\n\t\t\ty = 
self.eye[np.array(y)]\n\t\tself.val_pos += BSIZE\n\t\tx = [self.process_img(i) for i in x]\n\t\treturn x,y, is_end\n\n\tdef get_train_iter(self, BSIZE):\n\t\treturn len(self.data)//BSIZE\n\n\tdef get_val_iter(self, BSIZE):\n\t\treturn len(self.val)//BSIZE + 1\n\n######### Data Reader Template (parallel) ##########\n# multi-process to read data\nclass DataReader():\n\tdef __init__(self, data, fn, batch_size, shuffle=False, random_sample=False, processes=2, post_fn=None):\n\t\tfrom multiprocessing import Pool\n\t\tself.pool = Pool(processes)\n\t\tprint('Starting parallel data loader...')\n\t\tself.process_fn = fn\n\t\tself.data = data\n\t\tself.batch_size = batch_size\n\t\tself.position = batch_size\n\t\tself.post_fn = post_fn\n\t\tself.random_sample = random_sample\n\t\tself.shuffle = shuffle\n\t\tif shuffle:\n\t\t\trandom.shuffle(self.data)\n\t\tself._start_p(self.data[:batch_size])\n\n\tdef _start_p(self, data):\n\t\tself.ps = []\n\t\tfor i in data:\n\t\t\tself.ps.append(self.pool.apply_async(self.process_fn, [i]))\n\n\tdef get_next_batch(self):\n\t\t# print('call')\n\t\t# fetch data\n\t\tres = [i.get() for i in self.ps]\n\n\t\t# start new pre-fetch\n\t\tif self.random_sample:\n\t\t\tbatch = random.sample(self.data, self.batch_size)\n\t\telse:\n\t\t\tif self.position + self.batch_size > len(self.data):\n\t\t\t\tself.position = 0\n\t\t\t\tif self.shuffle:\n\t\t\t\t\trandom.shuffle(self.data)\t\n\t\t\tbatch = self.data[self.position:self.position+self.batch_size]\n\t\t\tself.position += self.batch_size\n\t\t\n\t\tself._start_p(batch)\n\n\t\t# post_process the data\n\t\tif self.post_fn is not None:\n\t\t\tres = self.post_fn(res)\n\t\treturn res \n\n\n######### short-cut functions #########\n\ngradient_reverse = L.gradient_reverse\n\ndef pad(x, pad):\n\tif isinstance(pad, list):\n\t\tx = tf.pad(x, [[0,0],[pad[0],pad[1]], [pad[2],pad[3]], [0,0]])\n\telse:\n\t\tx = tf.pad(x, [[0,0],[pad,pad],[pad,pad],[0,0]])\n\treturn x \n\ndef pad3D(x, pad):\n\tif isinstance(pad, list):\n\t\tx = tf.pad(x, [[0,0],[pad[0],pad[1]], [pad[2],pad[3]], [pad[4], pad[5]], [0,0]])\n\telse:\n\t\tx = tf.pad(x, [[0,0],[pad,pad],[pad,pad],[pad,pad],[0,0]])\n\treturn x \n\ndef image_transform(x, H, out_shape=None, interpolation='NEAREST'):\n\t# Will produce error if not specify 'output_shape' in eager mode\n\tshape = x.get_shape().as_list()\n\tif out_shape is None:\n\t\tif len(shape)==4:\n\t\t\tout_shape = shape[1:3]\n\t\telse:\n\t\t\tout_shape = shape[:2]\n\treturn tf.contrib.image.transform(x, H, interpolation=interpolation, output_shape=out_shape)\n \ndef zip_grad(grads, vars):\n\tassert len(grads)==len(vars)\n\tgrads_1 = []\n\tvars_1 = []\n\tfor i in range(len(grads)):\n\t\tif not grads[i] is None:\n\t\t\tgrads_1.append(grads[i])\n\t\t\tvars_1.append(vars[i])\n\tassert len(grads_1)!=0\n\treturn zip(grads_1, vars_1)\n\n"
] | [
[
"tensorflow.pad",
"tensorflow.enable_eager_execution",
"numpy.eye",
"tensorflow.contrib.image.transform",
"tensorflow.contrib.checkpoint.Checkpointable",
"tensorflow.variable_scope",
"tensorflow.cast",
"tensorflow.train.latest_checkpoint",
"tensorflow.convert_to_tensor",
"tensorflow.argmax",
"tensorflow.train.get_or_create_global_step",
"numpy.array",
"tensorflow.ConfigProto"
]
] |
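Two small issues are visible in the modeleag.py code in the row above: `_set_bn_epsilon_recursive` recurses through `_set_bn_training_recursive`/`set_bn_training` instead of the epsilon variants, and `GradAccumulator.accumulate` tests `len(grads)` rather than `len(self.grads)`, so the first batch of gradients is never stored. Below is a minimal, framework-free sketch of the intended accumulator behaviour, written with NumPy instead of TF1 eager tensors; it is an illustration, not the repository's implementation.

```python
# Minimal sketch of the gradient-accumulator idea from the row above, kept
# framework-free so it runs without TF1 eager mode. Unlike the original,
# the first call stores the incoming gradients and later calls add to them.
import numpy as np

class GradAccumulator:
    def __init__(self):
        self.steps = 0
        self.grads = []

    def accumulate(self, grads):
        if not self.grads:                      # first call: keep copies
            self.grads = [np.array(g, dtype=float) for g in grads]
        else:                                   # later calls: add element-wise
            for old_g, new_g in zip(self.grads, grads):
                old_g += new_g
        self.steps += 1

    def get_gradient(self):
        res = [g / self.steps for g in self.grads]
        self.grads, self.steps = [], 0
        return res

acc = GradAccumulator()
acc.accumulate([np.ones(3), np.full(2, 2.0)])
acc.accumulate([np.ones(3), np.full(2, 4.0)])
print(acc.get_gradient())   # [array([1., 1., 1.]), array([3., 3.])]
```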
cuis15/xorder | [
"6dde5a18552ffa07f29100038464a38c49495527"
] | [
"data/utils.py"
] | [
"import numpy as np\nfrom sklearn.metrics import roc_auc_score\nfrom numba import jit\n\n\ndef array2str(tmp_array, sep = \" \"):\n str_list = [\"{:.3f}\".format(tmp_item) for tmp_item in tmp_array]\n return sep.join(str_list)\n\n\ndef generate_sorted_groups(pred, y, a):\n a_idx = np.where(a == 0)\n b_idx = np.where(a == 1)\n b_score = pred[b_idx].reshape(-1)\n b_index = np.argsort(-b_score)\n b_score_sort = b_score[b_index]\n b_label = y[b_idx]\n b_label_sort = b_label[b_index]\n\n a_score = pred[a_idx].reshape(-1)\n a_index = np.argsort(-a_score)\n a_score_sort = a_score[a_index]\n a_label = y[a_idx]\n a_label_sort = a_label[a_index]\n\n return a_score_sort,b_score_sort,a_label_sort,b_label_sort\n\n\ndef cal_fairness_metric_by_groups(a_score, b_score, a_label, b_label, metric = \"xauc\"):\n if metric == \"xauc\":\n metric_ab, metric_ba, _ = xAUC_fast(a_score, b_score, a_label, b_label)\n else:\n metric_ab, metric_ba = pairwise_fast(a_score, b_score, a_label, b_label)\n return abs(metric_ab - metric_ba),metric_ab,metric_ba\n\n\ndef cal_fairness_metric(pred, y, a, metric = \"xauc\"):\n a_idx, b_idx = np.where(a == 0), np.where(a == 1)\n a_score, b_score = pred[a_idx].reshape(-1), pred[b_idx].reshape(-1)\n a_label, b_label = y[a_idx].reshape(-1), y[b_idx].reshape(-1)\n if metric == \"xauc\":\n metric_ab, metric_ba, _ = xAUC_fast(a_score, b_score, a_label, b_label)\n else:\n metric_ab, metric_ba = pairwise_fast(a_score, b_score, a_label, b_label)\n return abs(metric_ab - metric_ba),metric_ab,metric_ba\n\n\ndef AUC(score, label):\n ###[from big to small]\n sum_ = 0\n num = len(label)\n for i in range(num):\n for j in range(num):\n if label[i]==1 and label[j]==0:\n if score[i]>score[j]: \n sum_ += 1\n\n return sum_/(np.sum(label)*(num-np.sum(label))), sum_\n\n\ndef xAUC(a_score, b_score, a_label, b_label):\n sum_ab = 0\n sum_ba = 0\n numa = len(a_label)\n numb = len(b_label)\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n for i in range(numa):\n for j in range(numb):\n if a_label[i] ==1 and b_label[j] ==0:\n if a_score[i]>b_score[j]:\n sum_ab+=1\n elif a_label[i]==0 and b_label[j]==1:\n if b_score[j]>a_score[i]:\n sum_ba+=1\n return sum_ab/(a_num1*b_num0), sum_ba/(b_num1*a_num0), sum_ab+sum_ba \n\n\ndef xAUC_fast(a_score, b_score, a_label, b_label):\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n\n a_score1,a_score0 = a_score[a_label == 1],a_score[a_label == 0]\n b_score1,b_score0 = b_score[b_label == 1],b_score[b_label == 0]\n\n ab_label = np.concatenate((np.ones(int(a_num1)),np.zeros(int(b_num0))))\n ab_score = np.concatenate((a_score1,b_score0))\n xauc_ab = roc_auc_score(ab_label,ab_score)\n\n ba_label = np.concatenate((np.ones(int(b_num1)),np.zeros(int(a_num0))))\n ba_score = np.concatenate((b_score1,a_score0))\n xauc_ba = roc_auc_score(ba_label,ba_score)\n\n return xauc_ab, xauc_ba, xauc_ab * a_num1 * b_num0 + xauc_ba * b_num1 * a_num0\n\n\ndef post_score(train_score, train_score_post, test_score):\n tep_id = 0\n bins = [[] for i in range(len(train_score)+1)]\n for i in range(len(test_score)):\n s = test_score[i]\n if s>train_score[0]:\n bins[0].append(s)\n elif s<=train_score[-1]:\n bins[-1].append(s)\n else:\n for j in range(tep_id,len(train_score)):\n if train_score[j-1]>=s and train_score[j]<s:\n bins[j].append(s)\n tep_id = j\n break\n changed_b_score = []\n for bin_ in range(len(bins)):\n for item in range(len(bins[bin_])):\n num = 
(len(bins[bin_]))\n if bin_==0:\n changed_b_score.append((item)*train_score_post[bin_]/num+(num-item)/num)\n elif bin_==len(train_score_post):\n changed_b_score.append((num -item)*train_score_post[bin_-1]/num)\n else:\n changed_b_score.append((item)*train_score_post[bin_]/num + (num-item)*train_score_post[bin_-1]/num)\n \n return np.array(changed_b_score)\n\n\n@jit(nopython=True)\ndef maxAUC(a_label, b_label):\n\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n path = np.zeros((M+1, N+1,2,2))\n\n cost = np.zeros((M+1, N+1))\n for i in range(1,M+1):\n if a_label[i]==1:\n cost[i,0] = N-b_1 + cost[i-1, 0]\n else:\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost[0, i] = cost[0,i-1]+ M - a_1\n else:\n cost[0, i] = cost[0,i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n\n for i in range(2, M+1+N+1):\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if i-j+1>N or a_label[j]==0:\n tep_b = 0 \n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n\n if j+1>M or b_label[i-j]==0:\n tep_a = 0\n else:\n tep_a = M - j -np.sum(a_label[j+1:])\n\n if cost[j-1, i-j] + tep_b > cost[j, i-j-1] + tep_a:\n cost[j, i-j] = cost[j-1, i-j] + tep_b\n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost[j, i-j] = cost[j, i-j-1] + tep_a\n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n return cost[M,N], path\n\n\n@jit(nopython=True)\ndef xAUC_post(a_label, b_label, lamb):\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n\n a_1_b_0 = a_1*(N-b_1)\n b_1_a_0 = b_1*(M - a_1)\n\n path = np.zeros((M+1, N+1,2,2))\n cost_unfair = np.zeros((M+1, N+1))\n cost = np.zeros((M+1, N+1))\n for i in range(1,M+1):\n if a_label[i]==1:\n cost_unfair[i, 0] = (N-b_1)/a_1_b_0*lamb + cost_unfair[i-1,0]\n cost[i,0] = N-b_1 + cost[i-1, 0] \n else:\n cost_unfair[i, 0] = cost_unfair[i-1,0]\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost_unfair[0,i] = -(M-a_1)/b_1_a_0*lamb + cost_unfair[0, i-1]\n cost[0, i] = cost[0,i-1] + M - a_1\n else:\n cost[0, i] = cost[0,i-1]\n cost_unfair[0, i] = cost_unfair[0,i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n for i in range(2, M+1+N+1):\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if i-j+1>N or a_label[j]==0:\n tep_b = 0 \n tep_unfair_b = 0\n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n tep_unfair_b = tep_b/a_1_b_0*lamb \n\n if j+1>M or b_label[i-j]==0:\n tep_a = 0\n tep_unfair_a = 0\n else:\n tep_a = M - j -np.sum(a_label[j+1:])\n tep_unfair_a = -tep_a/b_1_a_0*lamb\n\n if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):\n cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]\n cost[j, i-j] = cost[j-1, i-j] + tep_b \n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]\n cost[j, i-j] = cost[j, i-j-1] + tep_a \n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n\n return cost, path, cost_unfair\n\n@jit(nopython=True)\ndef xAUC_post_(a_label, b_label, lamb):\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n\n a_1_b_0 = a_1*(N-b_1)\n b_1_a_0 = b_1*(M - a_1)\n\n path = np.zeros((M+1, N+1,2,2))\n cost_unfair = np.zeros((M+1, N+1))\n cost = np.zeros((M+1, N+1))\n for i in range(1,M+1):\n if 
a_label[i]==1:\n cost_unfair[i, 0] = (N-b_1)/a_1_b_0 * lamb + cost_unfair[i-1,0]\n cost[i,0] = N-b_1 + cost[i-1, 0] \n else:\n cost_unfair[i, 0] = cost_unfair[i-1,0]\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost_unfair[0,i] = -(M - a_1) / b_1_a_0 * lamb + cost_unfair[0, i-1]\n cost[0, i] = cost[0,i-1] + M - a_1\n else:\n cost[0, i] = cost[0,i-1]\n cost_unfair[0, i] = cost_unfair[0,i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n for i in range(2, M+1+N+1):\n # print(i)\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if a_label[j]==0:\n tep_b = 0 \n tep_unfair_b = 0\n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n tep_unfair_b = tep_b/a_1_b_0*lamb \n\n if b_label[i-j]==0:\n tep_a = 0\n tep_unfair_a = 0\n else:\n tep_a = M - j -np.sum(a_label[j+1:])\n tep_unfair_a = -tep_a/b_1_a_0*lamb\n\n if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):\n cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]\n cost[j, i-j] = cost[j-1, i-j] + tep_b \n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]\n cost[j, i-j] = cost[j, i-j-1] + tep_a \n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n\n return cost, path, cost_unfair\n\n\n@jit(nopython=True)\ndef pairwise_post(a_label, b_label, lamb):\n###a, b has been sorted decreasing sort.\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a_label)\n b_1 = np.sum(b_label)\n\n a_1_0 = a_1*((N-b_1)+(M - a_1))\n b_1_0 = b_1*((M - a_1)+(N-b_1))\n\n path = np.zeros((M+1, N+1,2,2))\n cost_unfair = np.zeros((M+1, N+1))\n cost = np.zeros((M+1, N+1))\n\n zeros_mat = np.zeros((M+1, N+1))\n zeros_mat[0,0] = ((N-b_1)+(M - a_1))\n\n for i in range(1,N+1):\n if b_label[i]==1:\n zeros_mat[0,i] = zeros_mat[0,i-1]\n else:\n zeros_mat[0,i] = zeros_mat[0,i-1]-1 \n\n for i in range(1,M+1):\n if a_label[i]==0:\n zeros_mat[i,0] = zeros_mat[i-1,0]-1\n else:\n zeros_mat[i,0] = zeros_mat[i-1,0]\n for j in range(1,N+1):\n if b_label[j]==0:\n zeros_mat[i,j] = zeros_mat[i,j-1]-1\n else:\n zeros_mat[i,j] = zeros_mat[i,j-1]\n for i in range(1,M+1):\n if a_label[i]==1:\n cost_unfair[i, 0] = zeros_mat[i,0]/a_1_0*lamb + cost_unfair[i-1,0]\n cost[i,0] = N-b_1 + cost[i-1, 0] \n else:\n cost_unfair[i, 0] = cost_unfair[i-1,0]\n cost[i,0] = cost[i-1,0]\n path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])\n\n for i in range(1,N+1):\n if b_label[i]==1:\n cost_unfair[0,i] = -zeros_mat[0,i]/b_1_0*lamb + cost_unfair[0, i-1]\n cost[0, i] = cost[0,i-1] + M - a_1\n else:\n\n cost[0, i] = cost[0,i-1]\n cost_unfair[0, i] = cost_unfair[0, i-1]\n path[0,i,:,:] = np.array([[0, i-1],[0, i]])\n\n for i in range(2, M+1+N+1):\n for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]\n\n if a_label[j]==0:\n tep_b = 0 \n tep_unfair_b = 0\n else:\n tep_b = N - (i-j) - np.sum(b_label[i-j+1:])\n tep_unfair_b = zeros_mat[j,i-j]/a_1_0*lamb \n\n\n if b_label[i-j]==0:\n tep_a = 0\n tep_unfair_a = 0\n else: \n tep_a = M - j -np.sum(a_label[j+1:])\n tep_unfair_a = -zeros_mat[j,i-j]/b_1_0*lamb\n\n if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):\n\n cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]\n cost[j, i-j] = cost[j-1, i-j] + tep_b \n path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])\n\n else:\n cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, 
i-j-1]\n cost[j, i-j] = cost[j, i-j-1] + tep_a \n path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])\n return cost, path, cost_unfair\n\n\ndef post_b_score(a_score, b_score, a_label, b_label, lamb = 0, _type=\"xauc\"): ## score has to be decreasing.\n M = len(a_score)\n N = len(b_score)\n if _type == \"xauc\":\n cost, path_ , cost_unfair = xAUC_post(a_label, b_label, lamb = lamb)\n elif _type==\"AUC\":\n cost, path_ = maxAUC(a_label, b_label)\n elif _type==\"prf\":\n cost, path_ , cost_unfair = pairwise_post(a_label, b_label, lamb = lamb)\n else:\n print(\"Unknown type\")\n exit()\n\n @jit(nopython=True)\n def pathTrace(path):\n\n trace = []\n tep = path[M,N,:,:]\n trace.append(tep[-1,:])\n trace.append(tep[0,:])\n for i in range(M+N-1):\n\n tep = path[int(tep[0][0]), int(tep[0][1]), :,:]\n trace.append(tep[0,:])\n trace.reverse()\n return trace\n\n path = pathTrace(path_)\n gap_a = [[] for i in range(M+1)]\n\n for i in range(1,len(path)):\n if int(path[i][0])==int(path[i-1][0]):\n gap_a[int(path[i][0])].append(int(path[i][1]))\n\n changed_b_score = []\n for bin_ in range(len(gap_a)):\n for item in range(len(gap_a[bin_])):\n num = (len(gap_a[bin_])+1)\n if bin_==0:\n changed_b_score.append((item+1)*a_score[bin_]/num+(num-item-1)/num)\n elif bin_==len(a_score):\n changed_b_score.append((num -item-1)*a_score[bin_-1]/num)\n else:\n changed_b_score.append((item+1)*a_score[bin_]/num + (num-item-1)*a_score[bin_-1]/num)\n if _type==\"AUC\":\n return np.array(changed_b_score), 0\n else:\n return np.array(changed_b_score), cost_unfair[-1, -1]\n\n\ndef pairwise(a_score, b_score, a_label, b_label):\n sum_ab = 0\n sum_ba = 0\n numa = len(a_label)\n numb = len(b_label)\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n\n i_AUCa = roc_auc_score(a_label, a_score)\n i_AUCb = roc_auc_score(b_label, b_score)\n\n for i in range(numa):\n for j in range(numb):\n if a_label[i] ==1 and b_label[j] ==0:\n if a_score[i]>b_score[j]:\n sum_ab+=1\n elif a_label[i]==0 and b_label[j]==1:\n if b_score[j]>a_score[i]:\n sum_ba+=1\n return (sum_ab+i_AUCa*a_num0*a_num1)/(a_num1*(b_num0+a_num0)), (sum_ba+i_AUCb*b_num0*b_num1)/(b_num1*(a_num0+b_num0))\n\n\ndef pairwise_fast(a_score, b_score, a_label, b_label):\n a_num1 = np.sum(a_label)\n a_num0 = len(a_label) - a_num1\n b_num1 = np.sum(b_label)\n b_num0 = len(b_label) - b_num1\n\n a_score1,a_score0 = a_score[a_label == 1],a_score[a_label == 0]\n b_score1,b_score0 = b_score[b_label == 1],b_score[b_label == 0]\n\n ab_label = np.concatenate((np.ones(int(a_num1)),np.zeros(int(b_num0+a_num0))))\n ab_score = np.concatenate((a_score1,a_score0,b_score0))\n pair_ab = roc_auc_score(ab_label,ab_score) #[a=1, 0]\n\n ba_label = np.concatenate((np.ones(int(b_num1)),np.zeros(int(a_num0+b_num0))))\n ba_score = np.concatenate((b_score1,b_score0, a_score0))\n pair_ba = roc_auc_score(ba_label,ba_score) #[b=1, 0]\n\n return pair_ab, pair_ba \n\n\ndef zeros_mat(a, b):\n a_label = [0] + a\n b_label = [0] + b\n M = len(a_label)-1\n N = len(b_label)-1\n a_1 = np.sum(a)\n b_1 = np.sum(b)\n zeros_mat = np.zeros((M+1, N+1))\n zeros_mat[0,0] = ((N-b_1)+(M - a_1))\n\n for i in range(1,N+1):\n if b_label[i]==1:\n zeros_mat[0,i] = zeros_mat[0,i-1]\n else:\n zeros_mat[0,i] = zeros_mat[0,i-1]-1 \n\n for i in range(1,M+1):\n if a_label[i]==0:\n zeros_mat[i,0] = zeros_mat[i-1,0]-1\n else:\n zeros_mat[i,0] = zeros_mat[i-1,0]\n for j in range(1,N+1):\n if b_label[j]==0:\n zeros_mat[i,j] = zeros_mat[i,j-1]-1\n else:\n zeros_mat[i,j] = 
zeros_mat[i,j-1]\n return zeros_mat\n\n\n\n\n\n\n"
] | [
[
"numpy.sum",
"numpy.zeros",
"numpy.argsort",
"numpy.where",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"numpy.concatenate"
]
] |
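As a quick sanity check of the `xAUC_fast` construction in the utils.py row above (positives of group A concatenated with negatives of group B, scored by `roc_auc_score`), the sketch below compares it against brute-force pair counting on small synthetic data; the scores and labels here are invented purely for illustration.

```python
# The probability that a positive from group A outranks a negative from group B
# equals roc_auc_score on the concatenation of those two subsets, which is what
# xAUC_fast exploits to avoid the quadratic loop in xAUC.
import numpy as np
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
a_score, a_label = rng.normal(0.2, 1, 200), rng.integers(0, 2, 200)
b_score, b_label = rng.normal(-0.2, 1, 200), rng.integers(0, 2, 200)

# Brute-force pair counting (same quantity as the quadratic xAUC function).
a1, b0 = a_score[a_label == 1], b_score[b_label == 0]
brute = (a1[:, None] > b0[None, :]).mean()

# roc_auc_score on positives-of-A vs negatives-of-B, as in xAUC_fast.
labels = np.concatenate([np.ones(len(a1)), np.zeros(len(b0))])
scores = np.concatenate([a1, b0])
fast = roc_auc_score(labels, scores)

print(round(brute, 6), round(fast, 6))  # the two numbers should match
```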
mathuvu/nevergrad | [
"8e116190a8a29c238e655d728fc4816f7b4e0415"
] | [
"nevergrad/optimization/recastlib.py"
] | [
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport functools\nimport math\nimport warnings\nimport weakref\nimport numpy as np\nfrom scipy import optimize as scipyoptimize\nimport nevergrad.common.typing as tp\nfrom nevergrad.parametrization import parameter as p\nfrom nevergrad.common import errors\nfrom . import base\nfrom .base import IntOrParameter\nfrom . import recaster\n\n\nclass _NonObjectMinimizeBase(recaster.SequentialRecastOptimizer):\n def __init__(\n self,\n parametrization: IntOrParameter,\n budget: tp.Optional[int] = None,\n num_workers: int = 1,\n *,\n method: str = \"Nelder-Mead\",\n random_restart: bool = False,\n ) -> None:\n super().__init__(parametrization, budget=budget, num_workers=num_workers)\n self.multirun = 1 # work in progress\n self._normalizer: tp.Any = None\n self.initial_guess: tp.Optional[tp.ArrayLike] = None\n # configuration\n assert (\n method\n in [\n \"CmaFmin2\",\n \"Nelder-Mead\",\n \"COBYLA\",\n \"SLSQP\",\n \"Powell\",\n ]\n or \"NLOPT\" in method\n ), f\"Unknown method '{method}'\"\n self.method = method\n self.random_restart = random_restart\n # The following line rescales to [0, 1] if fully bounded.\n\n if method == \"CmaFmin2\" or \"NLOPT\" in method:\n normalizer = p.helpers.Normalizer(self.parametrization)\n if normalizer.fully_bounded:\n self._normalizer = normalizer\n\n def _internal_tell_not_asked(self, candidate: p.Parameter, loss: tp.Loss) -> None:\n \"\"\"Called whenever calling \"tell\" on a candidate that was not \"asked\".\n Defaults to the standard tell pipeline.\n \"\"\" # We do not do anything; this just updates the current best.\n\n def get_optimization_function(self) -> tp.Callable[[tp.Callable[[tp.ArrayLike], float]], tp.ArrayLike]:\n return functools.partial(self._optimization_function, weakref.proxy(self))\n\n @staticmethod\n def _optimization_function(\n weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]\n ) -> tp.ArrayLike:\n # pylint:disable=unused-argument\n budget = np.inf if weakself.budget is None else weakself.budget\n best_res = np.inf\n best_x: np.ndarray = weakself.current_bests[\"average\"].x # np.zeros(self.dimension)\n if weakself.initial_guess is not None:\n best_x = np.array(weakself.initial_guess, copy=True) # copy, just to make sure it is not modified\n remaining: float = budget - weakself._num_ask\n while remaining > 0: # try to restart if budget is not elapsed\n options: tp.Dict[str, tp.Any] = {} if weakself.budget is None else {\"maxiter\": remaining}\n # options: tp.Dict[str, tp.Any] = {} if self.budget is None else {\"maxiter\": remaining}\n if weakself.method[:5] == \"NLOPT\":\n # This is NLOPT, used as in the PCSE simulator notebook.\n # ( https://github.com/ajwdewit/pcse_notebooks ).\n import nlopt\n\n def nlopt_objective_function(*args):\n data = np.asarray([arg for arg in args])[0]\n assert len(data) == weakself.dimension, (\n str(data) + \" does not have length \" + str(weakself.dimension)\n )\n if weakself._normalizer is not None:\n data = weakself._normalizer.backward(np.asarray(data, dtype=np.float32))\n return objective_function(data)\n\n # Sbplx (based on Subplex) is used by default.\n nlopt_param = (\n getattr(nlopt, weakself.method[6:]) if len(weakself.method) > 5 else nlopt.LN_SBPLX\n )\n opt = nlopt.opt(nlopt_param, weakself.dimension)\n # Assign the objective function calculator\n 
opt.set_min_objective(nlopt_objective_function)\n # Set the bounds.\n opt.set_lower_bounds(np.zeros(weakself.dimension))\n opt.set_upper_bounds(np.ones(weakself.dimension))\n # opt.set_initial_step([0.05, 0.05])\n opt.set_maxeval(budget)\n\n # Start the optimization with the first guess\n firstguess = 0.5 * np.ones(weakself.dimension)\n best_x = opt.optimize(firstguess)\n # print(\"\\noptimum at TDWI: %s, SPAN: %s\" % (x[0], x[1]))\n # print(\"minimum value = \", opt.last_optimum_value())\n # print(\"result code = \", opt.last_optimize_result())\n # print(\"With %i function calls\" % objfunc_calculator.n_calls)\n if weakself._normalizer is not None:\n best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32))\n\n elif weakself.method == \"CmaFmin2\":\n import cma # import inline in order to avoid matplotlib initialization warning\n\n def cma_objective_function(data):\n # Hopefully the line below does nothing if unbounded and rescales from [0, 1] if bounded.\n if weakself._normalizer is not None:\n data = weakself._normalizer.backward(np.asarray(data, dtype=np.float32))\n return objective_function(data)\n\n # cma.fmin2(objective_function, [0.0] * self.dimension, [1.0] * self.dimension, remaining)\n x0 = 0.5 * np.ones(weakself.dimension)\n num_calls = 0\n while budget - num_calls > 0:\n options = {\"maxfevals\": budget - num_calls, \"verbose\": -9}\n if weakself._normalizer is not None:\n # Tell CMA to work in [0, 1].\n options[\"bounds\"] = [0.0, 1.0]\n res = cma.fmin(\n cma_objective_function,\n x0=x0,\n sigma0=0.2,\n options=options,\n restarts=9,\n )\n x0 = 0.5 + np.random.uniform() * np.random.uniform(\n low=-0.5, high=0.5, size=weakself.dimension\n )\n if res[1] < best_res:\n best_res = res[1]\n best_x = res[0]\n if weakself._normalizer is not None:\n best_x = weakself._normalizer.backward(np.asarray(best_x, dtype=np.float32))\n num_calls += res[2]\n else:\n res = scipyoptimize.minimize(\n objective_function,\n best_x\n if not weakself.random_restart\n else weakself._rng.normal(0.0, 1.0, weakself.dimension),\n method=weakself.method,\n options=options,\n tol=0,\n )\n if res.fun < best_res:\n best_res = res.fun\n best_x = res.x\n remaining = budget - weakself._num_ask\n return best_x\n\n\nclass NonObjectOptimizer(base.ConfiguredOptimizer):\n \"\"\"Wrapper over Scipy optimizer implementations, in standard ask and tell format.\n This is actually an import from scipy-optimize, including Sequential Quadratic Programming,\n\n Parameters\n ----------\n method: str\n Name of the method to use among:\n\n - Nelder-Mead\n - COBYLA\n - SQP (or SLSQP): very powerful e.g. in continuous noisy optimization. 
It is based on\n approximating the objective function by quadratic models.\n - Powell\n - NLOPT* (https://nlopt.readthedocs.io/en/latest/; by default, uses Sbplx, based on Subplex);\n can be NLOPT,\n NLOPT_LN_SBPLX,\n NLOPT_LN_PRAXIS,\n NLOPT_GN_DIRECT,\n NLOPT_GN_DIRECT_L,\n NLOPT_GN_CRS2_LM,\n NLOPT_GN_AGS,\n NLOPT_GN_ISRES,\n NLOPT_GN_ESCH,\n NLOPT_LN_COBYLA,\n NLOPT_LN_BOBYQA,\n NLOPT_LN_NEWUOA_BOUND,\n NLOPT_LN_NELDERMEAD.\n random_restart: bool\n whether to restart at a random point if the optimizer converged but the budget is not entirely\n spent yet (otherwise, restarts from best point)\n\n Note\n ----\n These optimizers do not support asking several candidates in a row\n \"\"\"\n\n recast = True\n no_parallelization = True\n\n # pylint: disable=unused-argument\n def __init__(self, *, method: str = \"Nelder-Mead\", random_restart: bool = False) -> None:\n super().__init__(_NonObjectMinimizeBase, locals())\n\n\nNelderMead = NonObjectOptimizer(method=\"Nelder-Mead\").set_name(\"NelderMead\", register=True)\nCmaFmin2 = NonObjectOptimizer(method=\"CmaFmin2\").set_name(\"CmaFmin2\", register=True)\nNLOPT = NonObjectOptimizer(method=\"NLOPT\").set_name(\"NLOPT\", register=True)\nPowell = NonObjectOptimizer(method=\"Powell\").set_name(\"Powell\", register=True)\nRPowell = NonObjectOptimizer(method=\"Powell\", random_restart=True).set_name(\"RPowell\", register=True)\nCobyla = NonObjectOptimizer(method=\"COBYLA\").set_name(\"Cobyla\", register=True)\nRCobyla = NonObjectOptimizer(method=\"COBYLA\", random_restart=True).set_name(\"RCobyla\", register=True)\nSQP = NonObjectOptimizer(method=\"SLSQP\").set_name(\"SQP\", register=True)\nSLSQP = SQP # Just so that people who are familiar with SLSQP naming are not lost.\nRSQP = NonObjectOptimizer(method=\"SLSQP\", random_restart=True).set_name(\"RSQP\", register=True)\nRSLSQP = RSQP # Just so that people who are familiar with SLSQP naming are not lost.\n\n\nclass _PymooMinimizeBase(recaster.SequentialRecastOptimizer):\n def __init__(\n self,\n parametrization: IntOrParameter,\n budget: tp.Optional[int] = None,\n num_workers: int = 1,\n *,\n algorithm: str,\n ) -> None:\n super().__init__(parametrization, budget=budget, num_workers=num_workers)\n # configuration\n self.algorithm = algorithm\n self._no_hypervolume = True\n self._initial_seed = -1\n\n def get_optimization_function(self) -> tp.Callable[[tp.Callable[..., tp.Any]], tp.Optional[tp.ArrayLike]]:\n if self._initial_seed == -1:\n self._initial_seed = self._rng.randint(2**30)\n return functools.partial(self._optimization_function, weakref.proxy(self))\n # pylint:disable=useless-return\n\n @staticmethod\n def _optimization_function(\n weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]\n ) -> tp.Optional[tp.ArrayLike]:\n # pylint:disable=unused-argument, import-outside-toplevel\n from pymoo import optimize as pymoooptimize\n\n from pymoo.factory import get_algorithm as get_pymoo_algorithm\n\n # from pymoo.factory import get_reference_directions\n\n # reference direction code for when we want to use the other MOO optimizers in Pymoo\n # if self.algorithm in [\n # \"rnsga2\",\n # \"nsga3\",\n # \"unsga3\",\n # \"rnsga3\",\n # \"moead\",\n # \"ctaea\",\n # ]: # algorithms that require reference points or reference directions\n # the appropriate n_partitions must be looked into\n # ref_dirs = get_reference_directions(\"das-dennis\", self.num_objectives, n_partitions=12)\n # algorithm = get_pymoo_algorithm(self.algorithm, ref_dirs)\n # else:\n algorithm = 
get_pymoo_algorithm(weakself.algorithm)\n problem = _create_pymoo_problem(weakself, objective_function)\n pymoooptimize.minimize(problem, algorithm, seed=weakself._initial_seed)\n return None\n\n def _internal_ask_candidate(self) -> p.Parameter:\n \"\"\"\n Special version to make sure that num_objectives has been set before\n the proper _internal_ask_candidate, in our parent class, is called.\n \"\"\"\n if self.num_objectives == 0:\n # dummy ask i.e. not activating pymoo until num_objectives is set\n warnings.warn(\n \"with this optimizer, it is more efficient to set num_objectives before the optimization begins\",\n errors.NevergradRuntimeWarning,\n )\n # We need to get a datapoint that is a random point in parameter space,\n # and waste an evaluation on it.\n return self.parametrization.spawn_child()\n return super()._internal_ask_candidate()\n\n def _internal_tell_candidate(self, candidate: p.Parameter, loss: float) -> None:\n \"\"\"\n Special version to make sure that we the extra initial evaluation which\n we may have done in order to get num_objectives, is discarded.\n Note that this discarding means that the extra point will not make it into\n replay_archive_tell. Correspondingly, because num_objectives will make it into\n the pickle, __setstate__ will never need a dummy ask.\n \"\"\"\n if self._messaging_thread is None:\n return # dummy tell i.e. not activating pymoo until num_objectives is set\n super()._internal_tell_candidate(candidate, loss)\n\n def _post_loss(self, candidate: p.Parameter, loss: float) -> tp.Loss:\n # pylint: disable=unused-argument\n \"\"\"\n Multi-Objective override for this function.\n \"\"\"\n return candidate.losses\n\n\nclass Pymoo(base.ConfiguredOptimizer):\n \"\"\"Wrapper over Pymoo optimizer implementations, in standard ask and tell format.\n This is actually an import from Pymoo Optimize.\n\n Parameters\n ----------\n algorithm: str\n\n Use \"algorithm-name\" with following names to access algorithm classes:\n Single-Objective\n -\"de\"\n -'ga'\n -\"brkga\"\n -\"nelder-mead\"\n -\"pattern-search\"\n -\"cmaes\"\n Multi-Objective\n -\"nsga2\"\n Multi-Objective requiring reference directions, points or lines\n -\"rnsga2\"\n -\"nsga3\"\n -\"unsga3\"\n -\"rnsga3\"\n -\"moead\"\n -\"ctaea\"\n\n Note\n ----\n These optimizers do not support asking several candidates in a row\n \"\"\"\n\n recast = True\n no_parallelization = True\n\n # pylint: disable=unused-argument\n def __init__(self, *, algorithm: str) -> None:\n super().__init__(_PymooMinimizeBase, locals())\n\n\nclass _PymooBatchMinimizeBase(recaster.BatchRecastOptimizer):\n\n # pylint: disable=abstract-method\n\n def __init__(\n self,\n parametrization: IntOrParameter,\n budget: tp.Optional[int] = None,\n num_workers: int = 1,\n *,\n algorithm: str,\n ) -> None:\n super().__init__(parametrization, budget=budget, num_workers=num_workers)\n # configuration\n self.algorithm = algorithm\n self._no_hypervolume = True\n self._initial_seed = -1\n\n def get_optimization_function(self) -> tp.Callable[[tp.Callable[..., tp.Any]], tp.Optional[tp.ArrayLike]]:\n if self._initial_seed == -1:\n self._initial_seed = self._rng.randint(2**30)\n return functools.partial(self._optimization_function, weakref.proxy(self))\n # pylint:disable=useless-return\n\n @staticmethod\n def _optimization_function(\n weakself: tp.Any, objective_function: tp.Callable[[tp.ArrayLike], float]\n ) -> tp.Optional[tp.ArrayLike]:\n # pylint:disable=unused-argument, import-outside-toplevel\n from pymoo import optimize as pymoooptimize\n\n from 
pymoo.factory import get_algorithm as get_pymoo_algorithm\n\n # from pymoo.factory import get_reference_directions\n\n # reference direction code for when we want to use the other MOO optimizers in Pymoo\n # if self.algorithm in [\n # \"rnsga2\",\n # \"nsga3\",\n # \"unsga3\",\n # \"rnsga3\",\n # \"moead\",\n # \"ctaea\",\n # ]: # algorithms that require reference points or reference directions\n # the appropriate n_partitions must be looked into\n # ref_dirs = get_reference_directions(\"das-dennis\", self.num_objectives, n_partitions=12)\n # algorithm = get_pymoo_algorithm(self.algorithm, ref_dirs)\n # else:\n algorithm = get_pymoo_algorithm(weakself.algorithm)\n problem = _create_pymoo_problem(weakself, objective_function, False)\n pymoooptimize.minimize(problem, algorithm, seed=weakself._initial_seed)\n return None\n\n def _internal_ask_candidate(self) -> p.Parameter:\n \"\"\"Reads messages from the thread in which the underlying optimization function is running\n New messages are sent as \"ask\".\n \"\"\"\n # get a datapoint that is a random point in parameter space\n if self.num_objectives == 0: # dummy ask i.e. not activating pymoo until num_objectives is set\n warnings.warn(\n \"with this optimizer, it is more efficient to set num_objectives before the optimization begins\",\n errors.NevergradRuntimeWarning,\n )\n return self.parametrization.spawn_child()\n return super()._internal_ask_candidate()\n\n def _internal_tell_candidate(self, candidate: p.Parameter, loss: float) -> None:\n \"\"\"Returns value for a point which was \"asked\"\n (none asked point cannot be \"tell\")\n \"\"\"\n if self._messaging_thread is None:\n return # dummy tell i.e. not activating pymoo until num_objectives is set\n super()._internal_tell_candidate(candidate, loss)\n\n def _post_loss(self, candidate: p.Parameter, loss: float) -> tp.Loss:\n # pylint: disable=unused-argument\n \"\"\"\n Multi-Objective override for this function.\n \"\"\"\n return candidate.losses\n\n\nclass PymooBatch(base.ConfiguredOptimizer):\n \"\"\"Wrapper over Pymoo optimizer implementations, in standard ask and tell format.\n This is actually an import from Pymoo Optimize.\n\n Parameters\n ----------\n algorithm: str\n\n Use \"algorithm-name\" with following names to access algorithm classes:\n Single-Objective\n -\"de\"\n -'ga'\n -\"brkga\"\n -\"nelder-mead\"\n -\"pattern-search\"\n -\"cmaes\"\n Multi-Objective\n -\"nsga2\"\n Multi-Objective requiring reference directions, points or lines\n -\"rnsga2\"\n -\"nsga3\"\n -\"unsga3\"\n -\"rnsga3\"\n -\"moead\"\n -\"ctaea\"\n\n Note\n ----\n These optimizers do not support asking several candidates in a row\n \"\"\"\n\n recast = True\n\n # pylint: disable=unused-argument\n def __init__(self, *, algorithm: str) -> None:\n super().__init__(_PymooBatchMinimizeBase, locals())\n\n\ndef _create_pymoo_problem(\n optimizer: base.Optimizer,\n objective_function: tp.Callable[[tp.ArrayLike], float],\n elementwise: bool = True,\n):\n kwargs = {}\n try:\n # pylint:disable=import-outside-toplevel\n from pymoo.core.problem import ElementwiseProblem, Problem # type: ignore\n\n Base = ElementwiseProblem if elementwise else Problem\n except ImportError:\n # Used if pymoo < 0.5.0\n # pylint:disable=import-outside-toplevel\n from pymoo.model.problem import Problem as Base # type: ignore\n\n kwargs = {\"elementwise_evaluation\": elementwise}\n\n class _PymooProblem(Base): # type: ignore\n def __init__(self, optimizer, objective_function):\n self.objective_function = objective_function\n super().__init__(\n 
n_var=optimizer.dimension,\n n_obj=optimizer.num_objectives,\n n_constr=0, # constraints handled already by nevergrad\n xl=-math.pi * 0.5,\n xu=math.pi * 0.5,\n **kwargs,\n )\n\n def _evaluate(self, X, out, *args, **kwargs):\n # pylint:disable=unused-argument\n # pymoo is supplying us with bounded parameters in [-pi/2,pi/2]. Nevergrad wants unbounded reals from us.\n out[\"F\"] = self.objective_function(np.tan(X))\n\n return _PymooProblem(optimizer, objective_function)\n\n\nPymooNSGA2 = Pymoo(algorithm=\"nsga2\").set_name(\"PymooNSGA2\", register=True)\nPymooBatchNSGA2 = PymooBatch(algorithm=\"nsga2\").set_name(\"PymooBatchNSGA2\", register=False)\n"
] | [
[
"numpy.random.uniform",
"numpy.ones",
"numpy.zeros",
"numpy.asarray",
"numpy.tan",
"numpy.array"
]
] |
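For the scipy branch of `_optimization_function` in recastlib.py above, the essential pattern is a restart loop around `scipy.optimize.minimize` that keeps the best result until the evaluation budget is spent. The sketch below reproduces that pattern on a toy objective; the function names and budget bookkeeping are simplified assumptions, not nevergrad's API.

```python
# Restart scipy.optimize.minimize until a function-evaluation budget is used,
# optionally restarting from a random point, and keep the best (x, f(x)) seen.
import numpy as np
from scipy import optimize as scipyoptimize

def budgeted_minimize(objective, dim, budget, method="Nelder-Mead",
                      random_restart=False, seed=0):
    rng = np.random.default_rng(seed)
    calls = 0
    best_x, best_res = np.zeros(dim), np.inf

    def counted(x):
        nonlocal calls
        calls += 1
        return objective(x)

    while calls < budget:
        x0 = rng.normal(size=dim) if random_restart else best_x
        res = scipyoptimize.minimize(counted, x0, method=method,
                                     options={"maxiter": budget - calls}, tol=0)
        if res.fun < best_res:
            best_res, best_x = res.fun, res.x
    return best_x, best_res

sphere = lambda x: float(np.sum((np.asarray(x) - 1.0) ** 2))
print(budgeted_minimize(sphere, dim=3, budget=300))
```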
pplonski/automlbenchmark | [
"f49ddfa2583643173296ed8ab45a8c14c62a6987"
] | [
"reports/report/visualizations/linplot.py"
] | [
"import matplotlib as mp\nimport pandas as pd\nimport seaborn as sb\n\nimport report.config as config\nfrom ..util import create_file, sort_dataframe\nfrom .util import savefig, set_scales, set_labels, task_labels\n\n\ndef draw_parallel_coord(df, class_column,\n x_labels=True, yscale='linear',\n title=None, xlabel=None, ylabel=None,\n legend_loc='best', legend_title=None, colormap=None):\n colormap = config.colormap if colormap is None else colormap\n with sb.axes_style('ticks', rc={'grid.linestyle': 'dotted'}), sb.plotting_context('paper'):\n # print(sb.axes_style())\n parallel_fig = mp.pyplot.figure(dpi=120, figsize=(10, df.shape[0]))\n # select the first colors from the colormap to ensure we use the same colors as in the stripplot later\n colors = mp.cm.get_cmap(colormap).colors[:len(df[class_column].unique())]\n axes = pd.plotting.parallel_coordinates(df,\n class_column=class_column,\n color=colors,\n axvlines=False,\n )\n set_scales(axes, yscale=yscale)\n handles, labels = axes.get_legend_handles_labels()\n axes.legend(handles, labels, loc=legend_loc, title=legend_title)\n set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel, x_labels=x_labels,\n x_tick_params=dict(labelrotation=90))\n return parallel_fig\n\n\ndef draw_score_parallel_coord(col, results, type_filter='all', metadata=None,\n x_sort_by='name', ylabel=None, filename=None,\n **kwargs):\n res_group = results.groupby(['type', 'task', 'framework'])\n df = res_group[col].mean().unstack(['type', 'task'])\n df = df if type_filter == 'all' \\\n else df.iloc[:, df.columns.get_loc(type_filter)]\n if metadata:\n sort_by = lambda cols: getattr(metadata[cols[1]], x_sort_by)\n df = sort_dataframe(df, by=sort_by, axis=1)\n df.reset_index(inplace=True)\n fig = draw_parallel_coord(df,\n 'framework',\n x_labels=task_labels(df.columns.drop('framework')),\n # xlabel=\"Task\",\n ylabel=ylabel or \"Score\",\n legend_title=\"Framework\",\n **kwargs)\n if filename:\n savefig(fig, create_file(\"graphics\", config.results_group, filename))\n return fig\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.cm.get_cmap",
"pandas.plotting.parallel_coordinates"
]
] |
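The plotting helper in linplot.py above is essentially a styled wrapper around `pandas.plotting.parallel_coordinates`. A minimal standalone version with a made-up score table, assuming nothing beyond pandas and matplotlib, might look like this:

```python
# One line per framework, one axis per task, coloured by the class column.
import matplotlib
matplotlib.use("Agg")  # render off-screen so the example also runs headless
import matplotlib.pyplot as plt
import pandas as pd

df = pd.DataFrame({
    "framework": ["autosklearn", "h2o", "tpot"],   # illustrative values only
    "task_a": [0.81, 0.79, 0.75],
    "task_b": [0.92, 0.94, 0.90],
    "task_c": [0.66, 0.61, 0.70],
})

fig = plt.figure(figsize=(6, 3), dpi=120)
ax = pd.plotting.parallel_coordinates(df, class_column="framework", axvlines=False)
ax.set_ylabel("Score")
ax.tick_params(axis="x", labelrotation=90)
ax.legend(title="Framework", loc="best")
fig.tight_layout()
fig.savefig("parallel_coords_demo.png")
```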
semeniuta/pdata | [
"5eb6ece8e2fb1856bc87ed76290240cd901f7654"
] | [
"pdata/dirstructure.py"
] | [
"import os\nfrom glob import glob\nimport pandas as pd\n\n\ndef get_list_of_full_child_dirs(d):\n \"\"\"\n For a directory d (full path), \n return a list of its subdirectories \n in a full path form.\n \"\"\"\n\n children = (os.path.join(d, child) for child in os.listdir(d))\n dirs = filter(os.path.isdir, children)\n\n return list(dirs)\n\n\ndef split_full_path(full_path, base_dir):\n \"\"\"\n Given a full path, return:\n \n - relative_dir: the part of the path that does not \n include the base directory and the basename\n - basename\n \"\"\"\n\n fname = os.path.basename(full_path)\n\n relative_path = full_path.split(base_dir)[-1]\n relative_dir = relative_path.split(fname)[0]\n relative_dir = relative_dir[1:-1] # clip slashes\n\n return relative_dir, fname\n\n\ndef gather_files(base_dir, file_mask):\n \"\"\"\n Walk the directory base_dir using os.walk\n and gather files that match file_mask (e.g. '*.jpg'). \n Return the result as a Pandas dataframe with columns \n 'relative_dir' and 'basename'.\n \"\"\"\n\n res_tuples = []\n\n for dir_name, subdirs, files in os.walk(base_dir):\n\n dir_has_files = len(files) > 0\n\n if dir_has_files:\n\n full_mask = os.path.join(dir_name, file_mask)\n mask_matches = glob(full_mask)\n\n res_tuples += [split_full_path(f, base_dir) for f in mask_matches]\n\n return pd.DataFrame(res_tuples, columns=['relative_dir', 'basename'])\n"
] | [
[
"pandas.DataFrame"
]
] |
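To see what `gather_files` in dirstructure.py above produces, the self-contained sketch below builds a temporary directory tree and mirrors the module's `os.walk` + `glob` logic (using `os.path.relpath` in place of `split_full_path`), since the `pdata` package may not be installed; the paths and file names are invented for the demo.

```python
# Walk a throwaway directory tree, keep *.jpg files, and collect
# (relative_dir, basename) rows into a DataFrame, as gather_files does.
import os, tempfile
from glob import glob
import pandas as pd

with tempfile.TemporaryDirectory() as base_dir:
    for rel in ["camA/session1", "camB"]:
        os.makedirs(os.path.join(base_dir, rel))
    for rel_file in ["camA/session1/img001.jpg", "camA/session1/notes.txt",
                     "camB/img002.jpg"]:
        open(os.path.join(base_dir, rel_file), "w").close()

    rows = []
    for dir_name, _subdirs, files in os.walk(base_dir):
        if files:
            for full in glob(os.path.join(dir_name, "*.jpg")):
                rel_dir = os.path.relpath(os.path.dirname(full), base_dir)
                rows.append((rel_dir, os.path.basename(full)))

    # Expect one row per .jpg: camA/session1 -> img001.jpg, camB -> img002.jpg
    print(pd.DataFrame(rows, columns=["relative_dir", "basename"]))
```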
wjmaddox/pytorch_ess | [
"8e189666ce7381cf760666464384c634abbc4be2"
] | [
"pytorch_ess/mean_elliptical_slice.py"
] | [
"import torch\n\nfrom .elliptical_slice import EllipticalSliceSampler\n\n\nclass MeanEllipticalSliceSampler(EllipticalSliceSampler):\n def __init__(self, f_init, dist, lnpdf, nsamples, pdf_params=()):\n \"\"\"\n Implementation of elliptical slice sampling (Murray, Adams, & Mckay, 2010).\n f_init: initial value of `f`\n dist: multivariate normal to sample from to sample from\n lnpdf: likelihood function\n n_samples: number of samples\n pdf_params: callable arguments for lnpdf\n \"\"\"\n mean_vector = dist.mean\n\n demeaned_lnpdf = lambda g: lnpdf(g + mean_vector, *pdf_params)\n\n demeaned_init = f_init - mean_vector\n\n samples = dist.sample(sample_shape = torch.Size((nsamples,))).transpose(-1, -2)\n demeaned_samples = samples - mean_vector.unsqueeze(1)\n\n super(MeanEllipticalSliceSampler, self).__init__(demeaned_init, demeaned_samples, demeaned_lnpdf, nsamples, pdf_params=())\n\n self.mean_vector = mean_vector\n\n def run(self):\n self.f_sampled, self.ell = super().run()\n\n #add means back into f_sampled\n self.f_sampled = self.f_sampled + self.mean_vector.unsqueeze(1)\n\n return self.f_sampled, self.ell"
] | [
[
"torch.Size"
]
] |
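The wrapper in mean_elliptical_slice.py above reduces sampling under an N(mu, Sigma) prior to the zero-mean case: it works with g = f - mu, evaluates the likelihood at g + mu, and adds mu back to the returned samples. The sketch below only checks that change-of-variables bookkeeping with torch on toy values; it is not the sampler itself.

```python
# Verify that demeaning the state and shifting the likelihood leaves the
# log-likelihood unchanged, and that demeaned prior draws are ~zero mean.
import torch

torch.manual_seed(0)
mu = torch.tensor([1.0, -2.0])
cov = torch.tensor([[1.0, 0.3], [0.3, 0.5]])
dist = torch.distributions.MultivariateNormal(mu, covariance_matrix=cov)

lnpdf = lambda f: -0.5 * (f ** 2).sum()          # likelihood on the original f
demeaned_lnpdf = lambda g: lnpdf(g + mu)         # same likelihood, seen from g

f_init = torch.tensor([0.5, 0.5])
g_init = f_init - mu
samples = dist.sample(torch.Size((1000,))) - mu  # demeaned prior draws

print(torch.allclose(demeaned_lnpdf(g_init), lnpdf(f_init)))  # True
print(samples.mean(dim=0))                        # close to the zero vector
```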
tbcole/majoranaJJ | [
"dcf31f7786fa0a4874a940b7d8dcdd55f3921a46"
] | [
"demos/sparse_op/wfuncs/H0/donut.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.sparse.linalg as spLA\n\nimport majoranaJJ.operators.sparse.qmsops as spop #sparse operators\nimport majoranaJJ.lattice.nbrs as nb #neighbor arrays\nimport majoranaJJ.lattice.shapes as shps #lattice shapes\nimport majoranaJJ.modules.plots as plots #plotting functions\n\nR = 50\nr = 15\nax = 10 #[A]\nay = 10 #[A]\n\ncoor = shps.donut(R, r)\nNN = nb.NN_Arr(coor)\nprint(\"lattice size\", coor.shape[0])\n\nalpha = 0 #Spin-Orbit Coupling constant: [eV*A]\ngammaz = 0 #Zeeman field energy contribution: [T]\ndelta = 0 #Superconducting Gap: [eV]\nV0 = 0.0 #Amplitude of potential : [eV]\nmu = 0 #Chemical Potential: [eV]\n\nH = spop.H0(coor, ax, ay, NN)\nprint(\"H shape: \", H.shape)\n\nnum = 75 # This is the number of eigenvalues and eigenvectors you want\nsigma = 0 # This is the eigenvalue we search around\nwhich = 'LM'\neigs, vecs = spLA.eigsh(H, k = num, sigma = sigma, which = which)\n\nplots.state_cmap(coor, eigs, vecs, n = 0, title = 'SPARSE Free Particle Ground State')\nn = 39\nplots.state_cmap(coor, eigs, vecs, n = n, title = 'SPARSE: Excited State # {}'.format(n))\n"
] | [
[
"scipy.sparse.linalg.eigsh"
]
] |
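The donut.py demo above hinges on shift-invert `scipy.sparse.linalg.eigsh` (sigma=0) to pull out the eigenpairs nearest a target energy. The sketch below makes the same call on a plain 1-D tight-binding chain so it runs without the majoranaJJ package; the chain size and hopping amplitude are arbitrary.

```python
# Build a sparse tridiagonal Hamiltonian and ask eigsh for the 10 eigenpairs
# closest to sigma=0 via shift-invert mode (which="LM" on the inverted spectrum).
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spLA

n, t = 400, 1.0
diag = np.zeros(n)
hop = -t * np.ones(n - 1)
H = sp.diags([hop, diag, hop], offsets=[-1, 0, 1], format="csc")

num, sigma = 10, 0.0                      # 10 eigenvalues closest to sigma
eigs, vecs = spLA.eigsh(H, k=num, sigma=sigma, which="LM")
print(np.sort(eigs))                      # band energies nearest 0
print(vecs.shape)                         # (400, 10)
```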
Grusinator/BirdClassification | [
"c78ca3dbf70c2509c79ca4641102a2d725084d2a"
] | [
"lib/utils/SegDataGenerator.py"
] | [
"from keras.preprocessing.image import *\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras import backend as K\nfrom PIL import Image\nimport numpy as np\nimport os\n#import cv2\n\n\ndef center_crop(x, center_crop_size, data_format, **kwargs):\n if data_format == 'channels_first':\n centerh, centerw = x.shape[1] // 2, x.shape[2] // 2\n elif data_format == 'channels_last':\n centerh, centerw = x.shape[0] // 2, x.shape[1] // 2\n lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2\n rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw\n\n h_start, h_end = centerh - lh, centerh + rh\n w_start, w_end = centerw - lw, centerw + rw\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :]\n\n\ndef pair_center_crop(x, y, center_crop_size, data_format, **kwargs):\n if data_format == 'channels_first':\n centerh, centerw = x.shape[1] // 2, x.shape[2] // 2\n elif data_format == 'channels_last':\n centerh, centerw = x.shape[0] // 2, x.shape[1] // 2\n lh, lw = center_crop_size[0] // 2, center_crop_size[1] // 2\n rh, rw = center_crop_size[0] - lh, center_crop_size[1] - lw\n\n h_start, h_end = centerh - lh, centerh + rh\n w_start, w_end = centerw - lw, centerw + rw\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end], \\\n y[:, h_start:h_end, w_start:w_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :], \\\n y[h_start:h_end, w_start:w_end, :]\n\n\ndef random_crop(x, random_crop_size, data_format, sync_seed=None, **kwargs):\n np.random.seed(sync_seed)\n if data_format == 'channels_first':\n h, w = x.shape[1], x.shape[2]\n elif data_format == 'channels_last':\n h, w = x.shape[0], x.shape[1]\n rangeh = (h - random_crop_size[0]) // 2\n rangew = (w - random_crop_size[1]) // 2\n offseth = 0 if rangeh == 0 else np.random.randint(rangeh)\n offsetw = 0 if rangew == 0 else np.random.randint(rangew)\n\n h_start, h_end = offseth, offseth + random_crop_size[0]\n w_start, w_end = offsetw, offsetw + random_crop_size[1]\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :]\n\n\ndef pair_random_crop(x, y, random_crop_size, data_format, sync_seed=None, **kwargs):\n np.random.seed(sync_seed)\n if data_format == 'channels_first':\n h, w = x.shape[1], x.shape[2]\n elif data_format == 'channels_last':\n h, w = x.shape[0], x.shape[1]\n rangeh = (h - random_crop_size[0]) // 2\n rangew = (w - random_crop_size[1]) // 2\n offseth = 0 if rangeh == 0 else np.random.randint(rangeh)\n offsetw = 0 if rangew == 0 else np.random.randint(rangew)\n\n h_start, h_end = offseth, offseth + random_crop_size[0]\n w_start, w_end = offsetw, offsetw + random_crop_size[1]\n if data_format == 'channels_first':\n return x[:, h_start:h_end, w_start:w_end], y[:, h_start:h_end, h_start:h_end]\n elif data_format == 'channels_last':\n return x[h_start:h_end, w_start:w_end, :], y[h_start:h_end, w_start:w_end, :]\n\n\nclass SegDirectoryIterator(Iterator):\n '''\n Users need to ensure that all files exist.\n Label images should be png images where pixel values represents class number.\n\n find images -name *.jpg > images.txt\n find labels -name *.png > labels.txt\n\n for a file name 2011_002920.jpg, each row should contain 2011_002920\n\n file_path: location of train.txt, or val.txt in PASCAL VOC2012 format,\n listing image file path 
components without extension\n data_dir: location of image files referred to by file in file_path\n label_dir: location of label files\n data_suffix: image file extension, such as `.jpg` or `.png`\n label_suffix: label file suffix, such as `.png`, or `.npy`\n loss_shape: shape to use when applying loss function to the label data\n '''\n\n def __init__(self, file_path, seg_data_generator,\n data_dir, data_suffix,\n label_dir, label_suffix, classes, ignore_label=255,\n crop_mode='none', label_cval=255, pad_size=None,\n target_size=None, color_mode='rgb',\n data_format='default', class_mode='sparse',\n batch_size=1, shuffle=True, seed=None,\n save_to_dir=None, save_prefix='', save_format='jpeg',\n loss_shape=None):\n if data_format == 'default':\n data_format = K.image_data_format()\n self.file_path = file_path\n self.data_dir = data_dir\n self.data_suffix = data_suffix\n self.label_suffix = label_suffix\n self.label_dir = label_dir\n self.classes = classes\n self.seg_data_generator = seg_data_generator\n self.target_size = tuple(target_size)\n self.ignore_label = ignore_label\n self.crop_mode = crop_mode\n self.label_cval = label_cval\n self.pad_size = pad_size\n if color_mode not in {'rgb', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\" or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n self.nb_label_ch = 1\n self.loss_shape = loss_shape\n\n if (self.label_suffix == '.npy') or (self.label_suffix == 'npy'):\n self.label_file_format = 'npy'\n else:\n self.label_file_format = 'img'\n if target_size:\n if self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n if self.data_format == 'channels_last':\n self.label_shape = self.target_size + (self.nb_label_ch,)\n else:\n self.label_shape = (self.nb_label_ch,) + self.target_size\n elif batch_size != 1:\n raise ValueError(\n 'Batch size must be 1 when target image size is undetermined')\n else:\n self.image_shape = None\n self.label_shape = None\n if class_mode not in {'sparse', None}:\n raise ValueError('Invalid class_mode:', class_mode,\n '; expected one of '\n '\"sparse\", or None.')\n self.class_mode = class_mode\n if save_to_dir:\n self.palette = None\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n\n white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'npy'}\n\n # build lists for data files and label files\n self.data_files = []\n self.label_files = []\n fp = open(file_path)\n lines = fp.readlines()\n fp.close()\n self.nb_sample = len(lines)\n for line in lines:\n line = line.strip('\\n')\n self.data_files.append(line + data_suffix)\n self.label_files.append(line + label_suffix)\n super(SegDirectoryIterator, self).__init__(\n self.nb_sample, batch_size, shuffle, seed)\n\n def next(self):\n with self.lock:\n index_array, current_index, current_batch_size = next(\n self.index_generator)\n\n # The transformation of images is not under thread lock so it can be\n # done in parallel\n if self.target_size:\n # TODO(ahundt) make dtype properly configurable\n batch_x = np.zeros((current_batch_size,) + self.image_shape)\n if self.loss_shape is None and self.label_file_format is 'img':\n batch_y = np.zeros((current_batch_size,) + self.label_shape,\n dtype=int)\n elif 
self.loss_shape is None:\n batch_y = np.zeros((current_batch_size,) + self.label_shape)\n else:\n batch_y = np.zeros((current_batch_size,) + self.loss_shape,\n dtype=np.uint8)\n grayscale = self.color_mode == 'grayscale'\n # build batch of image data and labels\n for i, j in enumerate(index_array):\n data_file = self.data_files[j]\n label_file = self.label_files[j]\n img_file_format = 'img'\n img = load_img(os.path.join(self.data_dir, data_file),\n grayscale=grayscale, target_size=None)\n label_filepath = os.path.join(self.label_dir, label_file)\n\n if self.label_file_format == 'npy':\n y = np.load(label_filepath)\n else:\n label = Image.open(label_filepath)\n if self.save_to_dir and self.palette is None:\n self.palette = label.palette\n\n # do padding\n if self.target_size:\n if self.crop_mode != 'none':\n x = img_to_array(img, data_format=self.data_format)\n if self.label_file_format is not 'npy':\n y = img_to_array(\n label, data_format=self.data_format).astype(int)\n img_w, img_h = img.size\n if self.pad_size:\n pad_w = max(self.pad_size[1] - img_w, 0)\n pad_h = max(self.pad_size[0] - img_h, 0)\n else:\n pad_w = max(self.target_size[1] - img_w, 0)\n pad_h = max(self.target_size[0] - img_h, 0)\n if self.data_format == 'channels_first':\n x = np.lib.pad(x, ((0, 0), (pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2)), 'constant', constant_values=0.)\n y = np.lib.pad(y, ((0, 0), (pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2)),\n 'constant', constant_values=self.label_cval)\n elif self.data_format == 'channels_last':\n x = np.lib.pad(x, ((pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2), (0, 0)), 'constant', constant_values=0.)\n y = np.lib.pad(y, ((pad_h / 2, pad_h - pad_h / 2), (pad_w / 2, pad_w - pad_w / 2), (0, 0)), 'constant', constant_values=self.label_cval)\n else:\n x = img_to_array(img.resize((self.target_size[1], self.target_size[0]),\n Image.BILINEAR),\n data_format=self.data_format)\n if self.label_file_format is not 'npy':\n y = img_to_array(label.resize((self.target_size[1], self.target_size[\n 0]), Image.NEAREST), data_format=self.data_format).astype(int)\n else:\n print('ERROR: resize not implemented for label npy file')\n\n if self.target_size is None:\n batch_x = np.zeros((current_batch_size,) + x.shape)\n if self.loss_shape is not None:\n batch_y = np.zeros((current_batch_size,) + self.loss_shape)\n else:\n batch_y = np.zeros((current_batch_size,) + y.shape)\n\n x, y = self.seg_data_generator.random_transform(x, y)\n x = self.seg_data_generator.standardize(x)\n\n if self.ignore_label:\n y[np.where(y == self.ignore_label)] = self.classes\n\n if self.loss_shape is not None:\n y = np.reshape(y, self.loss_shape)\n\n batch_x[i] = x\n batch_y[i] = y\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i in range(current_batch_size):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n label = batch_y[i][:, :, 0].astype('uint8')\n label[np.where(label == self.classes)] = self.ignore_label\n label = Image.fromarray(label, mode='P')\n label.palette = self.palette\n fname = '{prefix}_{index}_{hash}'.format(prefix=self.save_prefix,\n index=current_index + i,\n hash=np.random.randint(1e4))\n img.save(os.path.join(self.save_to_dir, 'img_' +\n fname + '.{format}'.format(format=self.save_format)))\n label.save(os.path.join(self.save_to_dir,\n 'label_' + fname + '.png'))\n # return\n batch_x = preprocess_input(batch_x)\n if self.class_mode == 'sparse':\n return batch_x, batch_y\n else:\n 
return batch_x\n\n\nclass SegDataGenerator(object):\n\n def __init__(self,\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n channelwise_center=False,\n rotation_range=0.,\n width_shift_range=0.,\n height_shift_range=0.,\n shear_range=0.,\n zoom_range=0.,\n zoom_maintain_shape=True,\n channel_shift_range=0.,\n fill_mode='constant',\n cval=0.,\n label_cval=255,\n crop_mode='none',\n crop_size=(0, 0),\n pad_size=None,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=None,\n data_format='default'):\n if data_format == 'default':\n data_format = K.image_data_format()\n self.__dict__.update(locals())\n self.mean = None\n self.ch_mean = None\n self.std = None\n self.principal_components = None\n self.rescale = rescale\n\n if data_format not in {'channels_last', 'channels_first'}:\n raise Exception('data_format should be channels_last (channel after row and '\n 'column) or channels_first (channel before row and column). '\n 'Received arg: ', data_format)\n if crop_mode not in {'none', 'random', 'center'}:\n raise Exception('crop_mode should be \"none\" or \"random\" or \"center\" '\n 'Received arg: ', crop_mode)\n self.data_format = data_format\n if data_format == 'channels_first':\n self.channel_index = 1\n self.row_index = 2\n self.col_index = 3\n if data_format == 'channels_last':\n self.channel_index = 3\n self.row_index = 1\n self.col_index = 2\n\n if np.isscalar(zoom_range):\n self.zoom_range = [1 - zoom_range, 1 + zoom_range]\n elif len(zoom_range) == 2:\n self.zoom_range = [zoom_range[0], zoom_range[1]]\n else:\n raise Exception('zoom_range should be a float or '\n 'a tuple or list of two floats. '\n 'Received arg: ', zoom_range)\n\n def flow_from_directory(self, file_path, data_dir, data_suffix,\n label_dir, label_suffix, classes,\n ignore_label=255,\n target_size=None, color_mode='rgb',\n class_mode='sparse',\n batch_size=32, shuffle=True, seed=None,\n save_to_dir=None, save_prefix='', save_format='jpeg',\n loss_shape=None):\n if self.crop_mode == 'random' or self.crop_mode == 'center':\n target_size = self.crop_size\n return SegDirectoryIterator(\n file_path, self,\n data_dir=data_dir, data_suffix=data_suffix,\n label_dir=label_dir, label_suffix=label_suffix,\n classes=classes, ignore_label=ignore_label,\n crop_mode=self.crop_mode, label_cval=self.label_cval,\n pad_size=self.pad_size,\n target_size=target_size, color_mode=color_mode,\n data_format=self.data_format, class_mode=class_mode,\n batch_size=batch_size, shuffle=shuffle, seed=seed,\n save_to_dir=save_to_dir, save_prefix=save_prefix,\n save_format=save_format,\n loss_shape=loss_shape)\n\n def standardize(self, x):\n if self.rescale:\n x *= self.rescale\n # x is a single image, so it doesn't have image number at index 0\n img_channel_index = self.channel_index - 1\n if self.samplewise_center:\n x -= np.mean(x, axis=img_channel_index, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)\n\n if self.featurewise_center:\n x -= self.mean\n if self.featurewise_std_normalization:\n x /= (self.std + 1e-7)\n\n if self.channelwise_center:\n x -= self.ch_mean\n return x\n\n def random_transform(self, x, y):\n # x is a single image, so it doesn't have image number at index 0\n img_row_index = self.row_index - 1\n img_col_index = self.col_index - 1\n img_channel_index = self.channel_index - 1\n if self.crop_mode == 'none':\n crop_size = (x.shape[img_row_index], 
x.shape[img_col_index])\n else:\n crop_size = self.crop_size\n\n assert x.shape[img_row_index] == y.shape[img_row_index] and x.shape[img_col_index] == y.shape[\n img_col_index], 'DATA ERROR: Different shape of data and label!\\ndata shape: %s, label shape: %s' % (str(x.shape), str(y.shape))\n\n # use composition of homographies to generate final transform that\n # needs to be applied\n if self.rotation_range:\n theta = np.pi / 180 * \\\n np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n if self.height_shift_range:\n # * x.shape[img_row_index]\n tx = np.random.uniform(-self.height_shift_range,\n self.height_shift_range) * crop_size[0]\n else:\n tx = 0\n\n if self.width_shift_range:\n # * x.shape[img_col_index]\n ty = np.random.uniform(-self.width_shift_range,\n self.width_shift_range) * crop_size[1]\n else:\n ty = 0\n\n translation_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n shear_matrix = np.array([[1, -np.sin(shear), 0],\n [0, np.cos(shear), 0],\n [0, 0, 1]])\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(\n self.zoom_range[0], self.zoom_range[1], 2)\n if self.zoom_maintain_shape:\n zy = zx\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n\n transform_matrix = np.dot(\n np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)\n\n h, w = x.shape[img_row_index], x.shape[img_col_index]\n transform_matrix = transform_matrix_offset_center(\n transform_matrix, h, w)\n\n x = apply_transform(x, transform_matrix, img_channel_index,\n fill_mode=self.fill_mode, cval=self.cval)\n y = apply_transform(y, transform_matrix, img_channel_index,\n fill_mode='constant', cval=self.label_cval)\n\n if self.channel_shift_range != 0:\n x = random_channel_shift(\n x, self.channel_shift_range, img_channel_index)\n\n if self.horizontal_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_col_index)\n y = flip_axis(y, img_col_index)\n\n if self.vertical_flip:\n if np.random.random() < 0.5:\n x = flip_axis(x, img_row_index)\n y = flip_axis(y, img_row_index)\n\n if self.crop_mode == 'center':\n x, y = pair_center_crop(x, y, self.crop_size, self.data_format)\n elif self.crop_mode == 'random':\n x, y = pair_random_crop(x, y, self.crop_size, self.data_format)\n\n # TODO:\n # channel-wise normalization\n # barrel/fisheye\n return x, y\n\n def fit(self, X,\n augment=False,\n rounds=1,\n seed=None):\n '''Required for featurewise_center and featurewise_std_normalization\n\n # Arguments\n X: Numpy array, the data to fit on.\n augment: whether to fit on randomly augmented samples\n rounds: if `augment`,\n how many augmentation passes to do over the data\n seed: random seed.\n '''\n X = np.copy(X)\n if augment:\n aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))\n for r in range(rounds):\n for i in range(X.shape[0]):\n aX[i + r * X.shape[0]] = self.random_transform(X[i])\n X = aX\n\n if self.featurewise_center:\n self.mean = np.mean(X, axis=0)\n X -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(X, axis=0)\n X /= (self.std + 1e-7)\n\n def set_ch_mean(self, ch_mean):\n self.ch_mean = ch_mean\n"
] | [
[
"numpy.random.uniform",
"numpy.load",
"numpy.sin",
"numpy.zeros",
"numpy.reshape",
"numpy.random.seed",
"numpy.isscalar",
"numpy.copy",
"numpy.cos",
"numpy.random.random",
"numpy.lib.pad",
"numpy.where",
"numpy.array",
"numpy.std",
"numpy.dot",
"numpy.random.randint",
"numpy.mean"
]
] |
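The `Nstats/cs_capsule` row above defines `SegDataGenerator` and `SegDirectoryIterator` but never shows them being instantiated. The sketch below is a hypothetical usage example based only on the constructor and `flow_from_directory` signatures visible in that row; the module name, list file, and directory paths are placeholders rather than values from the repository. (Note that `pair_random_crop` in that row returns `y[:, h_start:h_end, h_start:h_end]` in its `channels_first` branch, where `w_start:w_end` would be expected for the second axis.)

```python
# Hypothetical usage of the SegDataGenerator defined in the row above.
# Assumes that code is saved as seg_data_generator.py; all paths are placeholders.
from seg_data_generator import SegDataGenerator

datagen = SegDataGenerator(
    zoom_range=[0.5, 2.0],
    crop_mode='random',        # 'none', 'random', or 'center'
    crop_size=(320, 320),      # used as target_size when crop_mode != 'none'
    horizontal_flip=True,
    fill_mode='constant',
    label_cval=255,            # padding / ignore value written into label maps
)

train_iter = datagen.flow_from_directory(
    file_path='train.txt',     # one file-name stem per line, e.g. 2011_002920
    data_dir='images', data_suffix='.jpg',
    label_dir='labels', label_suffix='.png',
    classes=21, ignore_label=255,
    batch_size=16,
)

batch_x, batch_y = next(train_iter)  # preprocessed images and sparse label maps
```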
AyishaR/deepC | [
"1dc9707ef5ca9000fc13c3da7f1129685a83b494"
] | [
"test/swig/Less.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n#\n# This file is part of DNN compiler maintained at\n# https://github.com/ai-techsystems/dnnCompiler\n\nimport common\n\nimport deepC.dnnc as dc\nimport numpy as np\nimport unittest\n\nclass LessTest(unittest.TestCase):\n def setUp(self):\n self.len = 24\n self.np_a = np.random.randn(self.len).astype(np.float32)\n self.np_b = np.random.randn(self.len).astype(np.float32)\n self.dc_a = dc.array(list(self.np_a));\n self.dc_b = dc.array(list(self.np_b));\n\n def test_Less1D (self):\n npr = np.less(self.np_a, self.np_b)\n dcr = dc.less(self.dc_a, self.dc_b)\n np.testing.assert_allclose(npr, np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n\n def test_Less2D (self):\n np_a = np.reshape(self.np_a, (6,4))\n np_b = np.reshape(self.np_b, (6,4))\n dc_a = dc.reshape(self.dc_a, (6,4));\n dc_b = dc.reshape(self.dc_b, (6,4));\n npr = np.less(np_a, np_b);\n dcr = dc.less(dc_a, dc_b);\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n\n def test_Less3D (self):\n np_a = np.reshape(self.np_a, (2,4,3))\n np_b = np.reshape(self.np_b, (2,4,3))\n dc_a = dc.reshape(self.dc_a, (2,4,3));\n dc_b = dc.reshape(self.dc_b, (2,4,3));\n\n npr = np.less(np_a, np_b);\n dcr = dc.less(dc_a, dc_b);\n\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n\n def test_Equal4D (self):\n np_a = np.reshape(self.np_a, (2,2,2,3))\n np_b = np.reshape(self.np_b, (2,2,2,3))\n dc_a = dc.reshape(self.dc_a, (2,2,2,3))\n dc_b = dc.reshape(self.dc_b, (2,2,2,3))\n\n npr = np.less(np_a, np_b)\n dcr = dc.less(dc_a, dc_b)\n\n np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.bool),\n rtol=1e-3, atol=1e-3)\n \n def tearDown(self):\n return \"test finished\"\n \n\nif __name__ == '__main__':\n unittest.main()\n\n"
] | [
[
"numpy.less",
"numpy.reshape",
"numpy.random.randn"
]
] |
ksboy/superglue | [
"12b5bf6d729ba5b95b8a29682f6bfa584131ae9c"
] | [
"run_classifier.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport random\nfrom tqdm import tqdm, trange\n\nimport numpy as np\nfrom scipy.special import softmax\n# from sklearn.utils.extmath import softmax\n\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom tensorboardX import SummaryWriter\n\nfrom pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME\nfrom pytorch_pretrained_bert.modeling import BertForSequenceClassification\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule\n\nfrom run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics\n\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n \"bert-base-multilingual-cased, bert-base-chinese.\")\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--loss_weight\",\n default=None,\n type=str,\n help=\"The Loss Weight.\")\n parser.add_argument(\"--pop_classifier_layer\",\n action='store_true',\n help=\"pop classifier layer\")\n parser.add_argument(\"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\",\n action='store_true',\n help=\"Whether to run predict on the test set.\") \n parser.add_argument(\"--do_lower_case\",\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--predict_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for predict.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--overwrite_output_dir',\n action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n args = parser.parse_args()\n\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n args.device = device\n\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval and not args.do_predict:\n raise ValueError(\"At least one of `do_train`, `do_eval` or `do_predict` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n task_name = args.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n output_mode = output_modes[task_name]\n\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n print(\"pop_classifier_layer\", args.pop_classifier_layer)\n model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels, pop_classifier_layer=args.pop_classifier_layer)\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n \n print(\"loss_weight\", args.loss_weight)\n\n 
global_step = 0\n nb_tr_steps = 0\n tr_loss = 0\n\n if args.do_train:\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n # Prepare data loader\n train_examples = processor.get_train_examples(args.data_dir)\n cached_train_features_file = os.path.join(args.data_dir, 'train_{0}_{1}_{2}'.format(\n list(filter(None, args.bert_model.split('/'))).pop(),\n str(args.max_seq_length),\n str(task_name)))\n try:\n with open(cached_train_features_file, \"rb\") as reader:\n train_features = pickle.load(reader)\n except:\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n if args.local_rank == -1 or torch.distributed.get_rank() == 0:\n logger.info(\" Saving train features into cached file %s\", cached_train_features_file)\n with open(cached_train_features_file, \"wb\") as writer:\n pickle.dump(train_features, writer)\n\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)\n\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n for step, batch in enumerate(tqdm(train_dataloader, 
desc=\"Iteration\", disable=args.local_rank not in [-1, 0])):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n\n # define a new function to compute loss values for both output_modes\n logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\n # print(input_ids)\n # print(logits)\n # print(label_ids)\n\n if output_mode == \"classification\":\n if args.loss_weight == None:\n loss_fct = CrossEntropyLoss()\n else:\n loss_weight= [int(_) for _ in args.loss_weight.split(\",\")]\n loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())\n loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), label_ids.view(-1))\n\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n if args.local_rank in [-1, 0]:\n tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', loss.item(), global_step)\n\n ### Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n ### Example:\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Save a trained model, configuration and tokenizer\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)\n tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n\n # Good practice: save your training arguments together with the trained model\n output_args_file = os.path.join(args.output_dir, 'training_args.bin')\n torch.save(args, output_args_file)\n else:\n model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)\n\n model.to(device)\n\n ### Evaluation\n if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n eval_examples = processor.get_dev_examples(args.data_dir)\n cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format(\n list(filter(None, args.bert_model.split('/'))).pop(),\n str(args.max_seq_length),\n str(task_name)))\n try:\n with open(cached_eval_features_file, \"rb\") as reader:\n eval_features = pickle.load(reader)\n except:\n eval_features = 
convert_examples_to_features(\n eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n if args.local_rank == -1 or torch.distributed.get_rank() == 0:\n logger.info(\" Saving eval features into cached file %s\", cached_eval_features_file)\n with open(cached_eval_features_file, \"wb\") as writer:\n pickle.dump(eval_features, writer)\n\n\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)\n\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n # Run prediction for full data\n if args.local_rank == -1:\n eval_sampler = SequentialSampler(eval_data)\n else:\n eval_sampler = DistributedSampler(eval_data) # Note that this sampler samples randomly\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n model.eval()\n eval_loss = 0\n nb_eval_steps = 0\n preds = []\n out_label_ids = None\n for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n \n with torch.no_grad():\n logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\n \n print(logits )\n print(label_ids)\n print(logits.view(-1, num_labels), label_ids.view(-1))\n # create eval loss and other metric required by the task\n if output_mode == \"classification\":\n if args.loss_weight == None:\n loss_fct = CrossEntropyLoss()\n else:\n loss_weight= [int(_) for _ in args.loss_weight.split(\",\")]\n loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())\n tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n elif output_mode == \"regression\":\n loss_fct = MSELoss()\n tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))\n \n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if len(preds) == 0:\n preds.append(logits.detach().cpu().numpy())\n out_label_ids = label_ids.detach().cpu().numpy()\n else:\n preds[0] = np.append(\n preds[0], logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(\n out_label_ids, label_ids.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / nb_eval_steps\n preds = preds[0]\n print(preds)\n\n def swap_value(a):\n temp=a[0];a[0]=a[1];a[1]=temp\n if task_name == \"copa\":\n preds = softmax(preds,axis=1)\n print(preds)\n for i in range(int(len(preds)/2)):\n if preds[2*i][0]>=preds[2*i+1][0]:\n if preds[2*i][0]<preds[2*i][1]:\n # print(preds[2*i][0], preds[2*i][1])\n swap_value(preds[2*i])\n # print(preds[2*i][0], preds[2*i][1])\n if preds[2*i+1][0]>preds[2*i+1][1]:\n swap_value(preds[2*i+1])\n else:\n if preds[2*i][0]>preds[2*i][1]:\n swap_value(preds[2*i])\n if preds[2*i+1][0]<preds[2*i+1][1]:\n swap_value(preds[2*i+1])\n print(preds)\n if output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif 
output_mode == \"regression\":\n preds = np.squeeze(preds)\n\n print(preds,out_label_ids)\n result = compute_metrics(task_name, preds, out_label_ids)\n\n loss = tr_loss/global_step if args.do_train else None\n\n result['eval_loss'] = eval_loss\n result['global_step'] = global_step\n result['loss'] = loss\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n \n ### Prediction\n if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n predict_examples = processor.get_test_examples(args.data_dir)\n cached_predict_features_file = os.path.join(args.data_dir, 'predict_{0}_{1}_{2}'.format(\n list(filter(None, args.bert_model.split('/'))).pop(),\n str(args.max_seq_length),\n str(task_name)))\n try:\n with open(cached_predict_features_file, \"rb\") as reader:\n predict_features = pickle.load(reader)\n except:\n predict_features = convert_examples_to_features(\n predict_examples, label_list, args.max_seq_length, tokenizer, output_mode)\n if args.local_rank == -1 or torch.distributed.get_rank() == 0:\n logger.info(\" Saving predict features into cached file %s\", cached_predict_features_file)\n with open(cached_predict_features_file, \"wb\") as writer:\n pickle.dump(predict_features, writer)\n\n\n logger.info(\"***** Running prediction *****\")\n logger.info(\" Num examples = %d\", len(predict_examples))\n logger.info(\" Batch size = %d\", args.predict_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in predict_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in predict_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in predict_features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.float)\n\n predict_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n # Run prediction for full data\n if args.local_rank == -1:\n predict_sampler = SequentialSampler(predict_data)\n else:\n predict_sampler = DistributedSampler(predict_data) # Note that this sampler samples randomly\n predict_dataloader = DataLoader(predict_data, sampler=predict_sampler, batch_size=args.predict_batch_size)\n\n model.eval()\n # predict_loss = 0\n # nb_predict_steps = 0\n preds = []\n out_label_ids = None\n for input_ids, input_mask, segment_ids, label_ids in tqdm(predict_dataloader, desc=\"predicting\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n \n with torch.no_grad():\n logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\n \n print(logits )\n print(label_ids)\n # create eval loss and other metric required by the task\n # if output_mode == \"classification\":\n # loss_fct = CrossEntropyLoss()\n # tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))\n # elif output_mode == \"regression\":\n # loss_fct = MSELoss()\n # tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))\n # \n # eval_loss += tmp_eval_loss.mean().item()\n # nb_predict_steps += 1\n if len(preds) 
== 0:\n preds.append(logits.detach().cpu().numpy())\n # out_label_ids = label_ids.detach().cpu().numpy()\n else:\n preds[0] = np.append(\n preds[0], logits.detach().cpu().numpy(), axis=0)\n # out_label_ids = np.append(\n # out_label_ids, label_ids.detach().cpu().numpy(), axis=0)\n # \n # eval_loss = eval_loss / nb_eval_steps\n\n preds = preds[0]\n print(preds)\n\n if task_name == \"copa\":\n preds = softmax(preds,axis=1)\n print(preds)\n results=[]\n for i in range(int(len(preds)/2)):\n if preds[2*i][0]>=preds[2*i+1][0]:\n results.append(0)\n else:\n results.append(1)\n preds= results\n label_map = {i : i for i in range(2)}\n else:\n if output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif output_mode == \"regression\":\n preds = np.squeeze(preds)\n label_map = {i : label for i, label in enumerate(label_list)}\n\n print(preds)\n\n # result = compute_metrics(task_name, preds, out_label_ids)\n\n # loss = tr_loss/global_step if args.do_train else None\n\n # result['eval_loss'] = eval_loss\n # result['global_step'] = global_step\n # result['loss'] = loss\n\n output_predict_file = os.path.join(args.output_dir, \"predict_results.txt\")\n with open(output_predict_file, \"w\") as writer:\n logger.info(\"***** Predict results *****\")\n for i in range(len(preds)):\n label_i = label_map[preds[i]]\n # json_i= \"\\\"idx: %d, \\\"label\\\": \\\"label_i\\\"\"\n writer.write(\"{\\\"idx\\\": %d, \\\"label\\\": \\\"%s\\\"}\\n\"%(i,label_i))\n # for key in sorted(result.keys()):\n # logger.info(\" %s = %s\", key, str(result[key]))\n # writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n \n \n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed_all",
"torch.no_grad",
"numpy.random.seed",
"torch.cuda.is_available",
"torch.distributed.init_process_group",
"torch.save",
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.utils.data.RandomSampler",
"torch.device",
"torch.cuda.set_device",
"torch.distributed.get_rank",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.tensor",
"numpy.argmax",
"torch.distributed.barrier",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.TensorDataset",
"torch.FloatTensor",
"torch.nn.MSELoss",
"torch.utils.data.distributed.DistributedSampler",
"numpy.squeeze",
"scipy.special.softmax",
"torch.nn.CrossEntropyLoss"
]
] |
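In the `ksboy/superglue` row above, the COPA branch of the prediction code selects between the two candidates of each pair with an index loop and a `swap_value` helper. As a reading aid only, the pair selection in the predict branch reduces to a column-0 comparison after the softmax; a vectorized sketch (not code from the repository) follows, assuming `preds` stacks the per-candidate logits as a `(2 * num_examples, 2)` array.

```python
# Compact restatement of the COPA pair selection in run_classifier.py's predict branch.
import numpy as np
from scipy.special import softmax

def copa_choice(preds):
    probs = softmax(preds, axis=1)
    # Candidate pairs sit at rows (2i, 2i+1); pick whichever has the larger
    # column-0 score. Ties go to the first candidate, as in the original loop.
    return (probs[0::2, 0] < probs[1::2, 0]).astype(int)  # 0 = first, 1 = second
```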
AghaSaad04/mlops-v2 | [
"d312ae108c93bacfb3541968bb913874af060ab2"
] | [
"sales_forecast/scoring/score.py"
] | [
"import numpy\r\nimport os\r\nimport math\r\nfrom azureml.core.model import Model\r\nfrom azureml.core.dataset import Dataset\r\nfrom inference_schema.schema_decorators \\\r\n import input_schema, output_schema\r\nfrom inference_schema.parameter_types.numpy_parameter_type \\\r\n import NumpyParameterType\r\nimport keras\r\nfrom keras.models import load_model\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom azureml.core.run import Run\r\nfrom azureml.core import Dataset, Datastore, Workspace\r\nimport argparse\r\nimport json\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom azureml.core.authentication import ServicePrincipalAuthentication\r\n# from azureml.core.authentication import InteractiveLoginAuthentication\r\n\r\ndef tts(data):\r\n data['date'] = pd.to_datetime(data['date'])\r\n data['date'] = (data['date'] - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n (train, test) = data[0:-2000].values, data[-2000:].values\r\n return (train, test)\r\n\r\ndef scale_data(train_set, test_set):\r\n # apply Min Max Scaler\r\n scaler = MinMaxScaler(feature_range=(-1, 1))\r\n scaler = scaler.fit(train_set[:, :4])\r\n\r\n # reshape training set\r\n train_set = train_set.reshape(train_set.shape[0], train_set.shape[1])\r\n train_set_scaled = scaler.transform(train_set[:, :4])\r\n\r\n # reshape test set\r\n test_set = test_set.reshape(test_set.shape[0], test_set.shape[1])\r\n test_set_scaled = scaler.transform(test_set[:, :4])\r\n\r\n X_train, y_train = train_set[:, :4], train_set[:, 4:].ravel()\r\n X_test, y_test = test_set[:, :4], test_set[:, 4:].ravel()\r\n\r\n return X_train, y_train, X_test, y_test, scaler\r\n\r\ndef init():\r\n # load the model from file into a global object\r\n global model\r\n\r\n model_path = Model.get_model_path(\r\n os.getenv(\"AZUREML_MODEL_DIR\").split('/')[-2])\r\n\r\n print (\"model path\", model_path)\r\n\r\n # try:\r\n # print (\"try\")\r\n # dataset = pd.read_csv('/var/azureml-app/train.csv')\r\n # original_df = dataset.to_pandas_dataframe()\r\n # except:\r\n # print (\"except\")\r\n # train_dataset = original_df.to_csv('train.csv', index=False) \r\n \r\n # interactive_auth = InteractiveLoginAuthentication(tenant_id=\"def44f5f-0783-4b05-8f2f-dd615c5dfec4\")\r\n # ws = Workspace(subscription_id=\"6542067a-127a-43ff-b7f2-007fe21a37f0\",\r\n # resource_group=\"sales-mlops-rg\",\r\n # workspace_name=\"sales-mlops-ws\",\r\n # auth=interactive_auth)\r\n # ws.get_details()\r\n\r\n \r\n \r\n # print(original_df)\r\n\r\n model = keras.models.load_model(model_path)\r\n print(\"Current directory:\", os.getcwd())\r\n print(\"Model is loaded\")\r\n\r\n# date = '6/25/2020'\r\n# store = 3\r\n# item = 105\r\n# price = 990\r\n# date = pd.to_datetime(date)\r\n# date = (date - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n\r\n# input_sample = numpy.array([[date, store, item, price]])\r\n# output_sample = numpy.array([4])\r\n\r\ninput_sample = numpy.array([[1591833600,34,759,690]])\r\noutput_sample = numpy.array([10])\r\n\r\n@input_schema('data', NumpyParameterType(input_sample))\r\n@output_schema(NumpyParameterType(output_sample))\r\n\r\n\r\ndef run(data, request_headers):\r\n global original_df\r\n sp = ServicePrincipalAuthentication(tenant_id=\"def44f5f-0783-4b05-8f2f-dd615c5dfec4\", service_principal_id=\"add8f304-2d88-45e3-94fa-ac6cf335d5df\", service_principal_password=\"If2-.7Wlno57NW6v9~nE~xNIj~naD-DL5f\") \r\n ws = Workspace.get(name=\"sales-mlops-ws\", auth = sp, subscription_id=\"6542067a-127a-43ff-b7f2-007fe21a37f0\")\r\n ws.get_details()\r\n 
dataset = ws.datasets['salesforecast_ds'] \r\n original_df = dataset.to_pandas_dataframe()\r\n # date = '6/25/2020'\r\n # store = 34\r\n # item = 759\r\n # price = 690\r\n # date = pd.to_datetime(date)\r\n # date = (date - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n date = data[0][0]\r\n prev_sales = []\r\n (train, test) = tts(original_df)\r\n X_train, y_train, X_test, y_test, scaler_object = scale_data(train, test)\r\n first_date = original_df[\"date\"][0]\r\n for x in original_df.index:\r\n last_date = original_df[\"date\"][x]\r\n\r\n print(\"last date\", last_date)\r\n\r\n days_diff = (int(date) - int(last_date)) / (60 * 60 * 24)\r\n total_data_days = (int(last_date) - int(first_date)) / (60 * 60 * 24)\r\n\r\n print(\"days:\", days_diff)\r\n print(\"total_data_days:\", total_data_days)\r\n\r\n for i in original_df.index:\r\n if (original_df[\"item\"][i] == data[0][2] and original_df[\"store\"][i] == data[0][1]):\r\n prev_sales.append(original_df[\"sales\"][i])\r\n \r\n prev_sales_avg = 0\r\n prev_sales_avg = (sum(prev_sales)) / total_data_days\r\n\r\n forecast_result_array = []\r\n test_set = data\r\n test_set_scaled = scaler_object.transform(test_set)\r\n X_test = test_set_scaled[:, :4]\r\n X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])\r\n\r\n y_pred = model.predict(X_test)\r\n print(\"y_pred:\",y_pred)\r\n result = y_pred[0][0][0]\r\n result = round(result)\r\n print(\"result:\",result)\r\n prev_sales_avg = round (prev_sales_avg)\r\n next_day_prediction = math.ceil(result + prev_sales_avg)\r\n prev_sales.append(next_day_prediction)\r\n forecast_result_array.append(next_day_prediction)\r\n\r\n if days_diff > 1:\r\n for day in range(round(days_diff)):\r\n total_data_days += 1\r\n prev_sales_avg = sum(prev_sales) / total_data_days \r\n prev_sales_avg = round(prev_sales_avg)\r\n prev_sales.append(prev_sales_avg)\r\n forecast_result_array.append(prev_sales_avg)\r\n\r\n\r\n\r\n end_result = sum(forecast_result_array)\r\n print(\"end result: \", end_result)\r\n\r\n print(('{{\"RequestId\":\"{0}\", '\r\n '\"TraceParent\":\"{1}\", '\r\n '\"NumberOfPredictions\":{2}}}'\r\n ).format(\r\n request_headers.get(\"X-Ms-Request-Id\", \"\"),\r\n request_headers.get(\"Traceparent\", \"\"),\r\n end_result\r\n ))\r\n\r\n return {\"result\": end_result}\r\n\r\nif __name__ == \"__main__\":\r\n init()\r\n # date ='6/25/2020'\r\n # store = 34\r\n # item = 759\r\n # price = 690\r\n # date = pd.to_datetime(date)\r\n # date = (date - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\r\n test = numpy.array([[date, store, item, price]])\r\n #print(\"test:\",test)\r\n #test =numpy.array([[1591833600,34,759,690]])\r\n prediction = run(test, {}) \r\n print(\"Test result: \", prediction)\r\n"
] | [
[
"sklearn.preprocessing.MinMaxScaler",
"pandas.Timedelta",
"pandas.to_datetime",
"numpy.array",
"pandas.Timestamp"
]
] |
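The `AghaSaad04/mlops-v2` scoring script above expects `run()` to receive a 4-column array of `[date_as_epoch_seconds, store, item, price]`, and its `__main__` block builds `numpy.array([[date, store, item, price]])` from variables that are only defined in commented-out lines, so it raises a `NameError` as written. Below is a hypothetical smoke-test sketch of how that payload would be built, reusing the script's own date conversion and the example values from its comments; the `init()`/`run()` calls stay commented because they require the Azure ML model directory and workspace to be reachable.

```python
# Hypothetical payload construction for the score.py row above.
# The values (6/25/2020, store 34, item 759, price 690) come from the script's comments.
import numpy as np
import pandas as pd

date = pd.to_datetime('6/25/2020')
epoch_s = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')  # same conversion score.py applies
payload = np.array([[epoch_s, 34, 759, 690]])   # columns: date, store, item, price

# init()                   # loads the registered Keras model (needs AZUREML_MODEL_DIR)
# print(run(payload, {}))  # needs access to the 'salesforecast_ds' dataset in the workspace
```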
victorchen276/CarND-Advanced-Lane-Lines | [
"436d81150107c181e3f328adfd3f1c31d6a5cb15"
] | [
"source/Project.py"
] | [
"\nfrom source.camera import camera\nfrom source.LaneDetect import LaneDetect\n\nfrom moviepy.editor import VideoFileClip\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n#\n# def process_video(input_video_file):\n# clip1 = VideoFileClip(input_video_file);\n# outputclip = clip1.fl_image(process_vid)\n# outputclip.write_videofile('output_'+input_video_file, audio=False);\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\n\n\nif __name__ == \"__main__\":\n print('main')\n\n # images = glob.glob('../camera_cal/calibration*.jpg')\n # print(images)\n\n camera = camera()\n # camera.calibration(images, x_cor=9, y_cor=6, outputfilename='./camera_calibration_data_1')\n camera.load_calibration_data('./camera_calibration_data.p')\n\n\n # # images = sorted(images, key=lambda x: float(re.findall(\"(\\d+)\", x)[0]))\n #\n # print('Correction images (successfully detected corners):')\n # plt.figure(figsize=(11.5, 9))\n # gridspec.GridSpec(6, 3)\n # # Step through the list and search for chessboard corners\n # for i, image in enumerate(camera_calibrate.calibration_images_success):\n # plt.subplot2grid((6, 3), (i // 3, i % 3), colspan=1, rowspan=1)\n # plt.imshow(image)\n # plt.axis('off')\n # plt.show()\n #\n # plt.figure(figsize=(12, 4))\n # plt.figtext(.5, .8, 'Images in which cv2 failed to find desired corners', fontsize=22, ha='center')\n # for i, p in enumerate(camera_calibrate.calibration_images_fail):\n # plt.subplot(1, 3, i + 1)\n # plt.imshow(mpimg.imread(p)) # draw the first image of each class\n # plt.title(p)\n # plt.axis('off')\n # plt.tight_layout(pad=0, h_pad=0, w_pad=0)\n # plt.show()\n # plt.savefig('fail.png')\n\n # camera_calibrate.load_calibration_data('./camera_calibration_data.p')\n\n # orig_img = mpimg.imread('../test_images/test1.jpg')\n # undist_img = camera_calibrate.undistort(orig_img)\n # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))\n # ax1.imshow(orig_img)\n # ax1.set_title('Original', fontsize=20)\n # ax2.imshow(undist_img)\n # ax2.set_title('Undistorted', fontsize=20)\n # # plt.show()\n # plt.savefig('undistort2.png')\n\n # Perspective transform\n # for image in glob.glob('../test_images/*.jpg'):\n # orig_img = cv2.imread(image)\n # birdeye_img, _ = camera.birds_eye(orig_img)\n # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))\n # f.tight_layout()\n # ax1.imshow(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))\n # ax1.set_title('Original', fontsize=20)\n # ax2.imshow(cv2.cvtColor(birdeye_img, cv2.COLOR_BGR2RGB))\n # ax2.set_title('Undistorted and Warped Image', fontsize=20)\n # plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n # plt.show()\n # # plt.savefig('../output_images/warp_' + str(i) + '.png')\n\n\n\n # # edege\n # image = mpimg.imread('../test_images/test6.jpg')\n # lane_detecter = LaneDetect()\n # result = lane_detecter.get_edges(image)\n #\n # # Plot the result\n # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\n # # f.tight_layout()\n # ax1.axis('off')\n # ax1.imshow(image)\n # ax1.set_title('Original', fontsize=18)\n # ax2.axis('off')\n # ax2.set_title('Edge', fontsize=18)\n #\n #\n # ax2.imshow(result, cmap='gray')\n # plt.show()\n # plt.savefig('edge.png')\n\n # # Detect Lane line\n # for image_name in glob.glob('../test_images/*.jpg'):\n # orig_img = mpimg.imread(image_name)\n #\n # lane_detecter = LaneDetect()\n # lane_detecter.initcamera()\n # lane_detecter.initlines(orig_img)\n # output_img = 
lane_detecter.process_pipeline(orig_img)\n # f, (ax1) = plt.subplots(1, 1, figsize=(9, 6))\n # ax1.imshow(output_img)\n # ax1.set_title('output_img', fontsize=20)\n # plt.axis('off')\n # plt.show()\n # break\n\n # Applying pipeline to video\n clip1 = VideoFileClip('../project_video.mp4')\n lane_detecter = LaneDetect()\n lane_detecter.initcamera()\n lane_detecter.initlines(clip1.get_frame(0))\n outputclip = clip1.fl_image(lane_detecter.process_pipeline)\n outputclip.write_videofile('../output_videos/output_project_video.mp4', audio=False)\n #\n # clip1 = VideoFileClip('../harder_challenge_video.mp4');\n # lane_detecter = LaneDetect(clip1.get_frame(0))\n # outputclip = clip1.fl_image(lane_detecter.process_pipeline)\n # outputclip.write_videofile('../output_harder_challenge_video.mp4', audio=False)\n #\n # clip1 = VideoFileClip('../challenge_video.mp4')\n # lane_detecter = LaneDetect(clip1.get_frame(0))\n # outputclip = clip1.fl_image(lane_detecter.process_pipeline)\n # outputclip.write_videofile('../output_challenge_video.mp4', audio=False)"
] | [
[
"numpy.dot"
]
] |
Lee-Ft/RHA | [
"8a832a9afebc9204148bbd340c31e26c83138024"
] | [
"model/stage.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport pprint\nfrom collections import defaultdict\nfrom .context_query_attention import StructuredAttention\nfrom .encoder import StackedEncoder\nfrom .cnn import DepthwiseSeparableConv\nfrom .model_utils import save_pickle, mask_logits, flat_list_of_lists, \\\n find_max_triples, get_high_iou_sapns, expand_span\n\n\nclass LinearWrapper(nn.Module):\n \"\"\"1D conv layer\"\"\"\n def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True):\n super(LinearWrapper, self).__init__()\n self.relu = relu\n layers = [nn.LayerNorm(in_hsz)] if layer_norm else []\n layers += [\n nn.Dropout(dropout),\n nn.Linear(in_hsz, out_hsz)\n ]\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"(N, L, D)\"\"\"\n if self.relu:\n return F.relu(self.conv(x), inplace=True) # (N, L, D)\n else:\n return self.conv(x) # (N, L, D)\n\n\nclass ConvLinear(nn.Module):\n \"\"\"1D conv layer\"\"\"\n def __init__(self, in_hsz, out_hsz, kernel_size=3, layer_norm=True, dropout=0.1, relu=True):\n super(ConvLinear, self).__init__()\n layers = [nn.LayerNorm(in_hsz)] if layer_norm else []\n layers += [\n nn.Dropout(dropout),\n DepthwiseSeparableConv(in_ch=in_hsz,\n out_ch=out_hsz,\n k=kernel_size,\n dim=1,\n relu=relu)\n ]\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"(N, L, D)\"\"\"\n return self.conv(x) # (N, L, D)\n\n\nclass STAGE(nn.Module):\n def __init__(self, opt):\n super(STAGE, self).__init__()\n self.opt = opt\n self.inference_mode = False\n self.sub_flag = opt.sub_flag\n self.vfeat_flag = opt.vfeat_flag\n self.vfeat_size = opt.vfeat_size\n self.t_iter = opt.t_iter\n self.extra_span_length = opt.extra_span_length\n self.add_local = opt.add_local\n self.use_sup_att = opt.use_sup_att\n self.num_negatives = opt.num_negatives\n self.negative_pool_size = opt.negative_pool_size\n self.num_hard = opt.num_hard\n self.drop_topk = opt.drop_topk\n self.margin = opt.margin\n self.att_loss_type = opt.att_loss_type\n self.scale = opt.scale\n self.alpha = opt.alpha\n self.dropout = opt.dropout\n self.hsz = opt.hsz\n self.bsz = None\n self.num_seg = None\n self.num_a = 5\n self.flag_cnt = self.sub_flag + self.vfeat_flag\n\n self.wd_size = opt.embedding_size\n self.bridge_hsz = 300\n\n self.bert_word_encoding_fc = nn.Sequential(\n nn.LayerNorm(self.wd_size),\n nn.Dropout(self.dropout),\n nn.Linear(self.wd_size, self.bridge_hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.bridge_hsz),\n )\n\n if self.sub_flag:\n print(\"Activate sub branch\")\n\n if self.vfeat_flag:\n print(\"Activate vid branch\")\n self.vid_fc = nn.Sequential(\n nn.LayerNorm(self.vfeat_size),\n nn.Dropout(self.dropout),\n nn.Linear(self.vfeat_size, self.bridge_hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.bridge_hsz)\n )\n\n if self.flag_cnt == 2:\n self.concat_fc = nn.Sequential(\n nn.LayerNorm(3 * self.hsz),\n nn.Dropout(self.dropout),\n nn.Linear(3 * self.hsz, self.hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.hsz),\n )\n\n self.input_embedding = nn.Sequential(\n nn.Dropout(self.dropout),\n nn.Linear(self.bridge_hsz, self.hsz),\n nn.ReLU(True),\n nn.LayerNorm(self.hsz),\n )\n\n self.input_encoder = StackedEncoder(n_blocks=opt.input_encoder_n_blocks,\n n_conv=opt.input_encoder_n_conv,\n kernel_size=opt.input_encoder_kernel_size,\n num_heads=opt.input_encoder_n_heads,\n hidden_size=self.hsz,\n dropout=self.dropout)\n\n self.str_attn = StructuredAttention(dropout=self.dropout,\n scale=opt.scale,\n add_void=opt.add_non_visual) # no 
parameters inside\n\n self.c2q_down_projection = nn.Sequential(\n nn.LayerNorm(3 * self.hsz),\n nn.Dropout(self.dropout),\n nn.Linear(3*self.hsz, self.hsz),\n nn.ReLU(True),\n )\n\n self.cls_encoder = StackedEncoder(n_blocks=opt.cls_encoder_n_blocks,\n n_conv=opt.cls_encoder_n_conv,\n kernel_size=opt.cls_encoder_kernel_size,\n num_heads=opt.cls_encoder_n_heads,\n hidden_size=self.hsz,\n dropout=self.dropout)\n\n self.cls_projection_layers = nn.ModuleList(\n [\n LinearWrapper(in_hsz=self.hsz,\n out_hsz=self.hsz,\n layer_norm=True,\n dropout=self.dropout,\n relu=True)\n ] +\n [\n ConvLinear(in_hsz=self.hsz,\n out_hsz=self.hsz,\n kernel_size=3,\n layer_norm=True,\n dropout=self.dropout,\n relu=True)\n for _ in range(self.t_iter)])\n\n self.temporal_scoring_st_layers = nn.ModuleList([\n LinearWrapper(in_hsz=self.hsz,\n out_hsz=1,\n layer_norm=True,\n dropout=self.dropout,\n relu=False)\n for _ in range(self.t_iter+1)])\n\n self.temporal_scoring_ed_layers = nn.ModuleList([\n LinearWrapper(in_hsz=self.hsz,\n out_hsz=1,\n layer_norm=True,\n dropout=self.dropout,\n relu=False)\n for _ in range(self.t_iter+1)])\n\n self.temporal_criterion = nn.CrossEntropyLoss(reduction=\"sum\")\n\n self.classifier = LinearWrapper(in_hsz=self.hsz * 2 if self.add_local else self.hsz,\n out_hsz=1,\n layer_norm=True,\n dropout=self.dropout,\n relu=False)\n\n def load_word_embedding(self, pretrained_embedding, requires_grad=False):\n self.word_embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))\n self.word_embedding.weight.requires_grad = requires_grad\n\n def forward(self, batch):\n if self.inference_mode:\n return self.forward_main(batch)\n else:\n out, att_loss, att_predictions, temporal_loss, temporal_predictions, other_outputs = self.forward_main(batch)\n return out, att_loss, att_predictions, temporal_loss, temporal_predictions\n\n def forward_main(self, batch):\n \"\"\"\n Args:\n batch: edict, keys = qas, qas_mask, qa_noun_masks, sub, sub_mask, vcpt, vcpt_mask, vid, vid_mask,\n att_labels, att_labels_mask, qid, target, vid_name, ts_label\n qas, qas_mask, qa_noun_masks: (N, 5, Lqa)\n sub, sub_mask: (N, #imgs, Ls)\n vcpt, vcpt_mask: (N, #imgs, #regions)\n vid, vid_mask: (N, #imgs, #regions, D), (N, #imgs, #regions)\n att_labels, att_labels_mask: A list of N (#imgs, #qa-words, #regions)\n qid: list(int)\n vid_name: list(str)\n target: torch.LongTensor\n use_hard_negatives: bool, true to sample hard negatives\n q_l: int, length of the tokenized question\n anno_st_idx (list of int): each element is an index (at 0.5fps) of the first image\n with spatial annotation.\n ts_label: {\"st\": (N, ), \"ed\": (N, )} for 'st_ed'. 
(N, L) for 'frm'\n ts_label_mask: (N, L) for both 'st_ed' and 'frm'\n Returns:\n \"\"\"\n self.bsz = len(batch.qid)\n bsz = self.bsz\n num_a = self.num_a\n hsz = self.hsz\n\n a_embed = self.base_encoder(batch.qas_bert.view(bsz*num_a, -1, self.wd_size), # (N*5, L, D)\n batch.qas_mask.view(bsz * num_a, -1), # (N*5, L)\n self.bert_word_encoding_fc,\n self.input_embedding,\n self.input_encoder) # (N*5, L, D)\n a_embed = a_embed.view(bsz, num_a, 1, -1, hsz) # (N, 5, 1, L, D)\n a_mask = batch.qas_mask.view(bsz, num_a, 1, -1) # (N, 5, 1, L)\n\n attended_sub, attended_vid, attended_vid_mask, attended_sub_mask = (None, ) * 4\n other_outputs = {} # {\"pos_noun_mask\": batch.qa_noun_masks} # used to visualization and compute att acc\n if self.sub_flag:\n num_imgs, num_words = batch.sub_bert.shape[1:3]\n sub_embed = self.base_encoder(batch.sub_bert.view(bsz*num_imgs, num_words, -1), # (N*Li, Lw)\n batch.sub_mask.view(bsz * num_imgs, num_words), # (N*Li, Lw)\n self.bert_word_encoding_fc,\n self.input_embedding,\n self.input_encoder) # (N*Li, Lw, D)\n\n sub_embed = sub_embed.contiguous().view(bsz, 1, num_imgs, num_words, -1) # (N, Li, Lw, D)\n sub_mask = batch.sub_mask.view(bsz, 1, num_imgs, num_words) # (N, 1, Li, Lw)\n\n attended_sub, attended_sub_mask, sub_raw_s, sub_normalized_s = \\\n self.qa_ctx_attention(a_embed, sub_embed, a_mask, sub_mask,\n noun_mask=None,\n non_visual_vectors=None)\n\n other_outputs[\"sub_normalized_s\"] = sub_normalized_s\n other_outputs[\"sub_raw_s\"] = sub_raw_s\n\n if self.vfeat_flag:\n num_imgs, num_regions = batch.vid.shape[1:3]\n vid_embed = F.normalize(batch.vid, p=2, dim=-1) # (N, Li, Lr, D)\n\n vid_embed = self.base_encoder(vid_embed.view(bsz*num_imgs, num_regions, -1), # (N*Li, Lw)\n batch.vid_mask.view(bsz * num_imgs, num_regions), # (N*Li, Lr)\n self.vid_fc,\n self.input_embedding,\n self.input_encoder) # (N*Li, L, D)\n\n vid_embed = vid_embed.contiguous().view(bsz, 1, num_imgs, num_regions, -1) # (N, 1, Li, Lr, D)\n vid_mask = batch.vid_mask.view(bsz, 1, num_imgs, num_regions) # (N, 1, Li, Lr)\n\n attended_vid, attended_vid_mask, vid_raw_s, vid_normalized_s = \\\n self.qa_ctx_attention(a_embed, vid_embed, a_mask, vid_mask,\n noun_mask=None,\n non_visual_vectors=None)\n\n other_outputs[\"vid_normalized_s\"] = vid_normalized_s\n other_outputs[\"vid_raw_s\"] = vid_raw_s\n\n if self.flag_cnt == 2:\n visual_text_embedding = torch.cat([attended_sub,\n attended_vid,\n attended_sub * attended_vid], dim=-1) # (N, 5, Li, Lqa, 3D)\n visual_text_embedding = self.concat_fc(visual_text_embedding) # (N, 5, Li, Lqa, D)\n out, target, t_scores = self.classfier_head_multi_proposal(\n visual_text_embedding, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,\n extra_span_length=self.extra_span_length)\n elif self.sub_flag:\n out, target, t_scores = self.classfier_head_multi_proposal(\n attended_sub, attended_sub_mask, batch.target, batch.ts_label, batch.ts_label_mask,\n extra_span_length=self.extra_span_length)\n elif self.vfeat_flag:\n out, target, t_scores = self.classfier_head_multi_proposal(\n attended_vid, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,\n extra_span_length=self.extra_span_length)\n else:\n raise NotImplementedError\n assert len(out) == len(target)\n\n other_outputs[\"temporal_scores\"] = t_scores # (N, 5, Li) or (N, 5, Li, 2)\n\n if self.inference_mode:\n inference_outputs = {\n \"answer\": out, # (N, 5)\n \"t_scores\": F.softmax(t_scores, dim=2),\n \"att_predictions\": self.get_att_prediction(\n 
scores=other_outputs[\"vid_raw_s\"],\n object_vocab=batch.eval_object_word_ids,\n words=batch.qas,\n vid_names=batch.vid_name,\n qids=batch.qid,\n img_indices=batch.image_indices,\n boxes=batch.boxes,\n start_indices=batch.anno_st_idx,\n ) if self.vfeat_flag else None,\n }\n return inference_outputs\n\n att_loss = 0\n att_predictions = None\n # if (self.use_sup_att or not self.training) and self.vfeat_flag:\n if self.use_sup_att and self.training and self.vfeat_flag:\n start_indices = batch.anno_st_idx\n try:\n cur_att_loss, cur_att_predictions = \\\n self.get_att_loss(other_outputs[\"vid_raw_s\"], batch.att_labels, batch.target, batch.qas,\n qids=batch.qid,\n q_lens=batch.q_l,\n vid_names=batch.vid_name,\n img_indices=batch.image_indices,\n boxes=batch.boxes,\n start_indices=start_indices,\n num_negatives=self.num_negatives,\n use_hard_negatives=batch.use_hard_negatives,\n drop_topk=self.drop_topk)\n except AssertionError as e:\n save_pickle(\n {\"batch\": batch, \"start_indices\": start_indices, \"vid_raw_s\": other_outputs[\"vid_raw_s\"]},\n \"err_dict.pickle\"\n )\n import sys\n sys.exit(1)\n att_loss += cur_att_loss\n att_predictions = cur_att_predictions\n\n temporal_loss = self.get_ts_loss(temporal_scores=t_scores,\n ts_labels=batch.ts_label,\n answer_indices=batch.target)\n\n if self.training:\n return [out, target], att_loss, att_predictions, temporal_loss, t_scores, other_outputs\n else:\n return out, att_loss, att_predictions, temporal_loss, F.softmax(t_scores, dim=2), other_outputs\n\n @classmethod\n def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):\n \"\"\" Raw data --> higher-level embedding\n Args:\n data: (N, L) for text, (N, L, D) for video\n data_mask: (N, L)\n init_encoder: word_embedding layer for text, MLP (downsize) for video\n downsize_encoder: MLP, down project to hsz\n input_encoder: multiple layer of encoder block, with residual connection, CNN, layernorm, etc\n Returns:\n encoded_data: (N, L, D)\n \"\"\"\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data, data_mask)\n\n def qa_ctx_attention(self, qa_embed, ctx_embed, qa_mask, ctx_mask, noun_mask, non_visual_vectors):\n \"\"\" Align image regions with QA words\n Args:\n qa_embed: (N, 5, 1, Lqa, D)\n qa_mask: (N, 5, 1, Lqa)\n ctx_embed: (N, 1, Li, Lr, D)\n ctx_mask: (N, 1, Li, Lr)\n noun_mask: (N, 5, Lqa)\n non_visual_vectors: (m, D), m is a tunable parameter\n Returns:\n \"\"\"\n num_img, num_region = ctx_mask.shape[2:]\n\n u_a, raw_s, s_mask, s_normalized = self.str_attn(\n qa_embed, ctx_embed, qa_mask, ctx_mask,\n noun_mask=noun_mask, void_vector=non_visual_vectors) # (N, 5, Li, Lqa, D), (N, 5, Li, Lqa, lr) x2\n qa_embed = qa_embed.repeat(1, 1, num_img, 1, 1)\n mixed = torch.cat([qa_embed,\n u_a,\n qa_embed*u_a], dim=-1) # (N, 5, Li, Lqa, D)\n mixed = self.c2q_down_projection(mixed) # (N, 5, Li, Lqa, D)\n mixed_mask = (s_mask.sum(-1) != 0).float() # (N, 5, Li, Lqa)\n return mixed, mixed_mask, raw_s, s_normalized\n\n def get_proposals(self, max_statement, max_statement_mask, temporal_scores,\n targets, ts_labels, max_num_proposal=1, iou_thd=0.5, ce_prob_thd=0.01,\n extra_span_length=3):\n \"\"\"\n Args:\n max_statement: (N, 5, Li, D)\n max_statement_mask: (N, 5, Li, 1)\n temporal_scores: (N, 5, Li, 2)\n targets: (N, )\n ts_labels: (N, Li) for frm or N * (st, ed) for st_ed\n max_num_proposal:\n iou_thd:\n ce_prob_thd:\n extra_span_length:\n Returns:\n\n \"\"\"\n bsz, num_a, num_img, _ = max_statement_mask.shape\n if self.training:\n 
ca_temporal_scores_st_ed = \\\n temporal_scores[torch.arange(bsz, dtype=torch.long), targets].data # (N, Li, 2)\n ca_temporal_scores_st_ed = F.softmax(ca_temporal_scores_st_ed, dim=1) # (N, Li, 2)\n ca_pred_spans = find_max_triples(ca_temporal_scores_st_ed[:, :, 0],\n ca_temporal_scores_st_ed[:, :, 1],\n topN=max_num_proposal,\n prob_thd=ce_prob_thd) # N * [(st_idx, ed_idx, confidence), ...]\n # +1 for ed index before forward into get_high_iou_spans func.\n ca_pred_spans = [[[sub_e[0], sub_e[1] + 1, sub_e[2]] for sub_e in e] for e in ca_pred_spans]\n spans = get_high_iou_sapns(zip(ts_labels[\"st\"].tolist(), (ts_labels[\"ed\"] + 1).tolist()),\n ca_pred_spans, iou_thd=iou_thd, add_gt=True) # N * [(st, ed), ...]\n local_max_max_statement_list = [] # N_new * (5, D)\n global_max_max_statement_list = [] # N_new * (5, D)\n span_targets = [] # N_new * (1,)\n for idx, (t, span_sublist) in enumerate(zip(targets, spans)):\n span_targets.extend([t] * len(span_sublist))\n cur_global_max_max_statement = \\\n torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 1)[0]\n global_max_max_statement_list.extend([cur_global_max_max_statement] * len(span_sublist))\n for span in span_sublist:\n span = expand_span(span, expand_length=extra_span_length)\n cur_span_max_statement = mask_logits(\n max_statement[idx, :, span[0]:span[1]],\n max_statement_mask[idx, :, span[0]:span[1]]) # (5, Li[st:ed], D)\n local_max_max_statement_list.append(torch.max(cur_span_max_statement, 1)[0]) # (5, D)\n local_max_max_statement = torch.stack(local_max_max_statement_list) # (N_new, 5, D)\n global_max_max_statement = torch.stack(global_max_max_statement_list) # (N_new, 5, D)\n max_max_statement = torch.cat([\n local_max_max_statement,\n global_max_max_statement], dim=-1) # (N_new, 5, 2D)\n return max_max_statement, targets.new_tensor(span_targets) # (N_new, 5, 2D), (N_new, )\n else: # testing\n temporal_scores_st_ed = F.softmax(temporal_scores, dim=2) # (N, 5, Li, 2)\n temporal_scores_st_ed_reshaped = temporal_scores_st_ed.view(bsz * num_a, -1, 2) # (N*5, Li, 2)\n pred_spans = find_max_triples(temporal_scores_st_ed_reshaped[:, :, 0],\n temporal_scores_st_ed_reshaped[:, :, 1],\n topN=1, prob_thd=None) # (N*5) * [(st, ed, confidence), ]\n pred_spans = flat_list_of_lists(pred_spans) # (N*5) * (st, ed, confidence)\n pred_spans = torch.FloatTensor(pred_spans).to(temporal_scores_st_ed_reshaped.device) # (N*5, 3)\n pred_spans, pred_scores = pred_spans[:, :2].long(), pred_spans[:, 2] # (N*5, 2), (N*5, )\n pred_spans = [[e[0], e[1] + 1] for e in pred_spans]\n max_statement = max_statement.view(bsz * num_a, num_img, -1) # (N*5, Li, D)\n max_statement_mask = max_statement_mask.view(bsz * num_a, num_img, -1) # (N*5, Li, 1)\n local_max_max_statement_list = [] # N*5 * (D, )\n global_max_max_statement_list = [] # N*5 * (D, )\n for idx, span in enumerate(pred_spans):\n span = expand_span(span, expand_length=extra_span_length)\n cur_global_max_max_statement = \\\n torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 0)[0]\n global_max_max_statement_list.append(cur_global_max_max_statement)\n cur_span_max_statement = mask_logits(\n max_statement[idx, span[0]:span[1]],\n max_statement_mask[idx, span[0]:span[1]]) # (Li[st:ed], D), words for span[0] == span[1]\n local_max_max_statement_list.append(torch.max(cur_span_max_statement, 0)[0]) # (D, )\n local_max_max_statement = torch.stack(local_max_max_statement_list) # (N*5, D)\n global_max_max_statement = torch.stack(global_max_max_statement_list) # (N*5, D)\n 
max_max_statement = torch.cat([\n local_max_max_statement,\n global_max_max_statement], dim=-1) # (N_new, 5, 2D)\n return max_max_statement.view(bsz, num_a, -1), targets # (N, 5, 2D), (N, )\n\n def residual_temporal_predictor(self, layer_idx, input_tensor):\n \"\"\"\n Args:\n layer_idx (int):\n input_tensor: (N, L, D)\n\n Returns:\n temporal_score\n \"\"\"\n input_tensor = input_tensor + self.cls_projection_layers[layer_idx](input_tensor) # (N, L, D)\n t_score_st = self.temporal_scoring_st_layers[layer_idx](input_tensor) # (N, L, 1)\n t_score_ed = self.temporal_scoring_ed_layers[layer_idx](input_tensor) # (N, L, 1)\n t_score = torch.cat([t_score_st, t_score_ed], dim=2) # (N, L, 2)\n return input_tensor, t_score\n\n def classfier_head_multi_proposal(self, statement, statement_mask, targets, ts_labels, ts_labels_mask,\n max_num_proposal=1, ce_prob_thd=0.01, iou_thd=0.5, extra_span_length=3):\n \"\"\"Predict the probabilities of each statements being true. Statements = QA + Context.\n Args:\n statement: (N, 5, Li, Lqa, D)\n statement_mask: (N, 5, Li, Lqa)\n targets: (N, )\n ts_labels: (N, Li) for frm or N * (st, ed) for st_ed\n ts_labels_mask: (N, Li)\n max_num_proposal (int):\n ce_prob_thd (float): threshold for p1*p2 (st, ed)\n iou_thd (float): threshold for temporal iou\n extra_span_length (int): expand the localized span to give a little bit extra context\n Returns:\n \"\"\"\n bsz, num_a, num_img, num_words = statement_mask.shape\n statement = statement.view(bsz*num_a*num_img, num_words, -1) # (N*5*Li, Lqa, D)\n statement_mask = statement_mask.view(bsz*num_a*num_img, num_words) # (N*5*Li, Lqa)\n statement = self.cls_encoder(statement, statement_mask) # (N*5*Li, Lqa, D)\n max_statement = torch.max(mask_logits(statement, statement_mask.unsqueeze(2)), 1)[0] # (N*5*Li, D)\n max_statement_mask = (statement_mask.sum(1) != 0).float().view(bsz, num_a, num_img, 1) # (N, 5, Li, 1)\n max_statement = max_statement.view(bsz*num_a, num_img, -1) # (N, 5, Li, D)\n\n t_score_container = []\n encoded_max_statement_container = []\n encoded_max_statement = max_statement # (N*5, Li, D)\n for layer_idx in range(self.t_iter+1):\n encoded_max_statement, prev_t_score = \\\n self.residual_temporal_predictor(layer_idx, encoded_max_statement)\n t_score_container.append(prev_t_score.view(bsz, num_a, num_img, 2)) # (N, 5, Li, 2)\n encoded_max_statement_container.append(encoded_max_statement) # (N*5, Li, D)\n if self.t_iter > 0:\n temporal_scores_st_ed = 0.5 * (t_score_container[0] + torch.stack(t_score_container[:1]).mean(0))\n else:\n temporal_scores_st_ed = t_score_container[0] # (N, 5, Li, 2)\n\n # mask before softmax\n temporal_scores_st_ed = mask_logits(temporal_scores_st_ed, ts_labels_mask.view(bsz, 1, num_img, 1))\n\n # when predict answer, only consider 1st level representation !!!\n # since the others are all generated from the 1st level\n stacked_max_statement = encoded_max_statement_container[0].view(bsz, num_a, num_img, -1) # (N, 5, Li, D)\n if self.add_local:\n max_max_statement, targets = self.get_proposals(\n stacked_max_statement, max_statement_mask, temporal_scores_st_ed,\n targets, ts_labels, max_num_proposal=max_num_proposal, iou_thd=iou_thd,\n ce_prob_thd=ce_prob_thd, extra_span_length=extra_span_length) # (N, 5, D)\n else:\n max_max_statement = \\\n torch.max(mask_logits(stacked_max_statement, max_statement_mask), 2)[0] # (N, 5, D)\n # targets = targets\n\n answer_scores = self.classifier(max_max_statement).squeeze(2) # (N, 5)\n return answer_scores, targets, temporal_scores_st_ed # (N_new, 5), 
(N_new, ) (N, 5, Li, 2)\n\n def get_ts_loss(self, temporal_scores, ts_labels, answer_indices):\n \"\"\"\n Args:\n temporal_scores: (N, 5, Li, 2)\n ts_labels: dict(st=(N, ), ed=(N, ))\n answer_indices: (N, )\n\n Returns:\n\n \"\"\"\n bsz = len(answer_indices)\n # compute loss\n ca_temporal_scores_st_ed = \\\n temporal_scores[torch.arange(bsz, dtype=torch.long), answer_indices] # (N, Li, 2)\n loss_st = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 0], ts_labels[\"st\"])\n loss_ed = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 1], ts_labels[\"ed\"])\n return (loss_st + loss_ed) / 2.\n\n @classmethod\n def sample_negatives(cls, pred_score, pos_indices, neg_indices, num_negatives=2,\n use_hard_negatives=False, negative_pool_size=0, num_hard=2, drop_topk=0):\n \"\"\" Sample negatives from a set of indices. Several sampling strategies are supported:\n 1, random; 2, hard negatives; 3, drop_topk hard negatives; 4, mix easy and hard negatives\n 5, sampling within a pool of hard negatives; 6, sample across images of the same video.\n Args:\n pred_score: (num_img, num_words, num_region)\n pos_indices: (N_pos, 3) all positive region indices for the same word, not necessaryily the same image.\n neg_indices: (N_neg, 3) ...\n num_negatives (int):\n use_hard_negatives (bool):\n negative_pool_size (int):\n num_hard (int):\n drop_topk (int):\n Returns:\n\n \"\"\"\n num_unique_pos = len(pos_indices)\n sampled_pos_indices = torch.cat([pos_indices] * num_negatives, dim=0)\n if use_hard_negatives:\n # print(\"using use_hard_negatives\")\n neg_scores = pred_score[neg_indices[:, 0], neg_indices[:, 1], neg_indices[:, 2]] # TODO\n max_indices = torch.sort(neg_scores, descending=True)[1].tolist()\n if negative_pool_size > num_negatives: # sample from a pool of hard negatives\n hard_pool = max_indices[drop_topk:drop_topk + negative_pool_size]\n hard_pool_indices = neg_indices[hard_pool]\n num_hard_negs = num_negatives\n sampled_easy_neg_indices = []\n if num_hard < num_negatives:\n easy_pool = max_indices[drop_topk + negative_pool_size:]\n easy_pool_indices = neg_indices[easy_pool]\n num_hard_negs = num_hard\n num_easy_negs = num_negatives - num_hard_negs\n sampled_easy_neg_indices = easy_pool_indices[\n torch.randint(low=0, high=len(easy_pool_indices),\n size=(num_easy_negs * num_unique_pos, ), dtype=torch.long)\n ]\n sampled_hard_neg_indices = hard_pool_indices[\n torch.randint(low=0, high=len(hard_pool_indices),\n size=(num_hard_negs * num_unique_pos, ), dtype=torch.long)\n ]\n\n if len(sampled_easy_neg_indices) != 0:\n sampled_neg_indices = torch.cat([sampled_hard_neg_indices, sampled_easy_neg_indices], dim=0)\n else:\n sampled_neg_indices = sampled_hard_neg_indices\n\n else: # directly take the top negatives\n sampled_neg_indices = neg_indices[max_indices[drop_topk:drop_topk+len(sampled_pos_indices)]]\n else:\n sampled_neg_indices = neg_indices[\n torch.randint(low=0, high=len(neg_indices), size=(len(sampled_pos_indices),), dtype=torch.long)\n ]\n return sampled_pos_indices, sampled_neg_indices\n\n def get_att_loss(self, scores, att_labels, target, words, vid_names, qids, q_lens, img_indices, boxes,\n start_indices, num_negatives=2, use_hard_negatives=False, drop_topk=0):\n \"\"\" compute ranking loss, use for loop to find the indices,\n use advanced indexing to perform the real calculation\n Build a list contains a quaduple\n\n Args:\n scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]\n att_labels: list(tensor), each has dimension (#num_imgs, #num_words, #regions), not 
batched\n target: 1D tensor (N, )\n words: LongTensor (N, 5, Lqa)\n vid_names: list(str) (N,)\n qids: list(int), (N, )\n q_lens: list(int), (N, )\n img_indices: list(list(int)), (N, Li), or None\n boxes: list(list(box)) of length N, each sublist represent an image,\n each box contains the coordinates of xyxy, or None\n num_negatives: number of negatives for each positive region\n use_hard_negatives: use hard negatives, uselect negatives with high scores\n drop_topk: drop topk highest negatives (since the top negatives might be correct, they are just not labeled)\n start_indices (list of int): each element is an index (at 0.5fps) of the first image\n with spatial annotation. If with_ts, set to zero\n Returns:\n att_loss: loss value for the batch\n att_predictions: (list) [{\"gt\": gt_scores, \"pred\": pred_scores}, ], used to calculate att. accuracy\n \"\"\"\n pos_container = [] # contains tuples of 5 elements, which are (batch_i, ca_i, img_i, word_i, region_i)\n neg_container = []\n for batch_idx in range(len(target)): # batch\n ca_idx = target[batch_idx].cpu().item()\n gt_score = att_labels[batch_idx] # num_img * (num_words, num_region)\n start_idx = start_indices[batch_idx] # int\n num_img = len(gt_score)\n sen_l, _ = gt_score[0].shape\n pred_score = scores[batch_idx, ca_idx, :num_img, :sen_l] # (num_img, num_words, num_region)\n\n # find positive and negative indices\n batch_pos_indices = []\n batch_neg_indices = []\n for img_idx, img_gt_score in enumerate(gt_score):\n img_idx = start_idx + img_idx\n img_pos_indices = torch.nonzero(img_gt_score) # (N_pos, 2) ==> (#words, #regions)\n if len(img_pos_indices) == 0: # skip if no positive indices\n continue\n img_pos_indices = torch.cat([img_pos_indices.new_full([len(img_pos_indices), 1], img_idx),\n img_pos_indices], dim=1) # (N_pos, 3) ==> (#img, #words, #regions)\n\n img_neg_indices = torch.nonzero(img_gt_score == 0) # (N_neg, 2)\n img_neg_indices = torch.cat([img_neg_indices.new_full([len(img_neg_indices), 1], img_idx),\n img_neg_indices], dim=1) # (N_neg, 3)\n\n batch_pos_indices.append(img_pos_indices)\n batch_neg_indices.append(img_neg_indices)\n\n if len(batch_pos_indices) == 0: # skip if empty ==> no gt label for the video\n continue\n batch_pos_indices = torch.cat(batch_pos_indices, dim=0) # (N_pos, 3) -->\n batch_neg_indices = torch.cat(batch_neg_indices, dim=0) # (N_neg, 3)\n\n # sample positives and negatives\n available_img_indices = batch_pos_indices[:, 0].unique().tolist()\n for img_idx in available_img_indices:\n # pos_indices for a certrain img\n img_idx_pos_indices = batch_pos_indices[batch_pos_indices[:, 0] == img_idx]\n img_idx_neg_indices = batch_neg_indices[batch_neg_indices[:, 0] == img_idx]\n available_word_indices = img_idx_pos_indices[:, 1].unique().tolist()\n for word_idx in available_word_indices:\n # positives and negatives for a given image-word pair, specified by img_idx-word_idx\n img_idx_word_idx_pos_indices = img_idx_pos_indices[img_idx_pos_indices[:, 1] == word_idx]\n img_idx_word_idx_neg_indices = img_idx_neg_indices[img_idx_neg_indices[:, 1] == word_idx]\n # actually all the positives, not sampled pos\n sampled_pos_indices, sampled_neg_indices = \\\n self.sample_negatives(pred_score,\n img_idx_word_idx_pos_indices, img_idx_word_idx_neg_indices,\n num_negatives=num_negatives, use_hard_negatives=use_hard_negatives,\n negative_pool_size=self.negative_pool_size,\n num_hard=self.num_hard, drop_topk=drop_topk)\n\n base_indices = torch.LongTensor([[batch_idx, ca_idx]] * len(sampled_pos_indices)).\\\n 
to(sampled_pos_indices.device)\n pos_container.append(torch.cat([base_indices, sampled_pos_indices], dim=1))\n neg_container.append(torch.cat([base_indices, sampled_neg_indices], dim=1))\n\n pos_container = torch.cat(pos_container, dim=0)\n neg_container = torch.cat(neg_container, dim=0)\n\n # contain all the predictions and gt labels in this batch, only consider the ones with gt labels\n # also only consider the positive answer.\n att_predictions = None\n if not self.training and self.vfeat_flag:\n att_predictions = dict(det_q=[],\n det_ca=[])\n unique_pos_container = np.unique(pos_container.cpu().numpy(), axis=0) # unique rows in the array\n for row in unique_pos_container:\n batch_idx, ca_idx, img_idx, word_idx, region_idx = row\n start_idx = start_indices[batch_idx] # int\n cur_q_len = q_lens[batch_idx]\n num_region = att_labels[batch_idx][img_idx-start_idx].shape[1] # num_img * (num_words, num_region)\n if len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()) != \\\n len(boxes[batch_idx][img_idx-start_idx]):\n print(\"scores[batch_idx, ca_idx, img_idx, word_idx].data.cpu()\",\n len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()))\n print(\"len(boxes[batch_idx][img_idx-start_idx])\", len(boxes[batch_idx][img_idx-start_idx]))\n print(\"boxes, batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx\",\n batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx)\n print(row)\n raise AssertionError\n cur_det_data = {\n \"pred\": scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu(),\n \"word\": words[batch_idx, ca_idx, word_idx],\n \"qid\": qids[batch_idx],\n \"vid_name\": vid_names[batch_idx],\n \"img_idx\": img_indices[batch_idx][img_idx], # full indices\n \"boxes\": boxes[batch_idx][img_idx-start_idx] # located boxes\n }\n if word_idx < cur_q_len:\n att_predictions[\"det_q\"].append(cur_det_data)\n else:\n att_predictions[\"det_ca\"].append(cur_det_data)\n\n pos_scores = scores[pos_container[:, 0], pos_container[:, 1], pos_container[:, 2],\n pos_container[:, 3], pos_container[:, 4]]\n neg_scores = scores[neg_container[:, 0], neg_container[:, 1], neg_container[:, 2],\n neg_container[:, 3], neg_container[:, 4]]\n\n if self.att_loss_type == \"hinge\":\n # max(0, m + S_pos - S_neg)\n att_loss = torch.clamp(self.margin + neg_scores - pos_scores, min=0).sum()\n elif self.att_loss_type == \"lse\":\n # log[1 + exp(scale * (S_pos - S_neg))]\n att_loss = torch.log1p(torch.exp(self.alpha * (neg_scores - pos_scores))).sum()\n else:\n raise NotImplementedError(\"Only support hinge and lse\")\n return att_loss, att_predictions\n\n def get_att_prediction(self, scores, object_vocab, words, vid_names, qids, img_indices, boxes,\n start_indices, score_thd=0.2):\n \"\"\" compute ranking loss, use for loop to find the indices,\n use advanced indexing to perform the real calculation\n Build a list contains a quaduple\n\n Args:\n scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]\n object_vocab: list, object word ids in the vocabulary\n words: LongTensor (N, 5, Lqa)\n vid_names: list(str) (N,)\n qids: list(int), (N, )\n img_indices: list(list(int)), (N, Li), or None\n boxes: list(list(box)) of length N, each sublist represent an image,\n each box contains the coordinates of xyxy, or None\n start_indices (list of int): each element is an index (at 0.5fps) of the first image\n with spatial annotation. 
If with_ts, set to zero\n score_thd: only keep boxes with score higher than this value\n Returns:\n att_loss: loss value for the batch\n att_predictions: (list) [{\"gt\": gt_scores, \"pred\": pred_scores}, ], used to calculate att. accuracy\n \"\"\"\n # contain all the predictions and gt labels in this batch, only consider the ones with gt labels\n # also only consider the positive answer.\n att_predictions = None\n if self.vfeat_flag:\n att_predictions = []\n for batch_idx in range(len(scores)):\n start_idx = start_indices[batch_idx] # int\n q_att_predictions = dict() # predictions associated with this question\n for ans_idx in range(5):\n q_att_predictions[ans_idx] = []\n for img_idx_local in range(len(boxes[batch_idx])):\n # img_idx_local: for the imgs with box anno\n # img_idx_global: for all the imgs, including ones without box anno\n img_idx_global = img_idx_local + start_idx\n cur_img_scores = scores[batch_idx, ans_idx, img_idx_global] # (Lqa, Lr)\n cur_words = words[batch_idx, ans_idx].tolist() # (Lqa, )\n cur_img_boxes = boxes[batch_idx][img_idx_local]\n for word_idx, w in enumerate(cur_words):\n if w in object_vocab:\n cur_word_region_scores = cur_img_scores[word_idx].data.cpu().numpy() # (Lr, )\n accepted_region_ids = np.nonzero(cur_word_region_scores >= score_thd)[0].tolist()\n accepted_region_scores = [float(cur_word_region_scores[i]) for i in accepted_region_ids]\n accepted_region_boxes = [cur_img_boxes[i] for i in accepted_region_ids]\n sorted_indices = np.argsort(accepted_region_scores)\n accepted_region_scores = [accepted_region_scores[i] for i in sorted_indices]\n accepted_region_boxes = [accepted_region_boxes[i] for i in sorted_indices]\n cur_det_data = {\n \"pred\": accepted_region_scores,\n \"bbox\": accepted_region_boxes,\n \"word\": int(words[batch_idx, ans_idx, word_idx]),\n \"qid\": int(qids[batch_idx]),\n \"vid_name\": vid_names[batch_idx],\n \"img_idx\": img_indices[batch_idx][img_idx_global], # image file name id\n }\n q_att_predictions[ans_idx].append(cur_det_data)\n att_predictions.append(q_att_predictions)\n return att_predictions\n"
] | [
[
"torch.stack",
"torch.nonzero",
"torch.nn.functional.softmax",
"numpy.argsort",
"torch.max",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.from_numpy",
"torch.arange",
"numpy.nonzero",
"torch.sort",
"torch.nn.functional.normalize",
"torch.FloatTensor",
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.exp",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.clamp"
]
] |
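The `get_att_loss` method in the entry above supervises region-word attention with a pairwise ranking objective and supports two variants ("hinge" and "lse"). Note that its in-code comments write `S_pos - S_neg` while the implementation actually computes `margin + S_neg - S_pos` (and `alpha * (S_neg - S_pos)`). Below is a minimal, standalone sketch of those two losses as implemented; the `margin` and `alpha` defaults are placeholders, since the original model reads them from options (`self.margin`, `self.alpha`) that are not shown in this entry.

```python
import torch

def attention_ranking_loss(pos_scores, neg_scores, loss_type="hinge",
                           margin=0.2, alpha=20.0):
    """pos_scores / neg_scores: 1D tensors of paired positive and negative
    region-word similarity scores (same length). Values of margin/alpha are
    illustrative placeholders."""
    if loss_type == "hinge":
        # sum over pairs of max(0, margin + S_neg - S_pos)
        return torch.clamp(margin + neg_scores - pos_scores, min=0).sum()
    elif loss_type == "lse":
        # sum over pairs of log(1 + exp(alpha * (S_neg - S_pos))),
        # a smooth surrogate for the hinge above
        return torch.log1p(torch.exp(alpha * (neg_scores - pos_scores))).sum()
    raise NotImplementedError("Only hinge and lse are supported")

# toy usage: positives are expected to score higher than their negatives
pos = torch.tensor([0.8, 0.6, 0.7])
neg = torch.tensor([0.3, 0.5, 0.2])
print(attention_ranking_loss(pos, neg, "hinge"))
print(attention_ranking_loss(pos, neg, "lse"))
```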
janewen134/catsdogs | [
"051dc0d4bf695ca2db03df6fc3cf758331df4aaa"
] | [
"cats_and_dogs_classification.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Cats and Dogs Classification\n\n# Data Loading and Exploring\n\n# In[1]:\n\n\nimport os\nbase_dir = './cats_and_dogs_filtered'\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\n\n# cat training pictures\ntrain_cats_dir = os.path.join(train_dir, 'cats')\n\n# dog training pictures\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\n\n# cat validation pictures\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\n\n# dog validation pictures\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\n\n\n# In[2]:\n\n\n# view file names\ntrain_cat_fnames = os.listdir(train_cats_dir)\nprint(train_cat_fnames[:10])\n\ntrain_dog_fnames = os.listdir(train_dogs_dir)\ntrain_dog_fnames.sort()\nprint(train_dog_fnames[:10])\n\n\n# In[3]:\n\n\n# preview images to know what the dataset is like\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Parameters for our graph; we'll output images in a 4*4 configuration\nnrows = 4\nncols = 4\n\n# Index for iterating over images\npic_index = 0\n\n# Set up matplotlib fig, and size it to fit 4*4 pics\nfig = plt.gcf()\nfig.set_size_inches(ncols*4, nrows*4)\n\n# 8 images for cats and dogs separately\npic_index += 8\n\nnext_cat_pix = [os.path.join(train_cats_dir, fname) for fname in train_cat_fnames[pic_index-8:pic_index]]\nnext_dog_pix = [os.path.join(train_dogs_dir, fname) for fname in train_dog_fnames[pic_index-8:pic_index]]\n\nfor i, img_path in enumerate(next_cat_pix + next_dog_pix):\n # Set up subplot; subplot indices starts at 1\n sp = plt.subplot(nrows, ncols, i+1)\n sp.axis('Off')\n \n img = mpimg.imread(img_path)\n plt.imshow(img)\nplt.show() \n\n\n# build a small convnet from scratch to get to 72% accuracy\n\n# In[4]:\n\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\n\n# Our input feature map is 150*150*3: 150*150 for the image pixels, \n# and 3 for the three color channels: R, G and B\nimg_input = layers.Input(shape=(150,150,3))\n\n# First convolution extracts 16 filters that are 3*3\n# Convolution is followed by max-pooling layer with a 2*2 window\nx = layers.Conv2D(16,3,activation='relu')(img_input)\nx = layers.MaxPooling2D(2)(x)\n\n# Second convolution extracts 32 filters that are 3*3\n# Convolution is followed by max-pooling layer with a 2*2 window\nx = layers.Conv2D(32,3,activation='relu')(x)\nx = layers.MaxPooling2D(2)(x)\n\n# Third convolution extracts 64 filters that are 3*3\n# Convolution is followed by max-pooling layer with a 2*2 window\nx = layers.Conv2D(64,3, activation='relu')(x)\nx = layers.MaxPooling2D(2)(x)\n\n\n# fully-connected layers: because we are facing a binary classification problem, we will end our network with a sigmoid activation, so that the output of our network will be a single scalar between 0 and 1.\n\n# In[5]:\n\n\n# Flatten feature map to a 1-dim tensor so we can add fully connected layers\nx = layers.Flatten()(x)\n# Generate a fully connected layer with ReLU activation and 512 hidden units\nx = layers.Dense(512,activation='relu')(x)\n\n# Create output layer with a single node and sigmoid activation\noutput = layers.Dense(1, activation='sigmoid')(x)\n\n# Create Model\n# input = input feature map\n# output = output feature map\n# connected layer + sigmoid output layer \nmodel = Model(img_input,output)\n\n\n# Let's summarize the model architecture\n\n# In[6]:\n\n\nmodel.summary()\n\n\n# In[7]:\n\n\n# use RMSprop instead of stochastic gradient \nfrom 
tensorflow.keras.optimizers import RMSprop\n\nmodel.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])\n\n\n# Data Preprocessing\n\n# In[8]:\n\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# All images will be rescaled by 1./255\ntrain_datagen = ImageDataGenerator(rescale=1./255)\nval_datagen = ImageDataGenerator(rescale=1./255)\n\n# Flow training images in batches of 20 using train_datagen generator\ntrain_generator = train_datagen.flow_from_directory(\n train_dir, # This is the source directory for training images\n target_size=(150,150),\n batch_size=20,\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary'\n)\n\n# Flow validation images in batches of 20 using val_datagen generator\nvalidation_generator = val_datagen.flow_from_directory(\n validation_dir,\n target_size=(150,150),\n batch_size=20,\n class_mode='binary'\n)\n\n\n# Training\n# <br>train on 2000 images, for 15 epochs and validate on 1000 images\n\n# In[ ]:\n\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch=100, # 2000 images = batch_size * steps\n epochs=15,\n validation_data=validation_generator,\n validation_steps=50, # 1000 images = batch_size * steps\n verbose=1\n)\n\n# Visualizing Intermediate Representations\n# Visualize how an input gets transformed as it goes through the convnet\n\n# In[ ]:\n\nimport numpy as np\nimport random\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\n\n# define a new Model that takes an img as input and will output\n# intermediate representations for all layers in the previous model after\n# the first\nsuccessive_outputs = [layers.output for layer in model.layers[1:]]\nvisualization_model = Model(img_input, successive_outputs)\n\n# prepare a random input img of a cat or dog from the training set\ncat_img_files = [os.path.join(train_cats_dir, f) for f in train_cat_fnames]\ndog_img_files = [os.path.join(train_dogs_dir, f) for f in train_dog_fnames]\nimg_path = random.choice(cat_img_files + dog_img_files)\n\nimg = load_img(img_path, target_size=(150, 150)) # this is a PIL img\nx = img_to_array(img) # Numpy array with shape (150, 150, 3)\nx = x.reshape((1,) + x.shape)\n\n# Rescale by 1/255\nx /= 255\n\n# Let's run our image through our network, thus obtaining all\n# intermediate representations for this img.\nsuccessive_feature_maps = visualization_model.predict(x)\n\n# These are names of the layers\nlayer_names = [layer.name for layer in model.layers]\n\n# Now let's display our representations\nfor layer_name, feature_map in zip(layer_names, successive_feature_maps):\n if len(feature_map.shape) == 4:\n # Just do this for the conv/ maxpool layers, not the fully-connected layers\n n_features = feature_map.shape[-1] # number of features in feature map\n\n # retrieve a list of lists results on training and validattion data\n # sets for each training epoch\n loss = history.history['val_loss']\n\n # Get number of epochs\n epochs = range(len(acc))\n\n # Plot training and validation accuracy per epoch\n plt.plot(epochs, acc)\n plt.plot(epochs, val_acc)\n plt.title('Train and validation accuracy')\n\n plt.figure()\n\n # plot training and validation loss per epoch\n plt.plot(epochs, loss)\n plt.plot(epochs, val_loss)\n plt.title('Training and validation loss')\n\n\n# Evaluating Accuracy and Loss for the Model\n# plot the training / validation accuracy and loss as collected during training\n# In[ ]:\n\n# Retrieve a list of accuracy results on training and validation 
data\n# sets for each training epoch\nacc = history.history['acc']\nval_acc = history.history['val_acc']\n\n# Retrieve a list of list results on training and validation data\n# sets for each training epoch\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\n# Get number of epochs\nepochs = range(len(acc))\n\n# Plot training and validation accuracy per epoch\nplt.plot(epochs, acc)\nplt.plot(epochs, val_acc)\nplt.title('Training and validation accuracy')\n\nplt.figure()\n\n# Plot training and validation loss per epoch\nplt.plot(epochs, loss)\nplt.plot(epochs, val_loss)\nplt.title('Training and validation loss')\n\n# Clean Up\n# In[ ]:\n\nimport os, signal\nos.kill(os.getpid(), signal.SIGKILL)\n"
] | [
[
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gcf",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.Model",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"matplotlib.pyplot.plot",
"matplotlib.image.imread",
"tensorflow.keras.layers.Input"
]
] |
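The visualization cell in the Keras script above builds its intermediate-output model with `[layers.output for layer in model.layers[1:]]`, which references the `layers` module rather than each `layer`, so it fails at runtime. The following is a corrected, self-contained sketch of that technique (one functional `Model` that emits every layer's activation); the small network here is only illustrative and does not reproduce the exact architecture or the real image preprocessing of that script.

```python
import numpy as np
from tensorflow.keras import layers, Model

# Tiny stand-in network (shapes chosen to mirror the 150x150x3 input used above).
img_input = layers.Input(shape=(150, 150, 3))
x = layers.Conv2D(16, 3, activation="relu")(img_input)
x = layers.MaxPooling2D(2)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(2)(x)
output = layers.Dense(1, activation="sigmoid")(layers.Flatten()(x))
model = Model(img_input, output)

# One output per layer after the input layer, collected into a single model.
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)

# Stand-in for a rescaled image batch of shape (1, 150, 150, 3).
sample = np.random.rand(1, 150, 150, 3).astype("float32")
for layer, fmap in zip(model.layers[1:], visualization_model.predict(sample)):
    print(layer.name, fmap.shape)  # inspect each intermediate representation
```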