/*!
* jQuery JavaScript Library v1.4.2
* http://jquery.com/
*
* Copyright 2010, John Resig
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* Includes Sizzle.js
* http://sizzlejs.com/
* Copyright 2010, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
*
* Date: Sat Feb 13 22:33:48 2010 -0500
*/
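/*
 * Illustrative usage sketch, kept inside a comment so this file's behavior is
 * unchanged. It shows the typical pattern for the library below (ready handler,
 * selector-based chaining, event binding, $.ajax). The selector "#status", the
 * "button.save" element, and the "/save" URL are placeholders, not part of the
 * library itself.
 *
 *   jQuery(document).ready(function ($) {
 *     // Select elements with a CSS selector and chain methods on the result.
 *     $("#status").text("ready").addClass("ok");
 *
 *     // Bind a click handler and issue an AJAX request (jQuery 1.4.x API).
 *     $("button.save").click(function () {
 *       $.ajax({ url: "/save", type: "POST", data: { id: 1 } });
 *     });
 *   });
 */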
(function(A,w){function ma(){if(!c.isReady){try{s.documentElement.doScroll("left")}catch(a){setTimeout(ma,1);return}c.ready()}}function Qa(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}function X(a,b,d,f,e,j){var i=a.length;if(typeof b==="object"){for(var o in b)X(a,o,b[o],f,e,d);return a}if(d!==w){f=!j&&f&&c.isFunction(d);for(o=0;o<i;o++)e(a[o],b,f?d.call(a[o],o,e(a[o],b)):d,j);return a}return i?
e(a[0],b):w}function J(){return(new Date).getTime()}function Y(){return false}function Z(){return true}function na(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function oa(a){var b,d=[],f=[],e=arguments,j,i,o,k,n,r;i=c.data(this,"events");if(!(a.liveFired===this||!i||!i.live||a.button&&a.type==="click")){a.liveFired=this;var u=i.live.slice(0);for(k=0;k<u.length;k++){i=u[k];i.origType.replace(O,"")===a.type?f.push(i.selector):u.splice(k--,1)}j=c(a.target).closest(f,a.currentTarget);n=0;for(r=
j.length;n<r;n++)for(k=0;k<u.length;k++){i=u[k];if(j[n].selector===i.selector){o=j[n].elem;f=null;if(i.preType==="mouseenter"||i.preType==="mouseleave")f=c(a.relatedTarget).closest(i.selector)[0];if(!f||f!==o)d.push({elem:o,handleObj:i})}}n=0;for(r=d.length;n<r;n++){j=d[n];a.currentTarget=j.elem;a.data=j.handleObj.data;a.handleObj=j.handleObj;if(j.handleObj.origHandler.apply(j.elem,e)===false){b=false;break}}return b}}function pa(a,b){return"live."+(a&&a!=="*"?a+".":"")+b.replace(/\./g,"`").replace(/ /g,
"&")}function qa(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function ra(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var f=c.data(a[d++]),e=c.data(this,f);if(f=f&&f.events){delete e.handle;e.events={};for(var j in f)for(var i in f[j])c.event.add(this,j,f[j][i],f[j][i].data)}}})}function sa(a,b,d){var f,e,j;b=b&&b[0]?b[0].ownerDocument||b[0]:s;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===s&&!ta.test(a[0])&&(c.support.checkClone||!ua.test(a[0]))){e=
true;if(j=c.fragments[a[0]])if(j!==1)f=j}if(!f){f=b.createDocumentFragment();c.clean(a,b,f,d)}if(e)c.fragments[a[0]]=j?f:1;return{fragment:f,cacheable:e}}function K(a,b){var d={};c.each(va.concat.apply([],va.slice(0,b)),function(){d[this]=a});return d}function wa(a){return"scrollTo"in a&&a.document?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var c=function(a,b){return new c.fn.init(a,b)},Ra=A.jQuery,Sa=A.$,s=A.document,T,Ta=/^[^<]*(<[\w\W]+>)[^>]*$|^#([\w-]+)$/,Ua=/^.[^:#\[\.,]*$/,Va=/\S/,
Wa=/^(\s|\u00A0)+|(\s|\u00A0)+$/g,Xa=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,P=navigator.userAgent,xa=false,Q=[],L,$=Object.prototype.toString,aa=Object.prototype.hasOwnProperty,ba=Array.prototype.push,R=Array.prototype.slice,ya=Array.prototype.indexOf;c.fn=c.prototype={init:function(a,b){var d,f;if(!a)return this;if(a.nodeType){this.context=this[0]=a;this.length=1;return this}if(a==="body"&&!b){this.context=s;this[0]=s.body;this.selector="body";this.length=1;return this}if(typeof a==="string")if((d=Ta.exec(a))&&
(d[1]||!b))if(d[1]){f=b?b.ownerDocument||b:s;if(a=Xa.exec(a))if(c.isPlainObject(b)){a=[s.createElement(a[1])];c.fn.attr.call(a,b,true)}else a=[f.createElement(a[1])];else{a=sa([d[1]],[f]);a=(a.cacheable?a.fragment.cloneNode(true):a.fragment).childNodes}return c.merge(this,a)}else{if(b=s.getElementById(d[2])){if(b.id!==d[2])return T.find(a);this.length=1;this[0]=b}this.context=s;this.selector=a;return this}else if(!b&&/^\w+$/.test(a)){this.selector=a;this.context=s;a=s.getElementsByTagName(a);return c.merge(this,
a)}else return!b||b.jquery?(b||T).find(a):c(b).find(a);else if(c.isFunction(a))return T.ready(a);if(a.selector!==w){this.selector=a.selector;this.context=a.context}return c.makeArray(a,this)},selector:"",jquery:"1.4.2",length:0,size:function(){return this.length},toArray:function(){return R.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this.slice(a)[0]:this[a]},pushStack:function(a,b,d){var f=c();c.isArray(a)?ba.apply(f,a):c.merge(f,a);f.prevObject=this;f.context=this.context;if(b===
"find")f.selector=this.selector+(this.selector?" ":"")+d;else if(b)f.selector=this.selector+"."+b+"("+d+")";return f},each:function(a,b){return c.each(this,a,b)},ready:function(a){c.bindReady();if(c.isReady)a.call(s,c);else Q&&Q.push(a);return this},eq:function(a){return a===-1?this.slice(a):this.slice(a,+a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(R.apply(this,arguments),"slice",R.call(arguments).join(","))},map:function(a){return this.pushStack(c.map(this,
function(b,d){return a.call(b,d,b)}))},end:function(){return this.prevObject||c(null)},push:ba,sort:[].sort,splice:[].splice};c.fn.init.prototype=c.fn;c.extend=c.fn.extend=function(){var a=arguments[0]||{},b=1,d=arguments.length,f=false,e,j,i,o;if(typeof a==="boolean"){f=a;a=arguments[1]||{};b=2}if(typeof a!=="object"&&!c.isFunction(a))a={};if(d===b){a=this;--b}for(;b<d;b++)if((e=arguments[b])!=null)for(j in e){i=a[j];o=e[j];if(a!==o)if(f&&o&&(c.isPlainObject(o)||c.isArray(o))){i=i&&(c.isPlainObject(i)||
c.isArray(i))?i:c.isArray(o)?[]:{};a[j]=c.extend(f,i,o)}else if(o!==w)a[j]=o}return a};c.extend({noConflict:function(a){A.$=Sa;if(a)A.jQuery=Ra;return c},isReady:false,ready:function(){if(!c.isReady){if(!s.body)return setTimeout(c.ready,13);c.isReady=true;if(Q){for(var a,b=0;a=Q[b++];)a.call(s,c);Q=null}c.fn.triggerHandler&&c(s).triggerHandler("ready")}},bindReady:function(){if(!xa){xa=true;if(s.readyState==="complete")return c.ready();if(s.addEventListener){s.addEventListener("DOMContentLoaded",
L,false);A.addEventListener("load",c.ready,false)}else if(s.attachEvent){s.attachEvent("onreadystatechange",L);A.attachEvent("onload",c.ready);var a=false;try{a=A.frameElement==null}catch(b){}s.documentElement.doScroll&&a&&ma()}}},isFunction:function(a){return $.call(a)==="[object Function]"},isArray:function(a){return $.call(a)==="[object Array]"},isPlainObject:function(a){if(!a||$.call(a)!=="[object Object]"||a.nodeType||a.setInterval)return false;if(a.constructor&&!aa.call(a,"constructor")&&!aa.call(a.constructor.prototype,
"isPrototypeOf"))return false;var b;for(b in a);return b===w||aa.call(a,b)},isEmptyObject:function(a){for(var b in a)return false;return true},error:function(a){throw a;},parseJSON:function(a){if(typeof a!=="string"||!a)return null;a=c.trim(a);if(/^[\],:{}\s]*$/.test(a.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,"@").replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,"]").replace(/(?:^|:|,)(?:\s*\[)+/g,"")))return A.JSON&&A.JSON.parse?A.JSON.parse(a):(new Function("return "+
a))();else c.error("Invalid JSON: "+a)},noop:function(){},globalEval:function(a){if(a&&Va.test(a)){var b=s.getElementsByTagName("head")[0]||s.documentElement,d=s.createElement("script");d.type="text/javascript";if(c.support.scriptEval)d.appendChild(s.createTextNode(a));else d.text=a;b.insertBefore(d,b.firstChild);b.removeChild(d)}},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,b,d){var f,e=0,j=a.length,i=j===w||c.isFunction(a);if(d)if(i)for(f in a){if(b.apply(a[f],
d)===false)break}else for(;e<j;){if(b.apply(a[e++],d)===false)break}else if(i)for(f in a){if(b.call(a[f],f,a[f])===false)break}else for(d=a[0];e<j&&b.call(d,e,d)!==false;d=a[++e]);return a},trim:function(a){return(a||"").replace(Wa,"")},makeArray:function(a,b){b=b||[];if(a!=null)a.length==null||typeof a==="string"||c.isFunction(a)||typeof a!=="function"&&a.setInterval?ba.call(b,a):c.merge(b,a);return b},inArray:function(a,b){if(b.indexOf)return b.indexOf(a);for(var d=0,f=b.length;d<f;d++)if(b[d]===
a)return d;return-1},merge:function(a,b){var d=a.length,f=0;if(typeof b.length==="number")for(var e=b.length;f<e;f++)a[d++]=b[f];else for(;b[f]!==w;)a[d++]=b[f++];a.length=d;return a},grep:function(a,b,d){for(var f=[],e=0,j=a.length;e<j;e++)!d!==!b(a[e],e)&&f.push(a[e]);return f},map:function(a,b,d){for(var f=[],e,j=0,i=a.length;j<i;j++){e=b(a[j],j,d);if(e!=null)f[f.length]=e}return f.concat.apply([],f)},guid:1,proxy:function(a,b,d){if(arguments.length===2)if(typeof b==="string"){d=a;a=d[b];b=w}else if(b&&
!c.isFunction(b)){d=b;b=w}if(!b&&a)b=function(){return a.apply(d||this,arguments)};if(a)b.guid=a.guid=a.guid||b.guid||c.guid++;return b},uaMatch:function(a){a=a.toLowerCase();a=/(webkit)[ \/]([\w.]+)/.exec(a)||/(opera)(?:.*version)?[ \/]([\w.]+)/.exec(a)||/(msie) ([\w.]+)/.exec(a)||!/compatible/.test(a)&&/(mozilla)(?:.*? rv:([\w.]+))?/.exec(a)||[];return{browser:a[1]||"",version:a[2]||"0"}},browser:{}});P=c.uaMatch(P);if(P.browser){c.browser[P.browser]=true;c.browser.version=P.version}if(c.browser.webkit)c.browser.safari=
true;if(ya)c.inArray=function(a,b){return ya.call(b,a)};T=c(s);if(s.addEventListener)L=function(){s.removeEventListener("DOMContentLoaded",L,false);c.ready()};else if(s.attachEvent)L=function(){if(s.readyState==="complete"){s.detachEvent("onreadystatechange",L);c.ready()}};(function(){c.support={};var a=s.documentElement,b=s.createElement("script"),d=s.createElement("div"),f="script"+J();d.style.display="none";d.innerHTML=" <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";
var e=d.getElementsByTagName("*"),j=d.getElementsByTagName("a")[0];if(!(!e||!e.length||!j)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(j.getAttribute("style")),hrefNormalized:j.getAttribute("href")==="/a",opacity:/^0.55$/.test(j.style.opacity),cssFloat:!!j.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:s.createElement("select").appendChild(s.createElement("option")).selected,
parentNode:d.removeChild(d.appendChild(s.createElement("div"))).parentNode===null,deleteExpando:true,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null};b.type="text/javascript";try{b.appendChild(s.createTextNode("window."+f+"=1;"))}catch(i){}a.insertBefore(b,a.firstChild);if(A[f]){c.support.scriptEval=true;delete A[f]}try{delete b.test}catch(o){c.support.deleteExpando=false}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function k(){c.support.noCloneEvent=
false;d.detachEvent("onclick",k)});d.cloneNode(true).fireEvent("onclick")}d=s.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=s.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var k=s.createElement("div");k.style.width=k.style.paddingLeft="1px";s.body.appendChild(k);c.boxModel=c.support.boxModel=k.offsetWidth===2;s.body.removeChild(k).style.display="none"});a=function(k){var n=
s.createElement("div");k="on"+k;var r=k in n;if(!r){n.setAttribute(k,"return;");r=typeof n[k]==="function"}return r};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=e=j=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var G="jQuery"+J(),Ya=0,za={};c.extend({cache:{},expando:G,noData:{embed:true,object:true,
applet:true},data:function(a,b,d){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var f=a[G],e=c.cache;if(!f&&typeof b==="string"&&d===w)return null;f||(f=++Ya);if(typeof b==="object"){a[G]=f;e[f]=c.extend(true,{},b)}else if(!e[f]){a[G]=f;e[f]={}}a=e[f];if(d!==w)a[b]=d;return typeof b==="string"?a[b]:a}},removeData:function(a,b){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var d=a[G],f=c.cache,e=f[d];if(b){if(e){delete e[b];c.isEmptyObject(e)&&c.removeData(a)}}else{if(c.support.deleteExpando)delete a[c.expando];
else a.removeAttribute&&a.removeAttribute(c.expando);delete f[d]}}}});c.fn.extend({data:function(a,b){if(typeof a==="undefined"&&this.length)return c.data(this[0]);else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===w){var f=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(f===w&&this.length)f=c.data(this[0],a);return f===w&&d[1]?this.data(d[0]):f}else return this.trigger("setData"+d[1]+"!",[d[0],b]).each(function(){c.data(this,
a,b)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var f=c.data(a,b);if(!d)return f||[];if(!f||c.isArray(d))f=c.data(a,b,c.makeArray(d));else f.push(d);return f}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),f=d.shift();if(f==="inprogress")f=d.shift();if(f){b==="fx"&&d.unshift("inprogress");f.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===
w)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var Aa=/[\n\t]/g,ca=/\s+/,Za=/\r/g,$a=/href|src|style/,ab=/(button|input)/i,bb=/(button|input|object|select|textarea)/i,
cb=/^(a|area)$/i,Ba=/radio|checkbox/;c.fn.extend({attr:function(a,b){return X(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(n){var r=c(this);r.addClass(a.call(this,n,r.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1)if(e.className){for(var j=" "+e.className+" ",
i=e.className,o=0,k=b.length;o<k;o++)if(j.indexOf(" "+b[o]+" ")<0)i+=" "+b[o];e.className=c.trim(i)}else e.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(k){var n=c(this);n.removeClass(a.call(this,k,n.attr("class")))});if(a&&typeof a==="string"||a===w)for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1&&e.className)if(a){for(var j=(" "+e.className+" ").replace(Aa," "),i=0,o=b.length;i<o;i++)j=j.replace(" "+b[i]+" ",
" ");e.className=c.trim(j)}else e.className=""}return this},toggleClass:function(a,b){var d=typeof a,f=typeof b==="boolean";if(c.isFunction(a))return this.each(function(e){var j=c(this);j.toggleClass(a.call(this,e,j.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var e,j=0,i=c(this),o=b,k=a.split(ca);e=k[j++];){o=f?o:!i.hasClass(e);i[o?"addClass":"removeClass"](e)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=
this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(Aa," ").indexOf(a)>-1)return true;return false},val:function(a){if(a===w){var b=this[0];if(b){if(c.nodeName(b,"option"))return(b.attributes.value||{}).specified?b.value:b.text;if(c.nodeName(b,"select")){var d=b.selectedIndex,f=[],e=b.options;b=b.type==="select-one";if(d<0)return null;var j=b?d:0;for(d=b?d+1:e.length;j<d;j++){var i=
e[j];if(i.selected){a=c(i).val();if(b)return a;f.push(a)}}return f}if(Ba.test(b.type)&&!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Za,"")}return w}var o=c.isFunction(a);return this.each(function(k){var n=c(this),r=a;if(this.nodeType===1){if(o)r=a.call(this,k,n.val());if(typeof r==="number")r+="";if(c.isArray(r)&&Ba.test(this.type))this.checked=c.inArray(n.val(),r)>=0;else if(c.nodeName(this,"select")){var u=c.makeArray(r);c("option",this).each(function(){this.selected=
c.inArray(c(this).val(),u)>=0});if(!u.length)this.selectedIndex=-1}else this.value=r}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,f){if(!a||a.nodeType===3||a.nodeType===8)return w;if(f&&b in c.attrFn)return c(a)[b](d);f=a.nodeType!==1||!c.isXMLDoc(a);var e=d!==w;b=f&&c.props[b]||b;if(a.nodeType===1){var j=$a.test(b);if(b in a&&f&&!j){if(e){b==="type"&&ab.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");
a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:bb.test(a.nodeName)||cb.test(a.nodeName)&&a.href?0:w;return a[b]}if(!c.support.style&&f&&b==="style"){if(e)a.style.cssText=""+d;return a.style.cssText}e&&a.setAttribute(b,""+d);a=!c.support.hrefNormalized&&f&&j?a.getAttribute(b,2):a.getAttribute(b);return a===null?w:a}return c.style(a,b,d)}});var O=/\.(.*)$/,db=function(a){return a.replace(/[^\w\s\.\|`]/g,
function(b){return"\\"+b})};c.event={add:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){if(a.setInterval&&a!==A&&!a.frameElement)a=A;var e,j;if(d.handler){e=d;d=e.handler}if(!d.guid)d.guid=c.guid++;if(j=c.data(a)){var i=j.events=j.events||{},o=j.handle;if(!o)j.handle=o=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(o.elem,arguments):w};o.elem=a;b=b.split(" ");for(var k,n=0,r;k=b[n++];){j=e?c.extend({},e):{handler:d,data:f};if(k.indexOf(".")>-1){r=k.split(".");
k=r.shift();j.namespace=r.slice(0).sort().join(".")}else{r=[];j.namespace=""}j.type=k;j.guid=d.guid;var u=i[k],z=c.event.special[k]||{};if(!u){u=i[k]=[];if(!z.setup||z.setup.call(a,f,r,o)===false)if(a.addEventListener)a.addEventListener(k,o,false);else a.attachEvent&&a.attachEvent("on"+k,o)}if(z.add){z.add.call(a,j);if(!j.handler.guid)j.handler.guid=d.guid}u.push(j);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){var e,j=0,i,o,k,n,r,u,z=c.data(a),
C=z&&z.events;if(z&&C){if(b&&b.type){d=b.handler;b=b.type}if(!b||typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(e in C)c.event.remove(a,e+b)}else{for(b=b.split(" ");e=b[j++];){n=e;i=e.indexOf(".")<0;o=[];if(!i){o=e.split(".");e=o.shift();k=new RegExp("(^|\\.)"+c.map(o.slice(0).sort(),db).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(r=C[e])if(d){n=c.event.special[e]||{};for(B=f||0;B<r.length;B++){u=r[B];if(d.guid===u.guid){if(i||k.test(u.namespace)){f==null&&r.splice(B--,1);n.remove&&n.remove.call(a,u)}if(f!=
null)break}}if(r.length===0||f!=null&&r.length===1){if(!n.teardown||n.teardown.call(a,o)===false)Ca(a,e,z.handle);delete C[e]}}else for(var B=0;B<r.length;B++){u=r[B];if(i||k.test(u.namespace)){c.event.remove(a,n,u.handler,B);r.splice(B--,1)}}}if(c.isEmptyObject(C)){if(b=z.handle)b.elem=null;delete z.events;delete z.handle;c.isEmptyObject(z)&&c.removeData(a)}}}}},trigger:function(a,b,d,f){var e=a.type||a;if(!f){a=typeof a==="object"?a[G]?a:c.extend(c.Event(e),a):c.Event(e);if(e.indexOf("!")>=0){a.type=
e=e.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[e]&&c.each(c.cache,function(){this.events&&this.events[e]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return w;a.result=w;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(f=c.data(d,"handle"))&&f.apply(d,b);f=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+e]&&d["on"+e].apply(d,b)===false)a.result=false}catch(j){}if(!a.isPropagationStopped()&&
f)c.event.trigger(a,b,f,true);else if(!a.isDefaultPrevented()){f=a.target;var i,o=c.nodeName(f,"a")&&e==="click",k=c.event.special[e]||{};if((!k._default||k._default.call(d,a)===false)&&!o&&!(f&&f.nodeName&&c.noData[f.nodeName.toLowerCase()])){try{if(f[e]){if(i=f["on"+e])f["on"+e]=null;c.event.triggered=true;f[e]()}}catch(n){}if(i)f["on"+e]=i;c.event.triggered=false}}},handle:function(a){var b,d,f,e;a=arguments[0]=c.event.fix(a||A.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;
if(!b){d=a.type.split(".");a.type=d.shift();f=new RegExp("(^|\\.)"+d.slice(0).sort().join("\\.(?:.*\\.)?")+"(\\.|$)")}e=c.data(this,"events");d=e[a.type];if(e&&d){d=d.slice(0);e=0;for(var j=d.length;e<j;e++){var i=d[e];if(b||f.test(i.namespace)){a.handler=i.handler;a.data=i.data;a.handleObj=i;i=i.handler.apply(this,arguments);if(i!==w){a.result=i;if(i===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
fix:function(a){if(a[G])return a;var b=a;a=c.Event(b);for(var d=this.props.length,f;d;){f=this.props[--d];a[f]=b[f]}if(!a.target)a.target=a.srcElement||s;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=s.documentElement;d=s.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(!a.which&&(a.charCode||a.charCode===0?a.charCode:a.keyCode))a.which=a.charCode||a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==w)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,a.origType,c.extend({},a,{handler:oa}))},remove:function(a){var b=true,d=a.origType.replace(O,"");c.each(c.data(this,
"events").live||[],function(){if(d===this.origType.replace(O,""))return b=false});b&&c.event.remove(this,a.origType,oa)}},beforeunload:{setup:function(a,b,d){if(this.setInterval)this.onbeforeunload=d;return false},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};var Ca=s.removeEventListener?function(a,b,d){a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=
a;this.type=a.type}else this.type=a;this.timeStamp=J();this[G]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=Z;var a=this.originalEvent;if(a){a.preventDefault&&a.preventDefault();a.returnValue=false}},stopPropagation:function(){this.isPropagationStopped=Z;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=Z;this.stopPropagation()},isDefaultPrevented:Y,isPropagationStopped:Y,
isImmediatePropagationStopped:Y};var Da=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},Ea=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?Ea:Da,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?Ea:Da)}}});if(!c.support.submitBubbles)c.event.special.submit=
{setup:function(){if(this.nodeName.toLowerCase()!=="form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length)return na("submit",this,arguments)});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13)return na("submit",this,arguments)})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};
if(!c.support.changeBubbles){var da=/textarea|input|select/i,ea,Fa=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(f){return f.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},fa=function(a,b){var d=a.target,f,e;if(!(!da.test(d.nodeName)||d.readOnly)){f=c.data(d,"_change_data");e=Fa(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",
e);if(!(f===w||e===f))if(f!=null||e){a.type="change";return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:fa,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return fa.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return fa.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,
"_change_data",Fa(a))}},setup:function(){if(this.type==="file")return false;for(var a in ea)c.event.add(this,a+".specialChange",ea[a]);return da.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return da.test(this.nodeName)}};ea=c.event.special.change.filters}s.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(f){f=c.event.fix(f);f.type=b;return c.event.handle.call(this,f)}c.event.special[b]={setup:function(){this.addEventListener(a,
d,true)},teardown:function(){this.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,f,e){if(typeof d==="object"){for(var j in d)this[b](j,f,d[j],e);return this}if(c.isFunction(f)){e=f;f=w}var i=b==="one"?c.proxy(e,function(k){c(this).unbind(k,i);return e.apply(this,arguments)}):e;if(d==="unload"&&b!=="one")this.one(d,f,e);else{j=0;for(var o=this.length;j<o;j++)c.event.add(this[j],d,i,f)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&
!a.preventDefault)for(var d in a)this.unbind(d,a[d]);else{d=0;for(var f=this.length;d<f;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,f){return this.live(b,d,f,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){a=c.Event(a);a.preventDefault();a.stopPropagation();c.event.trigger(a,b,this[0]);return a.result}},
toggle:function(a){for(var b=arguments,d=1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(f){var e=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,e+1);f.preventDefault();return b[e].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var Ga={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,f,e,j){var i,o=0,k,n,r=j||this.selector,
u=j?this:c(this.context);if(c.isFunction(f)){e=f;f=w}for(d=(d||"").split(" ");(i=d[o++])!=null;){j=O.exec(i);k="";if(j){k=j[0];i=i.replace(O,"")}if(i==="hover")d.push("mouseenter"+k,"mouseleave"+k);else{n=i;if(i==="focus"||i==="blur"){d.push(Ga[i]+k);i+=k}else i=(Ga[i]||i)+k;b==="live"?u.each(function(){c.event.add(this,pa(i,r),{data:f,selector:r,handler:e,origType:i,origHandler:e,preType:n})}):u.unbind(pa(i,r),e)}}return this}});c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),
function(a,b){c.fn[b]=function(d){return d?this.bind(b,d):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});A.attachEvent&&!A.addEventListener&&A.attachEvent("onunload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});(function(){function a(g){for(var h="",l,m=0;g[m];m++){l=g[m];if(l.nodeType===3||l.nodeType===4)h+=l.nodeValue;else if(l.nodeType!==8)h+=a(l.childNodes)}return h}function b(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];
if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1&&!p){t.sizcache=l;t.sizset=q}if(t.nodeName.toLowerCase()===h){y=t;break}t=t[g]}m[q]=y}}}function d(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1){if(!p){t.sizcache=l;t.sizset=q}if(typeof h!=="string"){if(t===h){y=true;break}}else if(k.filter(h,[t]).length>0){y=t;break}}t=t[g]}m[q]=y}}}var f=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
e=0,j=Object.prototype.toString,i=false,o=true;[0,0].sort(function(){o=false;return 0});var k=function(g,h,l,m){l=l||[];var q=h=h||s;if(h.nodeType!==1&&h.nodeType!==9)return[];if(!g||typeof g!=="string")return l;for(var p=[],v,t,y,S,H=true,M=x(h),I=g;(f.exec(""),v=f.exec(I))!==null;){I=v[3];p.push(v[1]);if(v[2]){S=v[3];break}}if(p.length>1&&r.exec(g))if(p.length===2&&n.relative[p[0]])t=ga(p[0]+p[1],h);else for(t=n.relative[p[0]]?[h]:k(p.shift(),h);p.length;){g=p.shift();if(n.relative[g])g+=p.shift();
t=ga(g,t)}else{if(!m&&p.length>1&&h.nodeType===9&&!M&&n.match.ID.test(p[0])&&!n.match.ID.test(p[p.length-1])){v=k.find(p.shift(),h,M);h=v.expr?k.filter(v.expr,v.set)[0]:v.set[0]}if(h){v=m?{expr:p.pop(),set:z(m)}:k.find(p.pop(),p.length===1&&(p[0]==="~"||p[0]==="+")&&h.parentNode?h.parentNode:h,M);t=v.expr?k.filter(v.expr,v.set):v.set;if(p.length>0)y=z(t);else H=false;for(;p.length;){var D=p.pop();v=D;if(n.relative[D])v=p.pop();else D="";if(v==null)v=h;n.relative[D](y,v,M)}}else y=[]}y||(y=t);y||k.error(D||
g);if(j.call(y)==="[object Array]")if(H)if(h&&h.nodeType===1)for(g=0;y[g]!=null;g++){if(y[g]&&(y[g]===true||y[g].nodeType===1&&E(h,y[g])))l.push(t[g])}else for(g=0;y[g]!=null;g++)y[g]&&y[g].nodeType===1&&l.push(t[g]);else l.push.apply(l,y);else z(y,l);if(S){k(S,q,l,m);k.uniqueSort(l)}return l};k.uniqueSort=function(g){if(B){i=o;g.sort(B);if(i)for(var h=1;h<g.length;h++)g[h]===g[h-1]&&g.splice(h--,1)}return g};k.matches=function(g,h){return k(g,null,null,h)};k.find=function(g,h,l){var m,q;if(!g)return[];
for(var p=0,v=n.order.length;p<v;p++){var t=n.order[p];if(q=n.leftMatch[t].exec(g)){var y=q[1];q.splice(1,1);if(y.substr(y.length-1)!=="\\"){q[1]=(q[1]||"").replace(/\\/g,"");m=n.find[t](q,h,l);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=h.getElementsByTagName("*"));return{set:m,expr:g}};k.filter=function(g,h,l,m){for(var q=g,p=[],v=h,t,y,S=h&&h[0]&&x(h[0]);g&&h.length;){for(var H in n.filter)if((t=n.leftMatch[H].exec(g))!=null&&t[2]){var M=n.filter[H],I,D;D=t[1];y=false;t.splice(1,1);if(D.substr(D.length-
1)!=="\\"){if(v===p)p=[];if(n.preFilter[H])if(t=n.preFilter[H](t,v,l,p,m,S)){if(t===true)continue}else y=I=true;if(t)for(var U=0;(D=v[U])!=null;U++)if(D){I=M(D,t,U,v);var Ha=m^!!I;if(l&&I!=null)if(Ha)y=true;else v[U]=false;else if(Ha){p.push(D);y=true}}if(I!==w){l||(v=p);g=g.replace(n.match[H],"");if(!y)return[];break}}}if(g===q)if(y==null)k.error(g);else break;q=g}return v};k.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=k.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF-]|\\.)+)/,
CLASS:/\.((?:[\w\u00c0-\uFFFF-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},
relative:{"+":function(g,h){var l=typeof h==="string",m=l&&!/\W/.test(h);l=l&&!m;if(m)h=h.toLowerCase();m=0;for(var q=g.length,p;m<q;m++)if(p=g[m]){for(;(p=p.previousSibling)&&p.nodeType!==1;);g[m]=l||p&&p.nodeName.toLowerCase()===h?p||false:p===h}l&&k.filter(h,g,true)},">":function(g,h){var l=typeof h==="string";if(l&&!/\W/.test(h)){h=h.toLowerCase();for(var m=0,q=g.length;m<q;m++){var p=g[m];if(p){l=p.parentNode;g[m]=l.nodeName.toLowerCase()===h?l:false}}}else{m=0;for(q=g.length;m<q;m++)if(p=g[m])g[m]=
l?p.parentNode:p.parentNode===h;l&&k.filter(h,g,true)}},"":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("parentNode",h,m,g,p,l)},"~":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("previousSibling",h,m,g,p,l)}},find:{ID:function(g,h,l){if(typeof h.getElementById!=="undefined"&&!l)return(g=h.getElementById(g[1]))?[g]:[]},NAME:function(g,h){if(typeof h.getElementsByName!=="undefined"){var l=[];
h=h.getElementsByName(g[1]);for(var m=0,q=h.length;m<q;m++)h[m].getAttribute("name")===g[1]&&l.push(h[m]);return l.length===0?null:l}},TAG:function(g,h){return h.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,h,l,m,q,p){g=" "+g[1].replace(/\\/g,"")+" ";if(p)return g;p=0;for(var v;(v=h[p])!=null;p++)if(v)if(q^(v.className&&(" "+v.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))l||m.push(v);else if(l)h[p]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},
CHILD:function(g){if(g[1]==="nth"){var h=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=h[1]+(h[2]||1)-0;g[3]=h[3]-0}g[0]=e++;return g},ATTR:function(g,h,l,m,q,p){h=g[1].replace(/\\/g,"");if(!p&&n.attrMap[h])g[1]=n.attrMap[h];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,h,l,m,q){if(g[1]==="not")if((f.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=k(g[3],null,null,h);else{g=k.filter(g[3],h,l,true^q);l||m.push.apply(m,
g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,h,l){return!!k(l[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},
text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},
setFilters:{first:function(g,h){return h===0},last:function(g,h,l,m){return h===m.length-1},even:function(g,h){return h%2===0},odd:function(g,h){return h%2===1},lt:function(g,h,l){return h<l[3]-0},gt:function(g,h,l){return h>l[3]-0},nth:function(g,h,l){return l[3]-0===h},eq:function(g,h,l){return l[3]-0===h}},filter:{PSEUDO:function(g,h,l,m){var q=h[1],p=n.filters[q];if(p)return p(g,l,h,m);else if(q==="contains")return(g.textContent||g.innerText||a([g])||"").indexOf(h[3])>=0;else if(q==="not"){h=
h[3];l=0;for(m=h.length;l<m;l++)if(h[l]===g)return false;return true}else k.error("Syntax error, unrecognized expression: "+q)},CHILD:function(g,h){var l=h[1],m=g;switch(l){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(l==="first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":l=h[2];var q=h[3];if(l===1&&q===0)return true;h=h[0];var p=g.parentNode;if(p&&(p.sizcache!==h||!g.nodeIndex)){var v=0;for(m=p.firstChild;m;m=
m.nextSibling)if(m.nodeType===1)m.nodeIndex=++v;p.sizcache=h}g=g.nodeIndex-q;return l===0?g===0:g%l===0&&g/l>=0}},ID:function(g,h){return g.nodeType===1&&g.getAttribute("id")===h},TAG:function(g,h){return h==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===h},CLASS:function(g,h){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(h)>-1},ATTR:function(g,h){var l=h[1];g=n.attrHandle[l]?n.attrHandle[l](g):g[l]!=null?g[l]:g.getAttribute(l);l=g+"";var m=h[2];h=h[4];return g==null?m==="!=":m===
"="?l===h:m==="*="?l.indexOf(h)>=0:m==="~="?(" "+l+" ").indexOf(h)>=0:!h?l&&g!==false:m==="!="?l!==h:m==="^="?l.indexOf(h)===0:m==="$="?l.substr(l.length-h.length)===h:m==="|="?l===h||l.substr(0,h.length+1)===h+"-":false},POS:function(g,h,l,m){var q=n.setFilters[h[2]];if(q)return q(g,l,h,m)}}},r=n.match.POS;for(var u in n.match){n.match[u]=new RegExp(n.match[u].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[u]=new RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[u].source.replace(/\\(\d+)/g,function(g,
h){return"\\"+(h-0+1)}))}var z=function(g,h){g=Array.prototype.slice.call(g,0);if(h){h.push.apply(h,g);return h}return g};try{Array.prototype.slice.call(s.documentElement.childNodes,0)}catch(C){z=function(g,h){h=h||[];if(j.call(g)==="[object Array]")Array.prototype.push.apply(h,g);else if(typeof g.length==="number")for(var l=0,m=g.length;l<m;l++)h.push(g[l]);else for(l=0;g[l];l++)h.push(g[l]);return h}}var B;if(s.documentElement.compareDocumentPosition)B=function(g,h){if(!g.compareDocumentPosition||
!h.compareDocumentPosition){if(g==h)i=true;return g.compareDocumentPosition?-1:1}g=g.compareDocumentPosition(h)&4?-1:g===h?0:1;if(g===0)i=true;return g};else if("sourceIndex"in s.documentElement)B=function(g,h){if(!g.sourceIndex||!h.sourceIndex){if(g==h)i=true;return g.sourceIndex?-1:1}g=g.sourceIndex-h.sourceIndex;if(g===0)i=true;return g};else if(s.createRange)B=function(g,h){if(!g.ownerDocument||!h.ownerDocument){if(g==h)i=true;return g.ownerDocument?-1:1}var l=g.ownerDocument.createRange(),m=
h.ownerDocument.createRange();l.setStart(g,0);l.setEnd(g,0);m.setStart(h,0);m.setEnd(h,0);g=l.compareBoundaryPoints(Range.START_TO_END,m);if(g===0)i=true;return g};(function(){var g=s.createElement("div"),h="script"+(new Date).getTime();g.innerHTML="<a name='"+h+"'/>";var l=s.documentElement;l.insertBefore(g,l.firstChild);if(s.getElementById(h)){n.find.ID=function(m,q,p){if(typeof q.getElementById!=="undefined"&&!p)return(q=q.getElementById(m[1]))?q.id===m[1]||typeof q.getAttributeNode!=="undefined"&&
q.getAttributeNode("id").nodeValue===m[1]?[q]:w:[]};n.filter.ID=function(m,q){var p=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&p&&p.nodeValue===q}}l.removeChild(g);l=g=null})();(function(){var g=s.createElement("div");g.appendChild(s.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(h,l){l=l.getElementsByTagName(h[1]);if(h[1]==="*"){h=[];for(var m=0;l[m];m++)l[m].nodeType===1&&h.push(l[m]);l=h}return l};g.innerHTML="<a href='#'></a>";
if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(h){return h.getAttribute("href",2)};g=null})();s.querySelectorAll&&function(){var g=k,h=s.createElement("div");h.innerHTML="<p class='TEST'></p>";if(!(h.querySelectorAll&&h.querySelectorAll(".TEST").length===0)){k=function(m,q,p,v){q=q||s;if(!v&&q.nodeType===9&&!x(q))try{return z(q.querySelectorAll(m),p)}catch(t){}return g(m,q,p,v)};for(var l in g)k[l]=g[l];h=null}}();
(function(){var g=s.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(h,l,m){if(typeof l.getElementsByClassName!=="undefined"&&!m)return l.getElementsByClassName(h[1])};g=null}}})();var E=s.compareDocumentPosition?function(g,h){return!!(g.compareDocumentPosition(h)&16)}:
function(g,h){return g!==h&&(g.contains?g.contains(h):true)},x=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false},ga=function(g,h){var l=[],m="",q;for(h=h.nodeType?[h]:h;q=n.match.PSEUDO.exec(g);){m+=q[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;q=0;for(var p=h.length;q<p;q++)k(g,h[q],l);return k.filter(m,l)};c.find=k;c.expr=k.selectors;c.expr[":"]=c.expr.filters;c.unique=k.uniqueSort;c.text=a;c.isXMLDoc=x;c.contains=E})();var eb=/Until$/,fb=/^(?:parents|prevUntil|prevAll)/,
gb=/,/;R=Array.prototype.slice;var Ia=function(a,b,d){if(c.isFunction(b))return c.grep(a,function(e,j){return!!b.call(e,j,e)===d});else if(b.nodeType)return c.grep(a,function(e){return e===b===d});else if(typeof b==="string"){var f=c.grep(a,function(e){return e.nodeType===1});if(Ua.test(b))return c.filter(b,f,!d);else b=c.filter(b,f)}return c.grep(a,function(e){return c.inArray(e,b)>=0===d})};c.fn.extend({find:function(a){for(var b=this.pushStack("","find",a),d=0,f=0,e=this.length;f<e;f++){d=b.length;
c.find(a,this[f],b);if(f>0)for(var j=d;j<b.length;j++)for(var i=0;i<d;i++)if(b[i]===b[j]){b.splice(j--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,f=b.length;d<f;d++)if(c.contains(this,b[d]))return true})},not:function(a){return this.pushStack(Ia(this,a,false),"not",a)},filter:function(a){return this.pushStack(Ia(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a,b){if(c.isArray(a)){var d=[],f=this[0],e,j=
{},i;if(f&&a.length){e=0;for(var o=a.length;e<o;e++){i=a[e];j[i]||(j[i]=c.expr.match.POS.test(i)?c(i,b||this.context):i)}for(;f&&f.ownerDocument&&f!==b;){for(i in j){e=j[i];if(e.jquery?e.index(f)>-1:c(f).is(e)){d.push({selector:i,elem:f});delete j[i]}}f=f.parentNode}}return d}var k=c.expr.match.POS.test(a)?c(a,b||this.context):null;return this.map(function(n,r){for(;r&&r.ownerDocument&&r!==b;){if(k?k.index(r)>-1:c(r).is(a))return r;r=r.parentNode}return null})},index:function(a){if(!a||typeof a===
"string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){a=typeof a==="string"?c(a,b||this.context):c.makeArray(a);b=c.merge(this.get(),a);return this.pushStack(qa(a[0])||qa(b[0])?b:c.unique(b))},andSelf:function(){return this.add(this.prevObject)}});c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",
d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?
a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,f){var e=c.map(this,b,d);eb.test(a)||(f=d);if(f&&typeof f==="string")e=c.filter(f,e);e=this.length>1?c.unique(e):e;if((this.length>1||gb.test(f))&&fb.test(a))e=e.reverse();return this.pushStack(e,a,R.call(arguments).join(","))}});c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return c.find.matches(a,b)},dir:function(a,b,d){var f=[];for(a=a[b];a&&a.nodeType!==9&&(d===w||a.nodeType!==1||!c(a).is(d));){a.nodeType===
1&&f.push(a);a=a[b]}return f},nth:function(a,b,d){b=b||1;for(var f=0;a;a=a[d])if(a.nodeType===1&&++f===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var Ja=/ jQuery\d+="(?:\d+|null)"/g,V=/^\s+/,Ka=/(<([\w:]+)[^>]*?)\/>/g,hb=/^(?:area|br|col|embed|hr|img|input|link|meta|param)$/i,La=/<([\w:]+)/,ib=/<tbody/i,jb=/<|&#?\w+;/,ta=/<script|<object|<embed|<option|<style/i,ua=/checked\s*(?:[^=]|=\s*.checked.)/i,Ma=function(a,b,d){return hb.test(d)?
a:b+"></"+d+">"},F={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]};F.optgroup=F.option;F.tbody=F.tfoot=F.colgroup=F.caption=F.thead;F.th=F.td;if(!c.support.htmlSerialize)F._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=
c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==w)return this.empty().append((this[0]&&this[0].ownerDocument||s).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},
wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},
prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,
this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,f;(f=this[d])!=null;d++)if(!a||c.filter(a,[f]).length){if(!b&&f.nodeType===1){c.cleanData(f.getElementsByTagName("*"));c.cleanData([f])}f.parentNode&&f.parentNode.removeChild(f)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);
return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,f=this.ownerDocument;if(!d){d=f.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(Ja,"").replace(/=([^="'>\s]+\/)>/g,'="$1">').replace(V,"")],f)[0]}else return this.cloneNode(true)});if(a===true){ra(this,b);ra(this.find("*"),b.find("*"))}return b},html:function(a){if(a===w)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(Ja,
""):null;else if(typeof a==="string"&&!ta.test(a)&&(c.support.leadingWhitespace||!V.test(a))&&!F[(La.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Ka,Ma);try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(f){this.empty().append(a)}}else c.isFunction(a)?this.each(function(e){var j=c(this),i=j.html();j.empty().append(function(){return a.call(this,e,i)})}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&
this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=c(this),f=d.html();d.replaceWith(a.call(this,b,f))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a,true)},domManip:function(a,b,d){function f(u){return c.nodeName(u,"table")?u.getElementsByTagName("tbody")[0]||
u.appendChild(u.ownerDocument.createElement("tbody")):u}var e,j,i=a[0],o=[],k;if(!c.support.checkClone&&arguments.length===3&&typeof i==="string"&&ua.test(i))return this.each(function(){c(this).domManip(a,b,d,true)});if(c.isFunction(i))return this.each(function(u){var z=c(this);a[0]=i.call(this,u,b?z.html():w);z.domManip(a,b,d)});if(this[0]){e=i&&i.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:sa(a,this,o);k=e.fragment;if(j=k.childNodes.length===
1?(k=k.firstChild):k.firstChild){b=b&&c.nodeName(j,"tr");for(var n=0,r=this.length;n<r;n++)d.call(b?f(this[n],j):this[n],n>0||e.cacheable||this.length>1?k.cloneNode(true):k)}o.length&&c.each(o,Qa)}return this}});c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var f=[];d=c(d);var e=this.length===1&&this[0].parentNode;if(e&&e.nodeType===11&&e.childNodes.length===1&&d.length===1){d[b](this[0]);
return this}else{e=0;for(var j=d.length;e<j;e++){var i=(e>0?this.clone(true):this).get();c.fn[b].apply(c(d[e]),i);f=f.concat(i)}return this.pushStack(f,a,d.selector)}}});c.extend({clean:function(a,b,d,f){b=b||s;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||s;for(var e=[],j=0,i;(i=a[j])!=null;j++){if(typeof i==="number")i+="";if(i){if(typeof i==="string"&&!jb.test(i))i=b.createTextNode(i);else if(typeof i==="string"){i=i.replace(Ka,Ma);var o=(La.exec(i)||["",
""])[1].toLowerCase(),k=F[o]||F._default,n=k[0],r=b.createElement("div");for(r.innerHTML=k[1]+i+k[2];n--;)r=r.lastChild;if(!c.support.tbody){n=ib.test(i);o=o==="table"&&!n?r.firstChild&&r.firstChild.childNodes:k[1]==="<table>"&&!n?r.childNodes:[];for(k=o.length-1;k>=0;--k)c.nodeName(o[k],"tbody")&&!o[k].childNodes.length&&o[k].parentNode.removeChild(o[k])}!c.support.leadingWhitespace&&V.test(i)&&r.insertBefore(b.createTextNode(V.exec(i)[0]),r.firstChild);i=r.childNodes}if(i.nodeType)e.push(i);else e=
c.merge(e,i)}}if(d)for(j=0;e[j];j++)if(f&&c.nodeName(e[j],"script")&&(!e[j].type||e[j].type.toLowerCase()==="text/javascript"))f.push(e[j].parentNode?e[j].parentNode.removeChild(e[j]):e[j]);else{e[j].nodeType===1&&e.splice.apply(e,[j+1,0].concat(c.makeArray(e[j].getElementsByTagName("script"))));d.appendChild(e[j])}return e},cleanData:function(a){for(var b,d,f=c.cache,e=c.event.special,j=c.support.deleteExpando,i=0,o;(o=a[i])!=null;i++)if(d=o[c.expando]){b=f[d];if(b.events)for(var k in b.events)e[k]?
c.event.remove(o,k):Ca(o,k,b.handle);if(j)delete o[c.expando];else o.removeAttribute&&o.removeAttribute(c.expando);delete f[d]}}});var kb=/z-?index|font-?weight|opacity|zoom|line-?height/i,Na=/alpha\([^)]*\)/,Oa=/opacity=([^)]*)/,ha=/float/i,ia=/-([a-z])/ig,lb=/([A-Z])/g,mb=/^-?\d+(?:px)?$/i,nb=/^-?\d/,ob={position:"absolute",visibility:"hidden",display:"block"},pb=["Left","Right"],qb=["Top","Bottom"],rb=s.defaultView&&s.defaultView.getComputedStyle,Pa=c.support.cssFloat?"cssFloat":"styleFloat",ja=
function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){return X(this,a,b,true,function(d,f,e){if(e===w)return c.curCSS(d,f);if(typeof e==="number"&&!kb.test(f))e+="px";c.style(d,f,e)})};c.extend({style:function(a,b,d){if(!a||a.nodeType===3||a.nodeType===8)return w;if((b==="width"||b==="height")&&parseFloat(d)<0)d=w;var f=a.style||a,e=d!==w;if(!c.support.opacity&&b==="opacity"){if(e){f.zoom=1;b=parseInt(d,10)+""==="NaN"?"":"alpha(opacity="+d*100+")";a=f.filter||c.curCSS(a,"filter")||"";f.filter=
Na.test(a)?a.replace(Na,b):b}return f.filter&&f.filter.indexOf("opacity=")>=0?parseFloat(Oa.exec(f.filter)[1])/100+"":""}if(ha.test(b))b=Pa;b=b.replace(ia,ja);if(e)f[b]=d;return f[b]},css:function(a,b,d,f){if(b==="width"||b==="height"){var e,j=b==="width"?pb:qb;function i(){e=b==="width"?a.offsetWidth:a.offsetHeight;f!=="border"&&c.each(j,function(){f||(e-=parseFloat(c.curCSS(a,"padding"+this,true))||0);if(f==="margin")e+=parseFloat(c.curCSS(a,"margin"+this,true))||0;else e-=parseFloat(c.curCSS(a,
"border"+this+"Width",true))||0})}a.offsetWidth!==0?i():c.swap(a,ob,i);return Math.max(0,Math.round(e))}return c.curCSS(a,b,d)},curCSS:function(a,b,d){var f,e=a.style;if(!c.support.opacity&&b==="opacity"&&a.currentStyle){f=Oa.test(a.currentStyle.filter||"")?parseFloat(RegExp.$1)/100+"":"";return f===""?"1":f}if(ha.test(b))b=Pa;if(!d&&e&&e[b])f=e[b];else if(rb){if(ha.test(b))b="float";b=b.replace(lb,"-$1").toLowerCase();e=a.ownerDocument.defaultView;if(!e)return null;if(a=e.getComputedStyle(a,null))f=
a.getPropertyValue(b);if(b==="opacity"&&f==="")f="1"}else if(a.currentStyle){d=b.replace(ia,ja);f=a.currentStyle[b]||a.currentStyle[d];if(!mb.test(f)&&nb.test(f)){b=e.left;var j=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;e.left=d==="fontSize"?"1em":f||0;f=e.pixelLeft+"px";e.left=b;a.runtimeStyle.left=j}}return f},swap:function(a,b,d){var f={};for(var e in b){f[e]=a.style[e];a.style[e]=b[e]}d.call(a);for(e in b)a.style[e]=f[e]}});if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=
a.offsetWidth,d=a.offsetHeight,f=a.nodeName.toLowerCase()==="tr";return b===0&&d===0&&!f?true:b>0&&d>0&&!f?false:c.curCSS(a,"display")==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var sb=J(),tb=/<script(.|\s)*?\/script>/gi,ub=/select|textarea/i,vb=/color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week/i,N=/=\?(&|$)/,ka=/\?/,wb=/(\?|&)_=.*?(&|$)/,xb=/^(\w+:)?\/\/([^\/?#]+)/,yb=/%20/g,zb=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!==
"string")return zb.call(this,a);else if(!this.length)return this;var f=a.indexOf(" ");if(f>=0){var e=a.slice(f,a.length);a=a.slice(0,f)}f="GET";if(b)if(c.isFunction(b)){d=b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);f="POST"}var j=this;c.ajax({url:a,type:f,dataType:"html",data:b,complete:function(i,o){if(o==="success"||o==="notmodified")j.html(e?c("<div />").append(i.responseText.replace(tb,"")).find(e):i.responseText);d&&j.each(d,[i.responseText,o,i])}});return this},
serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||ub.test(this.nodeName)||vb.test(this.type))}).map(function(a,b){a=c(this).val();return a==null?null:c.isArray(a)?c.map(a,function(d){return{name:b.name,value:d}}):{name:b.name,value:a}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),
function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:f})},getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:f})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,
global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:A.XMLHttpRequest&&(A.location.protocol!=="file:"||!A.ActiveXObject)?function(){return new A.XMLHttpRequest}:function(){try{return new A.ActiveXObject("Microsoft.XMLHTTP")}catch(a){}},accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},etag:{},ajax:function(a){function b(){e.success&&
e.success.call(k,o,i,x);e.global&&f("ajaxSuccess",[x,e])}function d(){e.complete&&e.complete.call(k,x,i);e.global&&f("ajaxComplete",[x,e]);e.global&&!--c.active&&c.event.trigger("ajaxStop")}function f(q,p){(e.context?c(e.context):c.event).trigger(q,p)}var e=c.extend(true,{},c.ajaxSettings,a),j,i,o,k=a&&a.context||e,n=e.type.toUpperCase();if(e.data&&e.processData&&typeof e.data!=="string")e.data=c.param(e.data,e.traditional);if(e.dataType==="jsonp"){if(n==="GET")N.test(e.url)||(e.url+=(ka.test(e.url)?
"&":"?")+(e.jsonp||"callback")+"=?");else if(!e.data||!N.test(e.data))e.data=(e.data?e.data+"&":"")+(e.jsonp||"callback")+"=?";e.dataType="json"}if(e.dataType==="json"&&(e.data&&N.test(e.data)||N.test(e.url))){j=e.jsonpCallback||"jsonp"+sb++;if(e.data)e.data=(e.data+"").replace(N,"="+j+"$1");e.url=e.url.replace(N,"="+j+"$1");e.dataType="script";A[j]=A[j]||function(q){o=q;b();d();A[j]=w;try{delete A[j]}catch(p){}z&&z.removeChild(C)}}if(e.dataType==="script"&&e.cache===null)e.cache=false;if(e.cache===
false&&n==="GET"){var r=J(),u=e.url.replace(wb,"$1_="+r+"$2");e.url=u+(u===e.url?(ka.test(e.url)?"&":"?")+"_="+r:"")}if(e.data&&n==="GET")e.url+=(ka.test(e.url)?"&":"?")+e.data;e.global&&!c.active++&&c.event.trigger("ajaxStart");r=(r=xb.exec(e.url))&&(r[1]&&r[1]!==location.protocol||r[2]!==location.host);if(e.dataType==="script"&&n==="GET"&&r){var z=s.getElementsByTagName("head")[0]||s.documentElement,C=s.createElement("script");C.src=e.url;if(e.scriptCharset)C.charset=e.scriptCharset;if(!j){var B=
false;C.onload=C.onreadystatechange=function(){if(!B&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){B=true;b();d();C.onload=C.onreadystatechange=null;z&&C.parentNode&&z.removeChild(C)}}}z.insertBefore(C,z.firstChild);return w}var E=false,x=e.xhr();if(x){e.username?x.open(n,e.url,e.async,e.username,e.password):x.open(n,e.url,e.async);try{if(e.data||a&&a.contentType)x.setRequestHeader("Content-Type",e.contentType);if(e.ifModified){c.lastModified[e.url]&&x.setRequestHeader("If-Modified-Since",
c.lastModified[e.url]);c.etag[e.url]&&x.setRequestHeader("If-None-Match",c.etag[e.url])}r||x.setRequestHeader("X-Requested-With","XMLHttpRequest");x.setRequestHeader("Accept",e.dataType&&e.accepts[e.dataType]?e.accepts[e.dataType]+", */*":e.accepts._default)}catch(ga){}if(e.beforeSend&&e.beforeSend.call(k,x,e)===false){e.global&&!--c.active&&c.event.trigger("ajaxStop");x.abort();return false}e.global&&f("ajaxSend",[x,e]);var g=x.onreadystatechange=function(q){if(!x||x.readyState===0||q==="abort"){E||
d();E=true;if(x)x.onreadystatechange=c.noop}else if(!E&&x&&(x.readyState===4||q==="timeout")){E=true;x.onreadystatechange=c.noop;i=q==="timeout"?"timeout":!c.httpSuccess(x)?"error":e.ifModified&&c.httpNotModified(x,e.url)?"notmodified":"success";var p;if(i==="success")try{o=c.httpData(x,e.dataType,e)}catch(v){i="parsererror";p=v}if(i==="success"||i==="notmodified")j||b();else c.handleError(e,x,i,p);d();q==="timeout"&&x.abort();if(e.async)x=null}};try{var h=x.abort;x.abort=function(){x&&h.call(x);
g("abort")}}catch(l){}e.async&&e.timeout>0&&setTimeout(function(){x&&!E&&g("timeout")},e.timeout);try{x.send(n==="POST"||n==="PUT"||n==="DELETE"?e.data:null)}catch(m){c.handleError(e,x,null,m);d()}e.async||g();return x}},handleError:function(a,b,d,f){if(a.error)a.error.call(a.context||a,b,d,f);if(a.global)(a.context?c(a.context):c.event).trigger("ajaxError",[b,a,f])},active:0,httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===
1223||a.status===0}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),f=a.getResponseHeader("Etag");if(d)c.lastModified[b]=d;if(f)c.etag[b]=f;return a.status===304||a.status===0},httpData:function(a,b,d){var f=a.getResponseHeader("content-type")||"",e=b==="xml"||!b&&f.indexOf("xml")>=0;a=e?a.responseXML:a.responseText;e&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b===
"json"||!b&&f.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&f.indexOf("javascript")>=0)c.globalEval(a);return a},param:function(a,b){function d(i,o){if(c.isArray(o))c.each(o,function(k,n){b||/\[\]$/.test(i)?f(i,n):d(i+"["+(typeof n==="object"||c.isArray(n)?k:"")+"]",n)});else!b&&o!=null&&typeof o==="object"?c.each(o,function(k,n){d(i+"["+k+"]",n)}):f(i,o)}function f(i,o){o=c.isFunction(o)?o():o;e[e.length]=encodeURIComponent(i)+"="+encodeURIComponent(o)}var e=[];if(b===w)b=c.ajaxSettings.traditional;
if(c.isArray(a)||a.jquery)c.each(a,function(){f(this.name,this.value)});else for(var j in a)d(j,a[j]);return e.join("&").replace(yb,"+")}});var la={},Ab=/toggle|show|hide/,Bb=/^([+-]=)?([\d+-.]+)(.*)$/,W,va=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b){if(a||a===0)return this.animate(K("show",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");
this[a].style.display=d||"";if(c.css(this[a],"display")==="none"){d=this[a].nodeName;var f;if(la[d])f=la[d];else{var e=c("<"+d+" />").appendTo("body");f=e.css("display");if(f==="none")f="block";e.remove();la[d]=f}c.data(this[a],"olddisplay",f)}}a=0;for(b=this.length;a<b;a++)this[a].style.display=c.data(this[a],"olddisplay")||"";return this}},hide:function(a,b){if(a||a===0)return this.animate(K("hide",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");!d&&d!=="none"&&c.data(this[a],
"olddisplay",c.css(this[a],"display"))}a=0;for(b=this.length;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b){var d=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||d?this.each(function(){var f=d?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(K("toggle",3),a,b);return this},fadeTo:function(a,b,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d)},
animate:function(a,b,d,f){var e=c.speed(b,d,f);if(c.isEmptyObject(a))return this.each(e.complete);return this[e.queue===false?"each":"queue"](function(){var j=c.extend({},e),i,o=this.nodeType===1&&c(this).is(":hidden"),k=this;for(i in a){var n=i.replace(ia,ja);if(i!==n){a[n]=a[i];delete a[i];i=n}if(a[i]==="hide"&&o||a[i]==="show"&&!o)return j.complete.call(this);if((i==="height"||i==="width")&&this.style){j.display=c.css(this,"display");j.overflow=this.style.overflow}if(c.isArray(a[i])){(j.specialEasing=
j.specialEasing||{})[i]=a[i][1];a[i]=a[i][0]}}if(j.overflow!=null)this.style.overflow="hidden";j.curAnim=c.extend({},a);c.each(a,function(r,u){var z=new c.fx(k,j,r);if(Ab.test(u))z[u==="toggle"?o?"show":"hide":u](a);else{var C=Bb.exec(u),B=z.cur(true)||0;if(C){u=parseFloat(C[2]);var E=C[3]||"px";if(E!=="px"){k.style[r]=(u||1)+E;B=(u||1)/z.cur(true)*B;k.style[r]=B+E}if(C[1])u=(C[1]==="-="?-1:1)*u+B;z.custom(B,u,E)}else z.custom(B,u,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);
this.each(function(){for(var f=d.length-1;f>=0;f--)if(d[f].elem===this){b&&d[f](true);d.splice(f,1)}});b||this.dequeue();return this}});c.each({slideDown:K("show",1),slideUp:K("hide",1),slideToggle:K("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,f){return this.animate(b,d,f)}});c.extend({speed:function(a,b,d){var f=a&&typeof a==="object"?a:{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};f.duration=c.fx.off?0:typeof f.duration===
"number"?f.duration:c.fx.speeds[f.duration]||c.fx.speeds._default;f.old=f.complete;f.complete=function(){f.queue!==false&&c(this).dequeue();c.isFunction(f.old)&&f.old.call(this)};return f},easing:{linear:function(a,b,d,f){return d+f*a},swing:function(a,b,d,f){return(-Math.cos(a*Math.PI)/2+0.5)*f+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||
c.fx.step._default)(this);if((this.prop==="height"||this.prop==="width")&&this.elem.style)this.elem.style.display="block"},cur:function(a){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];return(a=parseFloat(c.css(this.elem,this.prop,a)))&&a>-10000?a:parseFloat(c.curCSS(this.elem,this.prop))||0},custom:function(a,b,d){function f(j){return e.step(j)}this.startTime=J();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;
this.pos=this.state=0;var e=this;f.elem=this.elem;if(f()&&c.timers.push(f)&&!W)W=setInterval(c.fx.tick,13)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true;this.custom(this.cur(),0)},step:function(a){var b=J(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=
this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var f in this.options.curAnim)if(this.options.curAnim[f]!==true)d=false;if(d){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;a=c.data(this.elem,"olddisplay");this.elem.style.display=a?a:this.options.display;if(c.css(this.elem,"display")==="none")this.elem.style.display="block"}this.options.hide&&c(this.elem).hide();if(this.options.hide||this.options.show)for(var e in this.options.curAnim)c.style(this.elem,
e,this.options.orig[e]);this.options.complete.call(this.elem)}return false}else{e=b-this.startTime;this.state=e/this.options.duration;a=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||a](this.state,e,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a=c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||
c.fx.stop()},stop:function(){clearInterval(W);W=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a===b.elem}).length};c.fn.offset="getBoundingClientRect"in s.documentElement?
function(a){var b=this[0];if(a)return this.each(function(e){c.offset.setOffset(this,a,e)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);var d=b.getBoundingClientRect(),f=b.ownerDocument;b=f.body;f=f.documentElement;return{top:d.top+(self.pageYOffset||c.support.boxModel&&f.scrollTop||b.scrollTop)-(f.clientTop||b.clientTop||0),left:d.left+(self.pageXOffset||c.support.boxModel&&f.scrollLeft||b.scrollLeft)-(f.clientLeft||b.clientLeft||0)}}:function(a){var b=
this[0];if(a)return this.each(function(r){c.offset.setOffset(this,a,r)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d=b.offsetParent,f=b,e=b.ownerDocument,j,i=e.documentElement,o=e.body;f=(e=e.defaultView)?e.getComputedStyle(b,null):b.currentStyle;for(var k=b.offsetTop,n=b.offsetLeft;(b=b.parentNode)&&b!==o&&b!==i;){if(c.offset.supportsFixedPosition&&f.position==="fixed")break;j=e?e.getComputedStyle(b,null):b.currentStyle;
k-=b.scrollTop;n-=b.scrollLeft;if(b===d){k+=b.offsetTop;n+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&/^t(able|d|h)$/i.test(b.nodeName))){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=d;d=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&j.overflow!=="visible"){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=j}if(f.position==="relative"||f.position==="static"){k+=o.offsetTop;n+=o.offsetLeft}if(c.offset.supportsFixedPosition&&
f.position==="fixed"){k+=Math.max(i.scrollTop,o.scrollTop);n+=Math.max(i.scrollLeft,o.scrollLeft)}return{top:k,left:n}};c.offset={initialize:function(){var a=s.body,b=s.createElement("div"),d,f,e,j=parseFloat(c.curCSS(a,"marginTop",true))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";
a.insertBefore(b,a.firstChild);d=b.firstChild;f=d.firstChild;e=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=f.offsetTop!==5;this.doesAddBorderForTableAndCells=e.offsetTop===5;f.style.position="fixed";f.style.top="20px";this.supportsFixedPosition=f.offsetTop===20||f.offsetTop===15;f.style.position=f.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=f.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==j;a.removeChild(b);
c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.curCSS(a,"marginTop",true))||0;d+=parseFloat(c.curCSS(a,"marginLeft",true))||0}return{top:b,left:d}},setOffset:function(a,b,d){if(/static/.test(c.curCSS(a,"position")))a.style.position="relative";var f=c(a),e=f.offset(),j=parseInt(c.curCSS(a,"top",true),10)||0,i=parseInt(c.curCSS(a,"left",true),10)||0;if(c.isFunction(b))b=b.call(a,
d,e);d={top:b.top-e.top+j,left:b.left-e.left+i};"using"in b?b.using.call(a,d):f.css(d)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),f=/^body|html$/i.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.curCSS(a,"marginTop",true))||0;d.left-=parseFloat(c.curCSS(a,"marginLeft",true))||0;f.top+=parseFloat(c.curCSS(b[0],"borderTopWidth",true))||0;f.left+=parseFloat(c.curCSS(b[0],"borderLeftWidth",true))||0;return{top:d.top-
f.top,left:d.left-f.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||s.body;a&&!/^body|html$/i.test(a.nodeName)&&c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(f){var e=this[0],j;if(!e)return null;if(f!==w)return this.each(function(){if(j=wa(this))j.scrollTo(!a?f:c(j).scrollLeft(),a?f:c(j).scrollTop());else this[d]=f});else return(j=wa(e))?"pageXOffset"in j?j[a?"pageYOffset":
"pageXOffset"]:c.support.boxModel&&j.document.documentElement[d]||j.document.body[d]:e[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase();c.fn["inner"+b]=function(){return this[0]?c.css(this[0],d,false,"padding"):null};c.fn["outer"+b]=function(f){return this[0]?c.css(this[0],d,false,f?"margin":"border"):null};c.fn[d]=function(f){var e=this[0];if(!e)return f==null?null:this;if(c.isFunction(f))return this.each(function(j){var i=c(this);i[d](f.call(this,j,i[d]()))});return"scrollTo"in
e&&e.document?e.document.compatMode==="CSS1Compat"&&e.document.documentElement["client"+b]||e.document.body["client"+b]:e.nodeType===9?Math.max(e.documentElement["client"+b],e.body["scroll"+b],e.documentElement["scroll"+b],e.body["offset"+b],e.documentElement["offset"+b]):f===w?c.css(e,d):this.css(d,typeof f==="string"?f:f+"px")}});A.jQuery=A.$=c})(window);
| AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/doc/html/_static/jquery.js | jquery.js |
/*
* doctools.js
* ~~~~~~~~~~~
*
* Sphinx JavaScript utilties for all documentation.
*
* :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/**
* select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*/
jQuery.urldecode = function(x) {
return decodeURIComponent(x).replace(/\+/g, ' ');
}
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s == 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
/**
* small function to check if an array contains
* a given item.
*/
jQuery.contains = function(arr, item) {
for (var i = 0; i < arr.length; i++) {
if (arr[i] == item)
return true;
}
return false;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node) {
if (node.nodeType == 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
var span = document.createElement("span");
span.className = className;
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this);
});
}
}
return this.each(function() {
highlight(this);
});
};
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated == 'undefined')
return string;
return (typeof translated == 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated == 'undefined')
return (n == 1) ? singular : plural;
    return translated[Documentation.PLURAL_EXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<li class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></li>')
.appendTo($('.sidebar .this-page-menu'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) == 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('.sidebar .this-page-menu li.highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this == '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
});
| AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/doc/html/_static/doctools.js | doctools.js |
/*
* sidebar.js
* ~~~~~~~~~~
*
* This script makes the Sphinx sidebar collapsible.
*
* .sphinxsidebar contains .sphinxsidebarwrapper. This script adds
 * in .sphinxsidebar, after .sphinxsidebarwrapper, the #sidebarbutton
* used to collapse and expand the sidebar.
*
* When the sidebar is collapsed the .sphinxsidebarwrapper is hidden
* and the width of the sidebar and the margin-left of the document
* are decreased. When the sidebar is expanded the opposite happens.
* This script saves a per-browser/per-session cookie used to
* remember the position of the sidebar among the pages.
* Once the browser is closed the cookie is deleted and the position
* reset to the default (expanded).
*
* :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
$(function() {
// global elements used by the functions.
// the 'sidebarbutton' element is defined as global after its
// creation, in the add_sidebar_button function
var bodywrapper = $('.bodywrapper');
var sidebar = $('.sphinxsidebar');
var sidebarwrapper = $('.sphinxsidebarwrapper');
// original margin-left of the bodywrapper and width of the sidebar
// with the sidebar expanded
var bw_margin_expanded = bodywrapper.css('margin-left');
var ssb_width_expanded = sidebar.width();
// margin-left of the bodywrapper and width of the sidebar
// with the sidebar collapsed
var bw_margin_collapsed = '.8em';
var ssb_width_collapsed = '.8em';
// colors used by the current theme
var dark_color = $('.related').css('background-color');
var light_color = $('.document').css('background-color');
function sidebar_is_collapsed() {
return sidebarwrapper.is(':not(:visible)');
}
function toggle_sidebar() {
if (sidebar_is_collapsed())
expand_sidebar();
else
collapse_sidebar();
}
function collapse_sidebar() {
sidebarwrapper.hide();
sidebar.css('width', ssb_width_collapsed);
bodywrapper.css('margin-left', bw_margin_collapsed);
sidebarbutton.css({
'margin-left': '0',
'height': bodywrapper.height()
});
sidebarbutton.find('span').text('»');
sidebarbutton.attr('title', _('Expand sidebar'));
document.cookie = 'sidebar=collapsed';
}
function expand_sidebar() {
bodywrapper.css('margin-left', bw_margin_expanded);
sidebar.css('width', ssb_width_expanded);
sidebarwrapper.show();
sidebarbutton.css({
'margin-left': ssb_width_expanded-12,
'height': bodywrapper.height()
});
sidebarbutton.find('span').text('«');
sidebarbutton.attr('title', _('Collapse sidebar'));
document.cookie = 'sidebar=expanded';
}
function add_sidebar_button() {
sidebarwrapper.css({
'float': 'left',
'margin-right': '0',
'width': ssb_width_expanded - 28
});
// create the button
sidebar.append(
'<div id="sidebarbutton"><span>«</span></div>'
);
var sidebarbutton = $('#sidebarbutton');
light_color = sidebarbutton.css('background-color');
// find the height of the viewport to center the '<<' in the page
var viewport_height;
if (window.innerHeight)
viewport_height = window.innerHeight;
else
viewport_height = $(window).height();
sidebarbutton.find('span').css({
'display': 'block',
'margin-top': (viewport_height - sidebar.position().top - 20) / 2
});
sidebarbutton.click(toggle_sidebar);
sidebarbutton.attr('title', _('Collapse sidebar'));
sidebarbutton.css({
'color': '#FFFFFF',
'border-left': '1px solid ' + dark_color,
'font-size': '1.2em',
'cursor': 'pointer',
'height': bodywrapper.height(),
'padding-top': '1px',
'margin-left': ssb_width_expanded - 12
});
sidebarbutton.hover(
function () {
$(this).css('background-color', dark_color);
},
function () {
$(this).css('background-color', light_color);
}
);
}
function set_position_from_cookie() {
if (!document.cookie)
return;
var items = document.cookie.split(';');
for(var k=0; k<items.length; k++) {
var key_val = items[k].split('=');
var key = key_val[0];
if (key == 'sidebar') {
var value = key_val[1];
if ((value == 'collapsed') && (!sidebar_is_collapsed()))
collapse_sidebar();
else if ((value == 'expanded') && (sidebar_is_collapsed()))
expand_sidebar();
}
}
}
add_sidebar_button();
var sidebarbutton = $('#sidebarbutton');
set_position_from_cookie();
});
| AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/doc/html/_static/sidebar.js | sidebar.js |
(function(){var j=this,n=j._,i=function(a){this._wrapped=a},m=typeof StopIteration!=="undefined"?StopIteration:"__break__",b=j._=function(a){return new i(a)};if(typeof exports!=="undefined")exports._=b;var k=Array.prototype.slice,o=Array.prototype.unshift,p=Object.prototype.toString,q=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;b.VERSION="0.5.5";b.each=function(a,c,d){try{if(a.forEach)a.forEach(c,d);else if(b.isArray(a)||b.isArguments(a))for(var e=0,f=a.length;e<f;e++)c.call(d,
a[e],e,a);else{var g=b.keys(a);f=g.length;for(e=0;e<f;e++)c.call(d,a[g[e]],g[e],a)}}catch(h){if(h!=m)throw h;}return a};b.map=function(a,c,d){if(a&&b.isFunction(a.map))return a.map(c,d);var e=[];b.each(a,function(f,g,h){e.push(c.call(d,f,g,h))});return e};b.reduce=function(a,c,d,e){if(a&&b.isFunction(a.reduce))return a.reduce(b.bind(d,e),c);b.each(a,function(f,g,h){c=d.call(e,c,f,g,h)});return c};b.reduceRight=function(a,c,d,e){if(a&&b.isFunction(a.reduceRight))return a.reduceRight(b.bind(d,e),c);
var f=b.clone(b.toArray(a)).reverse();b.each(f,function(g,h){c=d.call(e,c,g,h,a)});return c};b.detect=function(a,c,d){var e;b.each(a,function(f,g,h){if(c.call(d,f,g,h)){e=f;b.breakLoop()}});return e};b.select=function(a,c,d){if(a&&b.isFunction(a.filter))return a.filter(c,d);var e=[];b.each(a,function(f,g,h){c.call(d,f,g,h)&&e.push(f)});return e};b.reject=function(a,c,d){var e=[];b.each(a,function(f,g,h){!c.call(d,f,g,h)&&e.push(f)});return e};b.all=function(a,c,d){c=c||b.identity;if(a&&b.isFunction(a.every))return a.every(c,
d);var e=true;b.each(a,function(f,g,h){(e=e&&c.call(d,f,g,h))||b.breakLoop()});return e};b.any=function(a,c,d){c=c||b.identity;if(a&&b.isFunction(a.some))return a.some(c,d);var e=false;b.each(a,function(f,g,h){if(e=c.call(d,f,g,h))b.breakLoop()});return e};b.include=function(a,c){if(b.isArray(a))return b.indexOf(a,c)!=-1;var d=false;b.each(a,function(e){if(d=e===c)b.breakLoop()});return d};b.invoke=function(a,c){var d=b.rest(arguments,2);return b.map(a,function(e){return(c?e[c]:e).apply(e,d)})};b.pluck=
function(a,c){return b.map(a,function(d){return d[c]})};b.max=function(a,c,d){if(!c&&b.isArray(a))return Math.max.apply(Math,a);var e={computed:-Infinity};b.each(a,function(f,g,h){g=c?c.call(d,f,g,h):f;g>=e.computed&&(e={value:f,computed:g})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);var e={computed:Infinity};b.each(a,function(f,g,h){g=c?c.call(d,f,g,h):f;g<e.computed&&(e={value:f,computed:g})});return e.value};b.sortBy=function(a,c,d){return b.pluck(b.map(a,
function(e,f,g){return{value:e,criteria:c.call(d,e,f,g)}}).sort(function(e,f){e=e.criteria;f=f.criteria;return e<f?-1:e>f?1:0}),"value")};b.sortedIndex=function(a,c,d){d=d||b.identity;for(var e=0,f=a.length;e<f;){var g=e+f>>1;d(a[g])<d(c)?(e=g+1):(f=g)}return e};b.toArray=function(a){if(!a)return[];if(a.toArray)return a.toArray();if(b.isArray(a))return a;if(b.isArguments(a))return k.call(a);return b.values(a)};b.size=function(a){return b.toArray(a).length};b.first=function(a,c,d){return c&&!d?k.call(a,
0,c):a[0]};b.rest=function(a,c,d){return k.call(a,b.isUndefined(c)||d?1:c)};b.last=function(a){return a[a.length-1]};b.compact=function(a){return b.select(a,function(c){return!!c})};b.flatten=function(a){return b.reduce(a,[],function(c,d){if(b.isArray(d))return c.concat(b.flatten(d));c.push(d);return c})};b.without=function(a){var c=b.rest(arguments);return b.select(a,function(d){return!b.include(c,d)})};b.uniq=function(a,c){return b.reduce(a,[],function(d,e,f){if(0==f||(c===true?b.last(d)!=e:!b.include(d,
e)))d.push(e);return d})};b.intersect=function(a){var c=b.rest(arguments);return b.select(b.uniq(a),function(d){return b.all(c,function(e){return b.indexOf(e,d)>=0})})};b.zip=function(){for(var a=b.toArray(arguments),c=b.max(b.pluck(a,"length")),d=new Array(c),e=0;e<c;e++)d[e]=b.pluck(a,String(e));return d};b.indexOf=function(a,c){if(a.indexOf)return a.indexOf(c);for(var d=0,e=a.length;d<e;d++)if(a[d]===c)return d;return-1};b.lastIndexOf=function(a,c){if(a.lastIndexOf)return a.lastIndexOf(c);for(var d=
a.length;d--;)if(a[d]===c)return d;return-1};b.range=function(a,c,d){var e=b.toArray(arguments),f=e.length<=1;a=f?0:e[0];c=f?e[0]:e[1];d=e[2]||1;e=Math.ceil((c-a)/d);if(e<=0)return[];e=new Array(e);f=a;for(var g=0;1;f+=d){if((d>0?f-c:c-f)>=0)return e;e[g++]=f}};b.bind=function(a,c){var d=b.rest(arguments,2);return function(){return a.apply(c||j,d.concat(b.toArray(arguments)))}};b.bindAll=function(a){var c=b.rest(arguments);if(c.length==0)c=b.functions(a);b.each(c,function(d){a[d]=b.bind(a[d],a)});
return a};b.delay=function(a,c){var d=b.rest(arguments,2);return setTimeout(function(){return a.apply(a,d)},c)};b.defer=function(a){return b.delay.apply(b,[a,1].concat(b.rest(arguments)))};b.wrap=function(a,c){return function(){var d=[a].concat(b.toArray(arguments));return c.apply(c,d)}};b.compose=function(){var a=b.toArray(arguments);return function(){for(var c=b.toArray(arguments),d=a.length-1;d>=0;d--)c=[a[d].apply(this,c)];return c[0]}};b.keys=function(a){if(b.isArray(a))return b.range(0,a.length);
var c=[];for(var d in a)q.call(a,d)&&c.push(d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=function(a){return b.select(b.keys(a),function(c){return b.isFunction(a[c])}).sort()};b.extend=function(a,c){for(var d in c)a[d]=c[d];return a};b.clone=function(a){if(b.isArray(a))return a.slice(0);return b.extend({},a)};b.tap=function(a,c){c(a);return a};b.isEqual=function(a,c){if(a===c)return true;var d=typeof a;if(d!=typeof c)return false;if(a==c)return true;if(!a&&c||a&&!c)return false;
if(a.isEqual)return a.isEqual(c);if(b.isDate(a)&&b.isDate(c))return a.getTime()===c.getTime();if(b.isNaN(a)&&b.isNaN(c))return true;if(b.isRegExp(a)&&b.isRegExp(c))return a.source===c.source&&a.global===c.global&&a.ignoreCase===c.ignoreCase&&a.multiline===c.multiline;if(d!=="object")return false;if(a.length&&a.length!==c.length)return false;d=b.keys(a);var e=b.keys(c);if(d.length!=e.length)return false;for(var f in a)if(!b.isEqual(a[f],c[f]))return false;return true};b.isEmpty=function(a){return b.keys(a).length==
0};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=function(a){return!!(a&&a.concat&&a.unshift)};b.isArguments=function(a){return a&&b.isNumber(a.length)&&!b.isArray(a)&&!r.call(a,"length")};b.isFunction=function(a){return!!(a&&a.constructor&&a.call&&a.apply)};b.isString=function(a){return!!(a===""||a&&a.charCodeAt&&a.substr)};b.isNumber=function(a){return p.call(a)==="[object Number]"};b.isDate=function(a){return!!(a&&a.getTimezoneOffset&&a.setUTCFullYear)};b.isRegExp=function(a){return!!(a&&
a.test&&a.exec&&(a.ignoreCase||a.ignoreCase===false))};b.isNaN=function(a){return b.isNumber(a)&&isNaN(a)};b.isNull=function(a){return a===null};b.isUndefined=function(a){return typeof a=="undefined"};b.noConflict=function(){j._=n;return this};b.identity=function(a){return a};b.breakLoop=function(){throw m;};var s=0;b.uniqueId=function(a){var c=s++;return a?a+c:c};b.template=function(a,c){a=new Function("obj","var p=[],print=function(){p.push.apply(p,arguments);};with(obj){p.push('"+a.replace(/[\r\t\n]/g,
" ").replace(/'(?=[^%]*%>)/g,"\t").split("'").join("\\'").split("\t").join("'").replace(/<%=(.+?)%>/g,"',$1,'").split("<%").join("');").split("%>").join("p.push('")+"');}return p.join('');");return c?a(c):a};b.forEach=b.each;b.foldl=b.inject=b.reduce;b.foldr=b.reduceRight;b.filter=b.select;b.every=b.all;b.some=b.any;b.head=b.first;b.tail=b.rest;b.methods=b.functions;var l=function(a,c){return c?b(a).chain():a};b.each(b.functions(b),function(a){var c=b[a];i.prototype[a]=function(){var d=b.toArray(arguments);
o.call(d,this._wrapped);return l(c.apply(b,d),this._chain)}});b.each(["pop","push","reverse","shift","sort","splice","unshift"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){c.apply(this._wrapped,arguments);return l(this._wrapped,this._chain)}});b.each(["concat","join","slice"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){return l(c.apply(this._wrapped,arguments),this._chain)}});i.prototype.chain=function(){this._chain=true;return this};i.prototype.value=function(){return this._wrapped}})();
| AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/doc/html/_static/underscore.js | underscore.js |
/*
* searchtools.js
* ~~~~~~~~~~~~~~
*
* Sphinx JavaScript utilties for the full-text search.
*
* :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words, hlwords is the list of normal, unstemmed
* words. the first one is used to find the occurance, the
* latter for highlighting it.
*/
jQuery.makeSearchSummary = function(text, keywords, hlwords) {
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {
var i = textLower.indexOf(this.toLowerCase());
if (i > -1)
start = i;
});
start = Math.max(start - 120, 0);
var excerpt = ((start > 0) ? '...' : '') +
$.trim(text.substr(start, 240)) +
((start + 240 - text.length) ? '...' : '');
var rv = $('<div class="context"></div>').text(excerpt);
$.each(hlwords, function() {
rv = rv.highlightText(this, 'highlighted');
});
return rv;
}
/**
* Porter Stemmer
*/
var PorterStemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
/**
* Search Module
*/
var Search = {
_index : null,
_queued_query : null,
_pulse_status : -1,
init : function() {
var params = $.getQueryParameters();
if (params.q) {
var query = params.q[0];
$('input[name="q"]')[0].value = query;
this.performSearch(query);
}
},
loadIndex : function(url) {
$.ajax({type: "GET", url: url, data: null, success: null,
dataType: "script", cache: true});
},
setIndex : function(index) {
var q;
this._index = index;
if ((q = this._queued_query) !== null) {
this._queued_query = null;
Search.query(q);
}
},
hasIndex : function() {
return this._index !== null;
},
deferQuery : function(query) {
this._queued_query = query;
},
stopPulse : function() {
this._pulse_status = 0;
},
startPulse : function() {
if (this._pulse_status >= 0)
return;
function pulse() {
Search._pulse_status = (Search._pulse_status + 1) % 4;
var dotString = '';
for (var i = 0; i < Search._pulse_status; i++)
dotString += '.';
Search.dots.text(dotString);
if (Search._pulse_status > -1)
window.setTimeout(pulse, 500);
};
pulse();
},
/**
* perform a search for something
*/
performSearch : function(query) {
// create the required interface elements
this.out = $('#search-results');
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
this.dots = $('<span></span>').appendTo(this.title);
this.status = $('<p style="display: none"></p>').appendTo(this.out);
this.output = $('<ul class="search"/>').appendTo(this.out);
$('#search-progress').text(_('Preparing search...'));
this.startPulse();
// index already loaded, the browser was quick!
if (this.hasIndex())
this.query(query);
else
this.deferQuery(query);
},
query : function(query) {
var stopwords = ['and', 'then', 'into', 'it', 'as', 'are', 'in',
'if', 'for', 'no', 'there', 'their', 'was', 'is',
'be', 'to', 'that', 'but', 'they', 'not', 'such',
'with', 'by', 'a', 'on', 'these', 'of', 'will',
'this', 'near', 'the', 'or', 'at'];
// stem the searchterms and add them to the correct list
var stemmer = new PorterStemmer();
var searchterms = [];
var excluded = [];
var hlterms = [];
var tmp = query.split(/\s+/);
var object = (tmp.length == 1) ? tmp[0].toLowerCase() : null;
for (var i = 0; i < tmp.length; i++) {
if ($u.indexOf(stopwords, tmp[i]) != -1 || tmp[i].match(/^\d+$/) ||
tmp[i] == "") {
// skip this "word"
continue;
}
// stem the word
var word = stemmer.stemWord(tmp[i]).toLowerCase();
// select the correct list
if (word[0] == '-') {
var toAppend = excluded;
word = word.substr(1);
}
else {
var toAppend = searchterms;
hlterms.push(tmp[i].toLowerCase());
}
// only add if not already in the list
if (!$.contains(toAppend, word))
toAppend.push(word);
};
var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
// console.debug('SEARCH: searching for:');
// console.info('required: ', searchterms);
// console.info('excluded: ', excluded);
// prepare search
var filenames = this._index.filenames;
var titles = this._index.titles;
var terms = this._index.terms;
var objects = this._index.objects;
var objtypes = this._index.objtypes;
var objnames = this._index.objnames;
var fileMap = {};
var files = null;
// different result priorities
var importantResults = [];
var objectResults = [];
var regularResults = [];
var unimportantResults = [];
$('#search-progress').empty();
// lookup as object
if (object != null) {
for (var prefix in objects) {
for (var name in objects[prefix]) {
var fullname = (prefix ? prefix + '.' : '') + name;
if (fullname.toLowerCase().indexOf(object) > -1) {
match = objects[prefix][name];
descr = objnames[match[1]] + _(', in ') + titles[match[0]];
// XXX the generated anchors are not generally correct
// XXX there may be custom prefixes
result = [filenames[match[0]], fullname, '#'+fullname, descr];
switch (match[2]) {
case 1: objectResults.push(result); break;
case 0: importantResults.push(result); break;
case 2: unimportantResults.push(result); break;
}
}
}
}
}
// sort results descending
objectResults.sort(function(a, b) {
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
});
importantResults.sort(function(a, b) {
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
});
unimportantResults.sort(function(a, b) {
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
});
// perform the search on the required terms
for (var i = 0; i < searchterms.length; i++) {
var word = searchterms[i];
// no match but word was a required one
if ((files = terms[word]) == null)
break;
if (files.length == undefined) {
files = [files];
}
// create the mapping
for (var j = 0; j < files.length; j++) {
var file = files[j];
if (file in fileMap)
fileMap[file].push(word);
else
fileMap[file] = [word];
}
}
// now check if the files don't contain excluded terms
for (var file in fileMap) {
var valid = true;
// check if all requirements are matched
if (fileMap[file].length != searchterms.length)
continue;
// ensure that none of the excluded terms is in the
// search result.
for (var i = 0; i < excluded.length; i++) {
if (terms[excluded[i]] == file ||
$.contains(terms[excluded[i]] || [], file)) {
valid = false;
break;
}
}
// if we have still a valid result we can add it
// to the result list
if (valid)
regularResults.push([filenames[file], titles[file], '', null]);
}
// delete unused variables in order to not waste
// memory until list is retrieved completely
delete filenames, titles, terms;
// now sort the regular results descending by title
regularResults.sort(function(a, b) {
var left = a[1].toLowerCase();
var right = b[1].toLowerCase();
return (left > right) ? -1 : ((left < right) ? 1 : 0);
});
// combine all results
var results = unimportantResults.concat(regularResults)
.concat(objectResults).concat(importantResults);
// print the results
var resultCount = results.length;
function displayNextItem() {
// results left, load the summary and display it
if (results.length) {
var item = results.pop();
var listItem = $('<li style="display:none"></li>');
if (DOCUMENTATION_OPTIONS.FILE_SUFFIX == '') {
// dirhtml builder
var dirname = item[0] + '/';
if (dirname.match(/\/index\/$/)) {
dirname = dirname.substring(0, dirname.length-6);
} else if (dirname == 'index/') {
dirname = '';
}
listItem.append($('<a/>').attr('href',
DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
highlightstring + item[2]).html(item[1]));
} else {
// normal html builders
listItem.append($('<a/>').attr('href',
item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
highlightstring + item[2]).html(item[1]));
}
if (item[3]) {
listItem.append($('<span> (' + item[3] + ')</span>'));
Search.output.append(listItem);
listItem.slideDown(5, function() {
displayNextItem();
});
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
$.get(DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' +
item[0] + '.txt', function(data) {
if (data != '') {
listItem.append($.makeSearchSummary(data, searchterms, hlterms));
Search.output.append(listItem);
}
listItem.slideDown(5, function() {
displayNextItem();
});
});
} else {
// no source available, just display title
Search.output.append(listItem);
listItem.slideDown(5, function() {
displayNextItem();
});
}
}
// search finished, update title and status message
else {
Search.stopPulse();
Search.title.text(_('Search Results'));
if (!resultCount)
Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
else
Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
Search.status.fadeIn(500);
}
}
displayNextItem();
}
}
$(document).ready(function() {
Search.init();
});
| AChemKit | /AChemKit-0.3.0.tar.gz/AChemKit-0.3.0/doc/html/_static/searchtools.js | searchtools.js |
# A simple base client for handling responses from discord
import asyncio
import typing
import warnings
from zlib import compress
import acord
import sys
import traceback
from inspect import iscoroutinefunction
from acord.core.decoders import ETF, JSON, decompressResponse
from acord.core.signals import gateway
from .core.http import HTTPClient
from .errors import *
from functools import wraps
from typing import (
Union, Callable
)
from acord.models import User
class Client(object):
"""
Client for interacting with the discord API
Parameters
----------
loop: :class:`~asyncio.AbstractEventLoop`
An existing loop to run the client off of
token: :class:`str`
Your API Token which can be generated at the developer portal
tokenType: typing.Union[BEARER, BOT]
The token type, which controls the payload data and restrictions.
.. warning::
        If BEARER, do not use the `run` method. You're able to access data normally.
    commandHandler: :class:`~typing.Callable`
        An optional command handler, defaults to the built-in handler at :class:`~acord.DefaultCommandHandler`.
        **Parameters passed through:**
* Message: :class:`~acord.Message`
* UpdatedCache: :class:`bool`
"""
def __init__(self, *,
loop: asyncio.AbstractEventLoop = asyncio.get_event_loop(),
token: str = None,
encoding: str = "JSON",
compress: bool = False,
commandHandler: Callable = None,
) -> None:
self.loop = loop
self.token = token
self._events = dict()
self.commandHandler = commandHandler
# Gateway connection stuff
self.encoding = encoding
self.compress = compress
# Others
self.session_id = None
self.gateway_version = None
self.user = None
def bindToken(self, token: str) -> None:
self._lruPermanent = token
def event(self, func):
if not iscoroutinefunction(func):
raise ValueError('Provided function was not a coroutine')
eventName = func.__qualname__
if eventName in self._events:
self._events[eventName].append(func)
else:
self._events.update({eventName: [func]})
return func
def on_error(self, event_method):
acord.logger.error('Failed to run event "{}".'.format(event_method))
print(f'Ignoring exception in {event_method}', file=sys.stderr)
traceback.print_exc()
async def dispatch(self, event_name: str, *args, **kwargs) -> None:
if not event_name.startswith('on_'):
event_name = 'on_' + event_name
acord.logger.info('Dispatching event: {}'.format(event_name))
events = self._events.get(event_name, [])
acord.logger.info('Total of {} events found for {}'.format(len(events), event_name))
for event in events:
try:
await event(*args, **kwargs)
except Exception:
self.on_error(event)
async def handle_websocket(self, ws):
async for message in ws:
await self.dispatch('socket_recieve')
data = message.data
if type(data) is bytes:
data = decompressResponse(data)
if not data:
continue
if not data.startswith('{'):
data = ETF(data)
else:
data = JSON(data)
if data['op'] == gateway.INVALIDSESSION:
acord.logger.error('Invalid Session - Reconnecting Shortly')
raise GatewayConnectionRefused('Invalid session data, currently not handled in this version')
if data['t'] == 'READY':
await self.dispatch('ready')
self.session_id = data['d']['session_id']
self.gateway_version = data['d']['v']
self.user = User(**data['d']['user'])
continue
if data['op'] == gateway.HEARTBEATACK:
await self.dispatch('heartbeat')
def resume(self):
""" Resumes a closed gateway connection """
def run(self, token: str = None, *, reconnect: bool = True):
if (token or self.token) and getattr(self, '_lruPermanent', False):
warnings.warn("Cannot use current token as another token was binded to the client", CannotOverideTokenWarning)
token = getattr(self, '_lruPermanent', None) or (token or self.token)
if not token:
raise ValueError('No token provided')
self.http = HTTPClient(loop=self.loop)
self.token = token
# Login to create session
self.loop.run_until_complete(self.http.login(token=token))
coro = self.http._connect(
token,
encoding=self.encoding,
compress=self.compress
)
# Connect to discord, send identity packet + start heartbeat
ws = self.loop.run_until_complete(coro)
self.loop.run_until_complete(self.dispatch('connect'))
acord.logger.info('Connected to websocket')
self.loop.run_until_complete(self.handle_websocket(ws))
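
# --- Hypothetical usage sketch (not part of the original module) ---
# The token string and the handler below are placeholders; only the Client API defined
# above (``event`` registration and ``run``) is assumed.
if __name__ == "__main__":
    client = Client(token="YOUR_TOKEN_HERE")

    @client.event
    async def on_ready():
        # dispatched once the READY payload arrives on the gateway
        print("Connected and ready")

    client.run()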
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/client.py | client.py |
""" Exceptions raised by the module """
class BaseResponseException(Exception):
    def __new__(cls, *args, **kwargs):
        # instantiate the *calling* subclass so that ``__init__`` runs and handlers such
        # as ``except HTTPException`` match the object that is actually raised
        return super(BaseResponseException, cls).__new__(cls)
class HTTPException(BaseResponseException):
def __init__(self, code, message):
super().__init__(f'Status {code}: {message}')
class GatewayConnectionRefused(BaseResponseException):
""" Raised when connecting to gateway fails """
class CannotOverideTokenWarning(Warning):
""" Warned when cannot use provided token due to binded token present """
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/errors.py | errors.py |
"""
ACord - An API wrapper for the discord API.
Created by Mecha Karen, and is licensed under the GNU GENERAL PUBLIC LICENSE.
"""
from typing import NamedTuple, Literal
import logging
from .client import Client
logger = logging.getLogger("ACord")
__file__ = __import__("os").path.abspath(__file__)
__doc__ = "An API wrapper for the discord API"
__version__ = "0.0.1a"
__author__ = "Mecha Karen"
class VersionInfo(NamedTuple):
major: int
minor: int
micro: int
    level: Literal["Pre-Alpha", "Alpha", "Beta", "Stable", "Final"]
version_info: VersionInfo = VersionInfo(major=0, minor=0, micro=1, level="Pre-Alpha")
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/__init__.py | __init__.py |
import pydantic
class User(pydantic.BaseModel):
verified: bool
username: str
mfa_enabled: bool
id: int
flags: int
discriminator: int
    bot: bool
    avatar: str = None      # may be null when the user has no custom avatar
    email: str = None
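
# Hypothetical construction sketch (not from the original file): pydantic validates and
# coerces a raw gateway ``user`` payload into the model above. All field values are invented.
if __name__ == "__main__":
    payload = {
        "verified": True,
        "username": "acord-bot",
        "mfa_enabled": False,
        "id": "80351110224678912",     # snowflakes arrive as strings and coerce to int
        "flags": 0,
        "discriminator": "0001",
        "bot": True,
        "avatar": "a3b1c2d3e4f5",
    }
    user = User(**payload)
    print(user.id, user.username)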
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/models/user.py | user.py |
from .user import User | ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/models/__init__.py | __init__.py |
"""
Main HTTP connection and websocket interaction between discord and your application
"""
try:
import uvloop
uvloop.install()
except ImportError:
__import__('warnings').warn('Failed to import UVLoop, it is recommended to install this library\npip install uvloop', ImportWarning)
import asyncio
import typing
import aiohttp
import acord
import sys
from acord.errors import GatewayConnectionRefused, HTTPException
from . import helpers
from .heartbeat import KeepAlive
from .decoders import *
from .signals import gateway
class HTTPClient(object):
"""
    Base client used to connect to and interact with the websocket.
    Parameters
    ----------
    loop: :class:`~asyncio.AbstractEventLoop`
        A pre-existing loop for aiohttp to run on, defaults to ``asyncio.get_event_loop()``
    reconnect: :class:`bool`
        Attempt to reconnect to the gateway if the connection fails. If set to an integer, it will re-attempt n times.
    wsTimeout: :class:`~aiohttp.ClientTimeout`
        Custom timeout configuration for the websocket connection
**payloadData: :class:`dict`
A dictionary of payload data to be sent with any request
.. note::
This information can be overwritten with each response
"""
def __init__(self,
token: str = None,
connecter: typing.Optional[aiohttp.BaseConnector] = None,
wsTimeout: aiohttp.ClientTimeout = aiohttp.ClientTimeout(60, connect=None),
proxy: typing.Optional[str] = None,
proxy_auth: typing.Optional[aiohttp.BasicAuth] = None,
loop: typing.Optional[asyncio.AbstractEventLoop] = asyncio.get_event_loop(),
unsync_clock: bool = True,
) -> None:
self.token = token
self.loop = loop
self.wsTimeout = wsTimeout
self.connector = connecter
self._ws_connected = False
self.proxy = proxy
self.proxy_auth = proxy_auth
self.use_clock = not unsync_clock
user_agent = "ACord - https://github.com/Mecha-Karen/ACord {0} Python{1[0]}.{1[1]} aiohttp/{2}"
self.user_agent = user_agent.format(
acord.__version__, sys.version, aiohttp.__version__
)
    def getIdentityPacket(self, intents: int = 513):
        # the default of 513 (GUILDS | GUILD_MESSAGES) matches the previously hard-coded
        # value, but the caller-supplied ``intents`` argument is now actually used
        return {
            "op": gateway.IDENTIFY,
            "d": {
                "token": self.token,
                "intents": intents,
"properties": {
"$os": sys.platform,
"$browser": "acord",
"$device": "acord"
}
}
}
def updatePayloadData(self, overwrite: bool = False, **newData) -> None:
if overwrite:
self.startingPayloadData = newData
else:
            # merge with any existing payload data; fall back to an empty dict on first use
            self.startingPayloadData = {**getattr(self, "startingPayloadData", {}), **newData}
async def login(self, *, token: str) -> None:
""" Define a session for the http client to use. """
self._session = aiohttp.ClientSession(connector=self.connector)
ot = self.token
self.token = token
try:
data = await self.request(
helpers.Route("GET", path="/users/@me")
)
except HTTPException as exc:
self.token = ot
acord.logger.error('Failed to login to discord, improper token passed')
raise GatewayConnectionRefused('Invalid or Improper token passed') from exc
return data
async def _fetchGatewayURL(self, token):
uri = helpers.buildURL('gateway', 'bot')
async with self._session.get(uri, headers={'Authorization': f"Bot {token}"}) as resp:
data = await resp.json()
return data
async def _connect(self, token: str, *,
encoding: helpers.GATEWAY_ENCODING, compress: int = 0,
**identityPacketKwargs
) -> None:
if not getattr(self, '_session', False):
            acord.logger.warn('Session not defined, user not logged in. Calling login manually')
await self.login(token=(token or self.token))
self.encoding = encoding
self.compress = compress
respData = await self._fetchGatewayURL(token)
GATEWAY_WEBHOOK_URL = respData['url']
GATEWAY_WEBHOOK_URL += f'?v={helpers.API_VERSION}'
GATEWAY_WEBHOOK_URL += f'&encoding={encoding.lower()}'
if compress:
GATEWAY_WEBHOOK_URL += "&compress=zlib-stream"
acord.logger.info('Generated websocket url: %s' % GATEWAY_WEBHOOK_URL)
kwargs = {
'proxy_auth': self.proxy_auth,
'proxy': self.proxy,
'max_msg_size': 0,
'timeout': self.wsTimeout.total,
'autoclose': False,
'headers': {
'User-Agent': self.user_agent,
},
'compress': compress
}
ws = await self._session.ws_connect(GATEWAY_WEBHOOK_URL, **kwargs)
helloRecv = await ws.receive()
data = helloRecv.data
if compress:
data = decompressResponse(data)
if not data.startswith('{'):
data = ETF(data)
else:
data = JSON(data)
self._ws_connected = True
self.ws = ws
self.loop.create_task(KeepAlive(self.getIdentityPacket(**identityPacketKwargs), ws, data).run())
return ws
async def request(self, route: helpers.Route, data: dict = None, **payload) -> None:
url = route.url
headers = payload
headers['Authorization'] = "Bot " + self.token
headers['User-Agent'] = self.user_agent
kwargs = dict()
kwargs['data'] = data
kwargs['headers'] = headers
resp = await self._session.request(
method=route.method,
url=url,
**kwargs
)
return resp
@property
def connected(self):
return self._ws_connected
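
# Hypothetical request sketch (an assumption, not part of the original module): once
# ``login`` has created the aiohttp session, REST calls go through ``request`` with a
# ``helpers.Route``. The token below is a placeholder.
#
#   http = HTTPClient()
#   await http.login(token="YOUR_TOKEN_HERE")
#   me = await http.request(helpers.Route("GET", path="/users/@me"))
#   print(await me.json())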
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/core/http.py | http.py |
# Basic heartbeat controller
import asyncio
from .signals import gateway
class KeepAlive(object):
def __init__(self, identity, ws, helloPacket: dict):
self._ws = ws
self.packet = helloPacket
self.identity = identity
async def run(self):
packet = self.packet
await self._ws.send_json(self.identity)
while True:
if packet['op'] != gateway.HELLO:
raise ValueError('Invalid hello packet provided')
await asyncio.sleep((packet['d']['heartbeat_interval'] / 1000))
await self._ws.send_json(await self.get_payload())
async def get_payload(self):
return {
"op": gateway.HEARTBEAT,
"d": gateway.SEQUENCE
}
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/core/heartbeat.py | heartbeat.py |
"""
All version related info for connecting to the gateway.
"""
import yarl
from typing import Optional, Literal, Union
API_VERSION = 9
BASE_API_URL = "https://discord.com/api"
GATEWAY_ENCODING = Literal["JSON", "ETF"]
def buildURL(*paths, **parameters) -> Union[str, yarl.URL]:
URI = f'{BASE_API_URL}/v{API_VERSION}'
for path in paths:
URI += f'/{path}'
if not parameters:
return URI
URI += '?'
for key, value in parameters.items():
URI += f'{key}={value}&'
    return yarl.URL(URI.rstrip('&'))
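
# Illustrative examples (added commentary, not part of the original module):
#   buildURL('gateway', 'bot')
#       -> "https://discord.com/api/v9/gateway/bot"            (plain str: no query parameters)
#   buildURL('channels', '1234', limit=50)
#       -> yarl.URL('https://discord.com/api/v9/channels/1234?limit=50')
# Note that the return type depends on whether query parameters were supplied.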
class Route(object):
""" Simple object representing a route """
def __init__(self, method: str = "GET", *paths, path="/", **parameters):
        if path:
            paths = [p for p in path.split('/') if p]
        self.paths = paths  # building url with yarl
self.path = '/'.join(paths)
self.parameters = parameters
self.method = method
self.url = buildURL(*paths, **parameters)
self.channel_id: Optional[...] = parameters.get('channel_id')
self.guild_id: Optional[...] = parameters.get('guild_id')
self.webhook_id: Optional[...] = parameters.get('webhook_id')
self.webhook_token: Optional[str] = parameters.get('webhook_token')
@property
def bucket(self):
return f'{self.channel_id}:{self.guild_id}:{self.path}'
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/core/helpers.py | helpers.py |
import zlib
import json
ZLIB_SUFFIX = b'\x00\x00\xff\xff'
INFLATOR = zlib.decompressobj()
BUFFER = bytearray()
def decompressResponse(msg):
if type(msg) is bytes:
BUFFER.extend(msg)
    if len(msg) < 4 or msg[-4:] != ZLIB_SUFFIX:
return
msg = INFLATOR.decompress(BUFFER)
msg = msg.decode('utf-8')
BUFFER.clear()
return msg
def ETF(msg):
raise NotImplementedError()
def JSON(msg):
return json.loads(msg)
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/core/decoders.py | decoders.py |
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
PRESENCE = 3
VOICE = 4
RESUME = 6
RECONNECT = 7
GUILDMEMBERS = 8
INVALIDSESSION = 9
HELLO = 10
HEARTBEATACK = 11
# Error Codes
UNKNOWN = 4000
UNKNOWN_OP = 4001
DECODE_ERROR = 4002
FORBIDDEN = 4003
AUTH_FAILED = 4004
AUTH_COMPLETED = 4005
FAILED_SEQUENCE = 4007
RATELIMIT = 4008
SESSION_TIMED_OUT = 4009
INVALID_SHARD = 4010
SHARD_REQUIRED = 4011
INVALID_GATEWAY_VER = 4012
INVALID_INTENTS = 4013
DISALLOWED_INTENT = 4014
SEQUENCE = None
def heartbeatPacket():
return {
"op": HEARTBEAT,
"d": SEQUENCE
}
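
# Illustrative note (added commentary, not part of the original module):
# SEQUENCE is meant to hold the last sequence number received from the gateway;
# while it is still None a heartbeat carries a null payload:
#   heartbeatPacket()  ->  {"op": 1, "d": None}    (HEARTBEAT == 1)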
| ACord | /ACord-0.0.1a0-py3-none-any.whl/acord/core/signals/gateway.py | gateway.py |
# ROOTFINDER.PY
# This file uses our team's automatic differentiation package to find the roots
# of a given function.
# THINGS TO DO
# Complete implementation for the root finding methods
# Perhaps include a visualization?
# Perhaps include some type of automation for the initial guess that utilizes our derivation package?
# Double check syntax - tensor vs. AD class
# Consider changing method definitions such that the main class initialization values are set to defaults (this allows the user to change the parameters for each individual numerical method)
# Root counter - count the estimated number of roots over a given domain. Include this?
# LIBRARIES OF USE
# NOTE THAT THE CHDIR COMMAND SHOULD BE DELETED PRIOR TO FINAL SUBMISSION. IT
# IS HERE SOLELY FOR TESTING PURPOSES
# import os
# os.chdir("C:/Users/DesktopID3412MNY/Desktop/cs107-FinalProject/")
from AD_Derivators.functions import tensor, autograd
import numpy as np
from AD_Derivators.helper_functions import ad_utils
import inspect
# MAIN ROOT FINDER FUNCTION
def root(x0, functions, x1 = None, tolerance = 1e-9, max_iterations = 100,
method = "newton", min_diff = None, verbose = 1):
"""
    Args:
    ====================
    x0 (list or np.ndarray): a 1-d or 2-d array holding the initial guess
        a. 1d: shape == (num_inputs,)
        b. 2d: shape == (num_inputs, vec_dim)
    functions (list): a list of callable functions, each taking num_inputs inputs
    x1 (list or np.ndarray, optional): a second guess with the same shape as x0;
        required by the "secant" and "bisection" methods.
    tolerance : positive scalar
        How close the estimated root should be to 0. The default is 1e-9.
    max_iterations : INT > 0, optional
        Maximum number of iterations the algorithm will be run prior to
        terminating, which will occur if a value within the tolerance is not
        found. The default is 100.
    method : STRING, optional
        The name of the root finding algorithm to use. The default is "newton".
    min_diff : positive scalar, optional
        The smallest step between successive iterates before the search is
        reported as converged without reaching the tolerance. Defaults to tolerance.
    verbose : INT, optional
        If truthy, print the method, the roots found and the number of iterations.
    Raises
    ------
    Exception
        An exception is raised if the user enters an algorithm type that is not
        defined in this code base.
    Returns
    -------
    (dict): {"roots": np.ndarray, "iters": int, "case": string}
    """
assert isinstance(x0, list) or isinstance(x0, np.ndarray), f"x0 should be a list or np.ndarray"
assert isinstance(functions, list) and all(callable(f) for f in functions), f"functions should be a list of callable function"
assert ad_utils.check_number(tolerance) and tolerance > 0, f"Expected tolerance to be a positive number, instead received {type(tolerance)}"
assert isinstance(max_iterations, int) and max_iterations > 0, f"Expected max_iterations to be a positive integer, instead received {type(max_iterations)}"
assert isinstance(method, str)
    assert ad_utils.check_number(min_diff) or (min_diff is None), f"Expected min_diff to be a number or None, instead received {type(min_diff)}"
if min_diff is None:
min_diff = tolerance
elif min_diff > tolerance:
        raise ValueError("Expected min_diff to be no greater than tolerance")
method = method.strip().lower()
num_functions = len(functions)
num_inputs = num_functions #!!!!!
for f in functions:
if len(inspect.signature(f).parameters) != num_inputs:
            raise IOError("Each function must take exactly as many arguments as there are functions")
# convert x0 to np.array first
x0 = np.array(x0)
assert len(x0.shape) < 3, f"we only accept 1 or 2 dimensional input"
assert x0.shape[0] == num_inputs, f"the dimension of initial guess x0 should match (num_functions,)"
x0 = x0.reshape(num_functions,-1) # expand dimension for 1-dim input
vec_dim = x0.shape[1]
# expand dim and repeat
x0 = np.expand_dims(x0, axis=0)
x0 = np.repeat(x0, num_functions, axis = 0 )
if x1 is None:
# build ad class
ad = autograd.AD(num_functions, num_inputs, vec_dim)
ad.add_inputs(x0)
ad.build_function(functions)
if method == "newton":
res, iters, case = _newton(ad, tolerance, max_iterations, min_diff)
elif method == "broyden1":
res, iters, case = _broyden_good(ad, tolerance, max_iterations, min_diff)
elif method == "broyden2":
res, iters, case = _broyden_bad(ad, tolerance, max_iterations, min_diff)
# elif method == 'steffensen':
# res, iters, case = _steffensen(x0, functions, tolerance, max_iterations, min_diff)
else:
raise Exception(f"Method \"{method}\" is not a valid solver algorithm when x1 is None")
else:
x1 = np.array(x1).reshape(num_functions,-1)
x1 = np.expand_dims(x1, axis=0)
x1 = np.repeat(x1, num_functions, axis = 0 )
assert x1.shape == x0.shape, "the dimension of x0 should match x1"
if method == "secant":
res, iters, case = _secant(x0,x1, functions, tolerance, max_iterations, min_diff)
elif method == "bisection":
res, iters, case = _bisection(x0,x1, functions, tolerance, max_iterations, min_diff)
else:
raise Exception(f"Method \"{method}\" is not a valid solver algorithm when x1 is not None")
if verbose:
print(f'method: {method}')
print(f'results: {res}')
print(f'number of iterations take: {iters}')
print(case)
return {'roots': res, 'iters': iters, 'case':case}
def _newton(ad,tolerance, max_iterations, min_diff):
x0 = ad.get_inputs()[0,:,:] # x0 is a np.ndarray (num_inputs, vec_dim)
case = None
for i in range(max_iterations):
x = x0 - np.linalg.pinv(ad.jacobian)@_get_fx(x0, ad.function) # x is a np.ndarray (num_inputs, vec_dim)
if _check_root(x, ad.function, tolerance):
case = "[PASS] root found"
return x, i, case
# converged
if (np.linalg.norm(x - x0) < min_diff):
case = "[FAIL] converged"
return x, i, case
x0 = x
next_input = np.repeat([x0], ad.num_functions, axis = 0)
ad.add_inputs(next_input) # update jacobian for next round
ad.build_function(ad.function) # recalculate jacobian for next step
case = "[FAIL] maximum iteration reached"
return x, i, case
def _broyden_good(ad, tolerance, max_iterations, min_diff):
    # give the initialization for the Jacobian inverse
try:
J = ad.jacobian
J_inv = np.linalg.inv(J)
except: # use identity initialization when jacobian is not invertible
J_inv = np.eye(ad.num_functions)
x0 = ad.get_inputs()[0,:,:] # x0 is a np.ndarray (num_inputs, vec_dim)
case = None
f0 = _get_fx(x0, ad.function)
for i in range(max_iterations):
x = x0 - J_inv@f0
if _check_root(x, ad.function, tolerance):
case = "[PASS] root found"
return x, i, case
# converged
if (np.linalg.norm(x - x0)< min_diff):
case = "[FAIL] converged"
return x, i, case
delta_x = x - x0
f = _get_fx(x, ad.function)
delta_f = f - f0
# update J_inv, f0, x0
J_inv = J_inv + np.dot((delta_x - J_inv@delta_f)/np.dot(delta_x.T@J_inv,delta_f), delta_x.T@J_inv)
f0 = f
x0 = x
case = "[FAIL] maximum iteration reached"
return x, i, case
def _broyden_bad(ad, tolerance, max_iterations, min_diff):
#J = ad.jacobian
try:
J = ad.jacobian
J_inv = np.linalg.inv(J)
except:
J_inv = np.eye(ad.num_functions)
x0 = ad.get_inputs()[0,:,:] # x0 is a np.ndarray (num_inputs, vec_dim)
case = None
f0 = _get_fx(x0, ad.function)
for i in range(max_iterations):
x = x0 - J_inv@f0
if _check_root(x, ad.function, tolerance):
#print(x,i,case)
case = "[PASS] root found"
return x, i, case
# converged
if (np.linalg.norm(x - x0) < min_diff):
case = "[FAIL] converged"
return x, i, case
delta_x = x - x0
f = _get_fx(x, ad.function)
delta_f = f - f0
J_inv = J_inv + np.dot((delta_x - J_inv@delta_f)/np.power((np.linalg.norm(delta_f)),2), delta_f.T)
f0 = f
x0 = x
case = "[FAIL] maximum iteration reached"
return x, i, case
def _check_zero(a):
"""
make sure no elements in a are 0
"""
if (a == 0).any():
        a = a.astype(float)  # convert to float first
for m in range(a.shape[0]):
for n in range(a.shape[1]):
if a[m,n] ==0:
a[m,n]+= 0.1
return a
def _secant(x0,x1, functions,tolerance, max_iterations, min_diff):
if len(functions) > 1:
        raise IOError("The secant method only applies to a single function of a single variable")
case = None
    x0 = x0.astype(float)
    x1 = x1.astype(float)
if x1 == x0:
x1 = x0 + 0.1
for i in range(max_iterations):
# make sure x0 does not equal to x1
f0 = _get_fx(x0,functions)
f1 = _get_fx(x1, functions)
if (f1 - f0 == 0).any():
case = "[FAIL] Zero division encountered"
return x1,i,case
g = (x1-x0)/(f1-f0)
x = x1 - f1*g
if _check_root(x, functions, tolerance):
case = "[PASS] root found"
            return x, i, case
# converged
if (np.linalg.norm(x - x1) < min_diff):
case = "[FAIL] converged"
return x1,i,case
x0 = x1
x1 = x
case = "[FAIL] maximum iteration reached"
return x, i, case
def _bisection(x0,x1, functions,tolerance, max_iterations, min_diff):
"""
Need to make sure x0 < x1 and f(x0)f(x1) <0
"""
case = None
if len(functions) > 1:
        raise IOError("The bisection method only applies to a single function of a single variable")
    x0 = x0.astype(float)
    x1 = x1.astype(float)
x0,x1 = _prepare_bisection(x0,x1,functions)
for i in range(max_iterations):
c= (x0+x1)/2
if _check_root(c, functions, tolerance):
case = "[PASS] root found"
return c, i, case
x0,x1 = _update_bisection(x0,x1,c, functions)
# converged
if (np.linalg.norm(x1 - x0) < min_diff):
case = "[FAIL] converged"
return c, i, case
case = "[FAIL] maximum iteration reached"
return c, i, case
def _prepare_bisection(x0,x1, functions):
"""
    make sure every element of x0 is less than the corresponding element of x1
"""
vec1 = x0[0,:,:]
vec2 = x1[0,:,:]
res0 = _get_fx(vec1,functions)
res1 = _get_fx(vec2, functions)
if (res0*res1 > 0).any():
raise IOError("For Bisection you need to give inputs that f(x0)f(x1) < 0")
for m in range(len(vec1)):
for n in range(len(vec1[0])):
if vec1[m,n] > vec2[m,n]:
t = vec1[m,n]
vec1[m,n] = vec2[m,n]
vec2[m,n] = t
return vec1,vec2
def _update_bisection(a,b,c, functions):
"""
a,b,c: num_inputs x vec_dim
"""
fa = _get_fx(a, functions) # num_functions x vec_dim
fb = _get_fx(b, functions) #
fx = _get_fx(c, functions)
for m in range(a.shape[0]):
for n in range(a.shape[1]):
if fa[m,n]*fx[m,n] > 0:
a[m,n] = c[m,n]
elif fb[m,n]*fx[m,n] > 0:
b[m,n] = c[m,n]
return a,b
def _check_root(x, functions, tolerance):
"""
    x (np.ndarray): a 2-d array of shape (num_inputs, vec_dim)
functions: a list of functions
tolerance: a positive number
"""
flag = True
for f in functions:
inputs = [x[i] for i in range(len(x))]
res = f(*inputs) # res is a np.ndarray
if np.linalg.norm(res) >= tolerance:
flag = False
break
return flag
def _get_fx(x, functions):
"""
    x (np.ndarray): a numpy array of shape (num_inputs, vec_dim)
"""
output = [] #use list in case the output of root are vectors
for f in functions:
inputs = [x[i] for i in range(len(x))]
res = f(*inputs) # res is a (vec_dim,) np.ndarray
output.append(res)
    return np.array(output)  # (num_functions, vec_dim)
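
# Illustrative usage sketch (added commentary, not part of the original module).
# The lambda below is a hypothetical example function; any callable mapping Tensor
# inputs to a Tensor works, per the requirements documented in root():
#
#   >>> from AD_Derivators import rootfinder
#   >>> res = rootfinder.root([1.0], [lambda x: x ** 2 - 2], method="newton", verbose=0)
#   >>> abs(res["roots"][0, 0] - 2 ** 0.5) < 1e-6
#   True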
| AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/AD_Derivators/rootfinder.py | rootfinder.py |
# from __future__ import absolute_import
# from .functions import autograd
# from .functions import tensor
# __all__ = ['Tensor', 'AD'] | AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/AD_Derivators/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:58:22 2020
@author: Courage and Ethan
"""
import numpy as np
from AD_Derivators.helper_functions.ad_utils import check_number, check_array, check_list, check_anyzero, check_tan, check_anyneg, check_nontensor_input
"""
tensor.py
derivative rules for elementary operations.
"""
def sin(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
return np.sin(t)
elif isinstance(t,Tensor):
pass
else:
raise TypeError('The input of tensor.sin can only be a Tensor/list/np.array/number.')
ob = Tensor()
#new func val
ob._val = np.sin(t._val)
#chain rule
ob._der = np.cos(t._val)* t._der
return ob
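
# Illustrative example (added commentary, not part of the original module):
#   >>> x = Tensor([0.0])
#   >>> y = sin(x)
#   >>> y.val, y.der          # chain rule: d/dx sin(x) = cos(x) * x'
#   (array([0.]), array([1.]))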
def cos(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
return np.cos(t)
elif isinstance(t,Tensor):
pass
else:
raise TypeError('The input of tensor.cos can only be a Tensor/list/np.array/number.')
ob = Tensor()
#new func val
ob._val = np.cos(t._val)
#chain rule
ob._der = -np.sin(t._val)* t._der
return ob
def tan(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
if not check_tan(t):
return np.tan(t)
else:
raise ValueError('Tan undefined')
elif isinstance(t,Tensor):
if check_tan(t._val):
raise ValueError('Tan undefined')
else:
raise TypeError('The input of tensor.tan can only be a Tensor/list/np.array/number.')
ob = Tensor()
#new func val
ob._val = np.tan(t._val)
#chain rule
ob._der = t._der/(np.cos(t._val)**2)
return ob
def asin(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
t = np.array(t)
if (t > 1).any() or (t < -1).any():
raise ValueError('The value of asin is undefined outside of 1 or -1')
else:
return np.arcsin(t)
elif isinstance(t,Tensor):
if (t._val == 1).any() or (t._val == -1).any():
raise ValueError('The derivative of asin is undefined at 1 or -1')
elif (t._val > 1).any() or (t._val < -1).any():
raise ValueError('The value of asin is undefined outside of 1 or -1')
else:
ob = Tensor()
#new func val
ob._val = np.arcsin(t._val)
#chain rule
ob._der = 1/(np.sqrt(1 - t._val**2))* t._der
return ob
else:
raise TypeError('The input of tensor.asin can only be a Tensor/numpy/list/scaler object.')
def sinh(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
t = np.array(t)
return np.sinh(t)
elif isinstance(t,Tensor):
ob = Tensor()
#new func val
ob._val = np.sinh(t._val)
#chain rule
ob._der = np.cosh(t._val)* t._der
return ob
else:
raise TypeError('The input of tensor.sinh can only be a Tensor/numpy/list/scaler object.')
def acos(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
t = np.array(t)
if (t > 1).any() or (t < -1).any():
raise ValueError('The value of acos is undefined outside of 1 or -1')
else:
return np.arccos(t)
elif isinstance(t,Tensor):
if (t._val == 1).any() or (t._val == -1).any():
raise ValueError('The derivative of acos is undefined at 1 or -1')
elif (t._val > 1).any() or (t._val < -1).any():
raise ValueError('The value of acos is undefined outside of 1 or -1')
else:
ob = Tensor()
#new func val
ob._val = np.arccos(t._val)
#chain rule
ob._der = -1/(np.sqrt(1 - t._val**2))* t._der
return ob
else:
raise TypeError('The input of tensor.acos can only be a Tensor/numpy/list/scaler object.')
def cosh(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
t = np.array(t)
return np.cosh(t)
elif isinstance(t,Tensor):
ob = Tensor()
#new func val
ob._val = np.cosh(t._val)
#chain rule
ob._der = np.sinh(t._val)* t._der
return ob
else:
raise TypeError('The input of tensor.cosh can only be a Tensor/numpy/list/scaler object.')
def atan(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
t = np.array(t)
        return np.arctan(t)
elif isinstance(t,Tensor):
ob = Tensor()
#new func val
ob._val = np.arctan(t._val)
#chain rule
ob._der = t._der/(1 + t._val**2)
return ob
else:
        raise TypeError('The input of tensor.atan can only be a Tensor/numpy/list/scalar object.')
def tanh(t):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
"""
if check_nontensor_input(t):
t = np.array(t)
return np.tanh(t)
elif isinstance(t,Tensor):
ob = Tensor()
#new func val
ob._val = np.tanh(t._val)
#chain rule
ob._der = t._der* (1/np.cosh(t._val))**2
return ob
else:
raise TypeError('The input of tensor.tanh can only be a Tensor/numpy/list/scaler object.')
def exp(t, base = np.e):
"""
Input
=========
    t (tensor.Tensor/numpy/list/scalar)
    base (scalar)
"""
if not check_number(base):
        raise TypeError('The base must be a scalar.')
if check_nontensor_input(t): # no need to worry if base nonpositive
return np.power(base,t)
elif isinstance(t,Tensor):
if base <=0:
raise ValueError('The base must be positive, otherwise derivation undefined')
else:
raise TypeError('The input of tensor.exp can only be a Tensor/list/np.array/number.')
ob = Tensor()
#new func val
ob._val = np.power(base,t._val)
#chain rule
ob._der = np.power(base,t._val) * t._der * np.log(base)
return ob
def log(t, a = np.e):
"""
Input
=========
t (tensor.Tensor)
"""
if not check_number(a):
        raise TypeError('The base should be a scalar')
if a <= 0:
raise ValueError('The base must be positive')
if check_nontensor_input(t):
t = np.array(t)
if (t <= 0).any():
raise ValueError('log undefined')
else:
return np.log(t)
elif isinstance(t,Tensor):
if check_anyneg(t._val):
raise ValueError('Log undefined')
else:
#create object for output and derivative
ob = Tensor()
#new func val
ob._val = np.log(t._val)/np.log(a)
#chain rule
ob._der = (1/(t._val*np.log(a)))*t._der
return ob
else:
raise TypeError('The input of tensor.log can only be a Tensor/list/np.array/number.')
def sigmoid(t, t0 = 0, L = 1, k = 1):
"""
A logistic function or logistic curve is a common S-shaped curve (sigmoid curve) with equation
f(t) = L/(1+exp(-k(t-t0)))
Input
=========
t needs to be a tensor.Tensor object
t0 is the x value of the sigmoid's midpoint. The default value is 0.
L is the curve's maximum value.
k is the logistic growth rate or steepness of the curve.
"""
if not isinstance(t,Tensor):
raise TypeError('The input of tensor.sigmoid can only be a Tensor object.')
if not check_number(t0):
raise TypeError('t0 must be either an int or float')
if not check_number(L):
raise TypeError('L must be either an int or float')
if not check_number(k):
raise TypeError('k must be either an int or float')
#create object for output and derivative
ob = Tensor()
#new func val
ob._val = L / (1+np.exp(-k * (t._val - t0)))
#chain rule
ob._der = t._der * (L * k * np.exp(-k * (t._val - t0))) / (1 + np.exp(-k * (t._val - t0)))**2
return ob
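
# Illustrative example (added commentary, not part of the original module):
# with the default parameters (t0 = 0, L = 1, k = 1) the midpoint sits at t = 0,
# where the value is 0.5 and the slope is L*k/4 = 0.25:
#   >>> s = sigmoid(Tensor(0.0))
#   >>> s.val, s.der
#   (array([0.5]), array([0.25]))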
def sqrt(t):
"""
The function used to calculate the square root of a non-negative variable.
Input
============
t needs to be a tensor.Tensor object. All the elements must be non-negative.
"""
if check_nontensor_input(t):
t = np.array(t)
if (t < 0).any():
raise ValueError('The constant input must be all nonnegative value, no complex number allowed')
else:
return np.sqrt(t)
elif isinstance(t,Tensor):
if check_anyneg(t):
raise ValueError('The input must be all positive value, no complex number allowed')
else:
ob = Tensor()
ob._val = t._val**(0.5)
ob._der = 0.5* t._val**(-0.5) * t._der
return ob
else:
raise TypeError('The input of tensor.sqrt can only be a Tensor/list/number/np.ndarray object.')
class Tensor:
def __init__ (self, val = np.array([1.0])):
"""
Initialize Tensor object.
val (scalar/list/np.ndarray)
Attributes:
=============
self.val (number): the value of the Tensor
self.der (number): the derivative of the Tensor
Example
=============
>>> a = Tensor(2.0)
>>> print(a.val)
2.0
>>> print(a.der)
1.0
>>> a.der = 2
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: can't set attribute
"""
#check inputs
if check_number(val):
self._val = np.array([val])
elif check_list(val):
self._val = np.array(val)
elif check_array(val):
self._val = val
else:
raise TypeError('The input of val should be a number, a list or a numpy array.')
#self._flag = False
self._der = np.ones(len(self._val))
@property
def val(self):
return self._val
@property
def der(self):
return self._der
def __add__ (self, other):
"""
Overload the addition
EXAMPLES
==========
>>> f = Tensor(2.0) + 3.0
>>> (f.val, f.der)
(5.0, 1.0)
"""
x = Tensor()
if isinstance(other, Tensor):
x._val = self._val + other.val
x._der = self._der + other.der
return x
elif check_number(other) or check_array(other):
x._val = self._val + other
x._der = self._der
return x
else:
raise TypeError('Tensor object can only add a number or a Tensor object')
def __radd__ (self, other):
"""
Overload the addition and make it commutable
EXAMPLES
==========
>>> f = 3.0 + Tensor(2.0)
>>> (f.val, f.der)
(5.0, 1.0)
"""
return self.__add__(other)
def __sub__ (self, other):
"""
Overload the substraction
EXAMPLES
==========
>>> f = Tensor(2.0) - 3.0
>>> (f.val, f.der)
(-1.0, 1.0)
"""
x = Tensor()
try:
x._val = self._val - other.val
x._der = self._der- other.der
return x
except:
if check_number(other) or check_array(other):
x._val = self._val - other
x._der = self._der
return x
else:
                raise TypeError('Tensor object can only subtract a number or a Tensor object')
def __rsub__ (self, other):
"""
Overload the substraction and make it commutable
EXAMPLES
==========
>>> f = 3.0 - Tensor(2.0)
>>> (f.val, f.der)
(1.0, -1.0)
"""
return - self.__sub__(other)
def __mul__ (self, other):
"""
Overload the multiplication
EXAMPLES
==========
>>> f = Tensor(2.0) * 3.0
>>> (f.val, f.der)
(6.0, 3.0)
"""
x =Tensor()
if isinstance(other, Tensor):
x._val = self._val * other.val
x._der = self._der * other.val + self._val * other.der
return x
elif check_number(other) or check_array(other):
x._val = self._val * other
x._der = self._der * other
return x
else:
raise TypeError('Tensor object can only multiply with Tensor or number')
def __rmul__ (self, other):
"""
Overload the multiplication and make it commutable
EXAMPLES
==========
>>> f = 3.0 * Tensor(2.0)
>>> (f.val, f.der)
(6.0, 3.0)
"""
return self.__mul__(other)
def __truediv__ (self, other):
"""
Overload the division, input denominator cannot include zero. Otherwise raise ValueError.
EXAMPLES
==========
>>> f = Tensor(2.0)/2.0
>>> (f.val, f.der)
(1.0, 0.5)
"""
x = Tensor()
if (check_number(other) and other == 0) or\
(isinstance(other, Tensor) and check_anyzero(other.val)) or \
(check_array(other) and check_anyzero(other)):
raise ZeroDivisionError('The Tensor is divided by 0')
if isinstance(other, Tensor):
x._val = self._val/ other.val
x._der = (self._der*other.val - self._val*other.der)/(other.val*other.val)
return x
elif check_number(other) or check_array(other):
x._val = self._val / other
x._der = self._der / other
return x
else:
raise TypeError('Tensor can only be divided by a number or a Tensor object')
def __rtruediv__ (self, other):
"""
Overload the division, and make it commutable. Input denominator cannot include zero, otherwise raise ValueError.
EXAMPLES
==========
>>> f = 2.0/Tensor(2.0)
>>> (f.val, f.der)
(1.0, -0.5)
"""
x = Tensor()
if check_anyzero(self._val):# a/tensor(0)
raise ZeroDivisionError('The Tensor object in denominator should not be zero.')
# if isinstance(other, Tensor):
# x._val = other.val/ self._val
# x._der = (self._val*other.der - self._der*other.val)/(self._val*self._val)
# return x
if check_number(other) or check_array(other):
x._val = other / self._val
x._der = -other * self._der / (self._val * self._val)
return x
else:
raise TypeError('Only an numpy array or number can be divided by Tensor.')
def __pow__ (self, other):
"""
Overload the power method
EXAMPLES
==========
>>> f = Tensor(2.0)**3
>>> (f.val, f.der)
(8.0, 12.0)
"""
x = Tensor()
if isinstance(other, Tensor): # x**a -> a*x**(a-1)
if (other.val > 0).all():
x._val = self._val ** other.val
x._der = (self._val ** other.val) * (other.der * np.log (self._val) + other.val * self._der/ self._val)
return x
# elif (self._val == 0 and other.val <1).any():
# raise ZeroDivisionError('the base cannot be 0 when power is negative')
else:
raise ValueError('log function undefined for exponent <= 0')
elif check_number(other) or (check_array(other) and len(other) == 1):
if other == 0:
x._val = 1
x._der = 0
return x
elif (self._val == 0).any() and other <1:
raise ZeroDivisionError('the base cannot be 0 when power is negative')
else:
other = float(other) #convert to float first
x._val = self._val** other
x._der = other * self._val ** (other - 1) * self._der
return x
else:
raise TypeError('Tensor base can only be operated with a Tensor object or a number/np.ndarray')
def __rpow__ (self, other):
"""
Overload the power method and make it commutable.
EXAMPLES
==========
>>> f = 3**Tensor(2.0)
>>> (f.val, f.der)
(9.0, 9.887510598012987)
"""
x = Tensor()
if check_number(other) or (check_array(other) and len(other) == 1):
if other <= 0:
raise ValueError('log function undefined for exponent <= 0')
else:
x._val = other ** self._val
x._der = (other ** self._val) * (self._der * np.log(other))
return x
else:
raise TypeError('Tensor base can only be operated with a Tensor object or a number/np.ndarray')
def __neg__ (self):
"""
Overload the negation method.
EXAMPLES
==========
>>> f = -Tensor(2.0)
>>> (f.val, f.der)
(-2.0, -1.0)
"""
x = Tensor()
x._val = -self._val
x._der = -self._der
return x
# Alice added functions
def __lt__(self, other):
try:
return self._val < other.val
        except:  # other is a scalar
return self._val < other
def __le__(self, other):
try:
return self._val <= other.val
        except:  # other is a scalar
return self._val <= other
def __gt__(self, other):
try:
return self._val > other.val
        except:  # other is a scalar
return self._val > other
def __ge__(self, other):
try:
return self._val >= other.val
        except:  # other is a scalar
return self._val >= other
def __eq__(self, other):
if not isinstance(other, Tensor):
raise TypeError('Tensor object can only be compared with Tensor object')
return (self._val == other.val).all()
def __ne__(self, other):
return not self.__eq__(other).all()
def __abs__(self):
# only used for calculation
return abs(self._val)
def __str__(self):
"""
Examples
================
>>> c = tensor.Tensor(3.0)
>>> print(c)
Tensor(3.0)
"""
return f"Tensor({self._val.tolist()})"
def __repr__(self):
"""
Examples
================
>>> c = tensor.Tensor(3.0)
>>> repr(c)
'Tensor: val(3.0), der(1.0)'
"""
return f"Tensor: val({self._val.tolist()}), der({self._der.tolist()})"
def __len__(self):
return len(self._val) | AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/AD_Derivators/functions/tensor.py | tensor.py |
import numpy as np
from AD_Derivators.functions.tensor import Tensor
from AD_Derivators.helper_functions.ad_utils import check_array, check_number, check_list, check_list_shape
class AD:
def __init__(self,num_functions, num_inputs, vec_dim = 1):
"""
Initializes the AD object with Tensor inputs and AD mode.
Args:
============
num_functions (int): number of functions
        num_inputs (int): number of inputs in each function
vec_dim: the length of each argument of functions
ATTRIBUTES:
============
self.inputs (list of Tensor): a list of Tensor objects.
self.function (function): the list of functions for automatic differentiation
self.jacobian (np.ndarray): the jacobian matrix of the inputs given self.function
self.jvp (np.ndarray): the value of automatic differentiation given inputs,
self.functions at a given direction p
self.num_functions (int): the number of functions
self.num_inputs (int): the number of inputs for each function. All should be the same.
self.shape (tuple): (self.num_functions, self.num_inputs, vec_dim). All vector Tensor should have the same length.
"""
self._num_func = num_functions
self._num_inputs = num_inputs
self._vec_dim = vec_dim
self._inputs = None
self._func = None
self._jacobian = None
self._jvp = None
@property
def num_functions(self):
return self._num_func
@property
def num_inputs(self):
return self._num_inputs
def _prepare_inputs(self, mat_inputs):
"""
This function helps user to prepare inputs of AD class by
giving a list
Args:
=======================
mat_inputs (list or np.ndarray): a. a 2-d (m x n) list for AD class with m functions, each having n inputs
b. a 3-d (m x n x v) list for class with m functions, each having n inputs and each input have a Tensor in length of v
Returns:
========================
res: a 2-d (m x n) list of Tensor
"""
if isinstance(mat_inputs, list):
mat_inputs = np.array(mat_inputs)
assert self._check_shape(mat_inputs)
res = []
for m in range(self.num_functions):
inp = []
for n in range(self.num_inputs):
inp.append(Tensor(mat_inputs[m,n]))
res.append(inp)
return res
def add_inputs(self, inputs):
"""
Add inputs for the class. The dimension of inputs should match self.shape.
Would update self.inputs
Args:
=================================
inputs(list or np.ndarray): a 2-d or 3-d array. The dimension should match self.shape.
"""
# always convert list to np.array first
if isinstance(inputs,list):
inputs = np.array(inputs)
# check the dimension
assert self._check_shape(inputs)
self._inputs = self._prepare_inputs(inputs)
self._jacobian = None # reset jacobian function
def build_function(self, input_functions):
""" Calculates the jacobian matrix given the input functions.
!!! No Tensor objects should be used in input_function
unless it's the input variable
would update self.functions and self.jacobian and erase self.jvp
Args
=========================
input_functions (list): a list of m functions. each function have n inputs. Each input could
be either a scaler or a vector. Each function should have a return vector or scalar with the
same dimension as each input of the functions.
"""
# check list and length
assert isinstance(input_functions, list) and len(input_functions) == self._num_func
# check functions
if all(callable(f) for f in input_functions):
self._func = input_functions
else:
            raise TypeError('the input should be a list of callable functions')
if self._inputs is None:
raise ValueError('No inputs added to AD class.')
self._jacobian = []
for f, inp in zip(self._func, self._inputs):
devs = []
if self._vec_dim == 1:
const_inp = [t.val[0] for t in inp]
else:
const_inp = [t.val.tolist() for t in inp]
for i,t in enumerate(inp):
input_values = const_inp.copy()
input_values[i] = t # only changes the ith element to be Tensor object.
# calculate partial derivatives
val = f(*input_values)
# check function returns
if not isinstance(val, Tensor):
raise TypeError('The input function should only return a Tensor object')
# if len(tensor) > 1
if self._vec_dim > 1:
devs.append(val.der.tolist())
# if tensor is a scalar
else:
devs.append(val.der[0])
self._jacobian.append(devs)
# jacobian is an np.ndarray (m x n or m x n x v)
self._jacobian = np.array(self._jacobian)
# reset self._jvp
self._jvp = None
@property
def inputs(self):
return self._inputs
def __str__(self):
"""
Examples
================
ad = autograd.AD(tensor.Tensor(2.0))
>>> print(ad)
AD(Tensor([2.0]))
"""
return f"AD(Tensor({[str(tens) for tens in self._inputs]}))"
def __repr__(self):
"""
Examples
================
ad = autograd.AD(tensor.Tensor(2.0))
>>> repr(ad)
'AD: inputs(Tensor([2.0])), function(None)'
"""
return f"AD: inputs({[str(tens) for tens in self._inputs]}), function({str(self._func)})"
@property
def function(self):
return self._func
@property
def jacobian(self):
"""Returns the Jacobian matrix given Tensor inputs and the input functions.
"""
return self._jacobian
@property
def jvp(self):
"""Returns the dot product between the Jacobian of the given
function at the point
"""
return self._jvp
def run(self, seed = [[1.0]]):
"""Returns the differentiation results given the mode.
Right now AD only allows forward mode.
would update self.jvp
INPUTS
=======
seed (list or np.ndarray): shape ==(num_inputs x vec_dim) the direction of differentiation . THE ARRAY HAS TO BE 2D!!!
RETURNS
========
results (np.ndarray): shape == (num_func x vec_dim)
"""
return self.__forward(seed)
def __forward(self, seed = [[1.0]]):
"""Returns the differentiation value of the current graph by forward mode.
INPUTS
=======
seed (list or np.ndarray): 2-d list or np.ndarray:
a. vec_dim == 1: 1 x num_inputs
b. vec_dim > 1: vec_dim x num_inputs
RETURNS
========
self._jvp (np.ndarray): shape == (num_func x vec_dim)
"""
# always convert list to np.array first
if isinstance(seed, list):
seed = np.array(seed)
if isinstance(seed, np.ndarray) and seed.shape == (self._vec_dim, self.num_inputs):
pass
else:
raise TypeError('seed should be a 2-d (vec_dim x num_inputs) list of numbers ')
self._jvp = self._jacobian@seed.T
assert self._jvp.shape == (self._num_func, self._vec_dim)
return self._jvp
@property
def shape(self):
return (self._num_func, self._num_inputs, self._vec_dim)
def get_inputs(self, option = "numpy"):
"""
option (str): "numpy" or "tensor"
Returens:
===============
if option == "numpy": returns the np.ndarray format inputs shape: (num_function, num_inputs, vec_dim)
elif option == "tensor":returns the same 2d Tensor list as calling self.inputs.
"""
if option == "tensor":
return self._inputs
elif option == "numpy":
output = []
for m in range(self.num_functions):
vec = []
for n in range(self.num_inputs):
vec.append(self._inputs[m][n].val)
output.append(vec)
return np.array(output)
else:
raise IOError("The option should be either numpy or tensor")
def _check_shape(self, array):
"""
array(np.ndarray): a 2d or 3d shape np.array
"""
flag = False
if isinstance(array, np.ndarray) and len(array.shape) ==2 and array.shape == self.shape[:2]:
flag = True
elif isinstance(array, np.ndarray) and len(array.shape) == 3 and array.shape == self.shape:
flag = True
return flag
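
# Illustrative usage sketch (added commentary, not part of the original module).
# The lambda is a hypothetical example function; real code would typically build
# functions from AD_Derivators.functions.tensor operations as well:
#
#   >>> ad = AD(num_functions=1, num_inputs=2, vec_dim=1)
#   >>> ad.add_inputs([[3.0, 1.0]])           # one function of two scalar inputs
#   >>> ad.build_function([lambda x, y: x * y])
#   >>> ad.jacobian                           # [d/dx, d/dy] evaluated at (3, 1)
#   array([[1., 3.]])
#   >>> ad.run(seed=[[1.0, 0.0]])             # directional derivative along x
#   array([[1.]])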
| AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/AD_Derivators/functions/autograd.py | autograd.py |
import numpy
def check_number(x):
return not hasattr(x,"__len__")
def check_array(x):
return isinstance(x, numpy.ndarray)
def check_list(x):
return isinstance(x, list)
def check_anyzero(x):
return (x== 0).any()
def isodd(i):
if (i+1) % 2 == 0:
return True
else:
return False
def check_tan(x):
if check_number(x):
x = numpy.array([x])
count = 0
for i in x:
if isodd(i /(numpy.pi/2)):
count += 1
if count != 0:
return True
else:
return False
def check_anyneg(x):
return (x<=0).any()
def check_lengths(ll, length = None):
if length:
len_check = length
else:
len_check = len(ll[0])
return all(len(l) == len_check for l in ll)
def check_list_shape(ll):
ll_array = numpy.array(ll)
return ll_array.shape
def check_nontensor_input(x):
return check_number(x) or check_list(x) or check_array(x) | AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/AD_Derivators/helper_functions/ad_utils.py | ad_utils.py |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 22:29:50 2020
@author: Courage
"""
from AD_Derivators import rootfinder
from AD_Derivators.functions import autograd, tensor
import numpy as np
import pytest
x0 = np.array([[0,0],[0,0]])
TOL = 1e-6
MIN_DIFF = 1e-5
def f0(x):
return x**3+1
def f1(x,y):
return x+y
def f2(x,y):
return x- y - 4
def f3(x,y):
return x**2+y**3
def f4(x,y):
return x + y**2 -2
def f5(x,y):
return x**5+y**3-9
def f6(x,y):
return x**2+y-3
def f7(x,y):
return tensor.exp(x) + 0*y - 1
def test_x0_type():
with pytest.raises(AssertionError):
rootfinder.root('hello',[f1,f2])
def test_f_type():
with pytest.raises(AssertionError):
rootfinder.root([1,2], f1)
def test_tol_type():
with pytest.raises(AssertionError):
rootfinder.root([1,2], [f1,f2], tolerance=-0.01)
def test_maxiter_type():
with pytest.raises(AssertionError):
rootfinder.root([1,2], [f1,f2], max_iterations=100.5)
def test_method_type():
with pytest.raises(AssertionError):
rootfinder.root([1,2], [f1,f2], method=1)
def test_variable_number():
with pytest.raises(IOError):
rootfinder.root([1,2], [f1, f2, f3])
def test_min_diff():
x0 = [0]
func = [f0]
with pytest.raises(ValueError):
rootfinder.root(x0, func, min_diff=0.1)
def test_x0_dim():
x = np.array([[[1,2],[2,3]],[[3,4],[4,5]]]) #len(x)==3
with pytest.raises(AssertionError):
rootfinder.root(x, [f1, f2])
def test_root_newton():
x0 = [1]
func = [f0]
res = rootfinder.root(x0, func, method = "newton")
assert (abs(res['roots'] +1) < TOL).all()
def test_root_broyden1():
x0 = [0]
func = [f0]
res= rootfinder.root(x0, func, method = "broyden1")
assert(abs(res['roots'] +1) < TOL).all()
def test_root_broyden2():
x0 = [0]
func = [f0]
res= rootfinder.root(x0, func, method = "broyden2")
assert(abs(res['roots'] +1) < TOL).all()
def test_exception_error1():
x0 = [0]
func = [f0]
with pytest.raises(Exception):
rootfinder.root(x0, func, method = "hello")
def test_exception_error2():
x0 = np.array([-3])
x1 = np.array([-3])
functions = [f0]
with pytest.raises(Exception):
rootfinder.root(x0 = x0, x1 = x1, functions = functions, max_iterations = 100, tolerance= 1e-5, method = "hello", min_diff= 1e-6)
def test_check_root():
flag1 = rootfinder._check_root(np.array([[2], [-2]]), [f1,f2], 0.01)
flag2 = rootfinder._check_root(np.array([[2], [-3]]), [f1,f2], 0.01)
assert flag1
assert not flag2
def test_get_fx():
res1 = rootfinder._get_fx(np.array([[2], [-2]]), [f1,f2])
assert (res1 == np.array([[0],[0]])).all()
res2 = rootfinder._get_fx(np.array([[-2], [-3]]), [f1,f2])
assert (res2 == np.array([[-5],[-3]])).all()
def test_newton_1():
ad_test = autograd.AD(2,2,1)
x0 = np.array([0,0])
x0 = np.repeat([x0], 2, axis = 0)
ad_test.add_inputs(x0)
ad_test.build_function([f1,f2])
x, it, case = rootfinder._newton(ad_test, TOL, 100, MIN_DIFF)
assert (abs(x - np.array([[2], [-2]])) < TOL ).all()
def test_newton_2():
ad_test = autograd.AD(2,2,1)
x0 = np.array([1,1])
x0 = np.repeat([x0], 2, axis = 0)
ad_test.add_inputs(x0)
ad_test.build_function([f5,f6])
x, it, case = rootfinder._newton(ad_test, TOL, 100, MIN_DIFF)
a = x[0,0]
b = x[1,0]
assert abs(f5(a,b)) < TOL and abs(f6(a,b)) < TOL
def test_newton_maxreach():
ad_test = autograd.AD(2,2,1)
x0 = np.array([1,1])
x0 = np.repeat([x0], 2, axis = 0)
ad_test.add_inputs(x0)
ad_test.build_function([f5,f6])
x, it, case = rootfinder._newton(ad_test, TOL, 2, MIN_DIFF)
assert case == "[FAIL] maximum iteration reached"
def test_broyden1():
ad_test = autograd.AD(2,2,1)
x0 = np.array([0,0])
x0 = np.repeat([x0], 2, axis = 0)
ad_test.add_inputs(x0)
ad_test.build_function([f3,f4])
x, it, case = rootfinder._broyden_good(ad_test, TOL, 100, min_diff = 1e-9)
assert (abs(x - np.array([[1], [-1]])) < TOL ).all()
def test_broyden1_maxreach():
ad_test = autograd.AD(2,2,1)
x0 = np.array([0,0])
x0 = np.repeat([x0], 2, axis = 0)
ad_test.add_inputs(x0)
ad_test.build_function([f3,f4])
x, it, case = rootfinder._broyden_good(ad_test, TOL, 2, min_diff = 1e-9)
assert case == "[FAIL] maximum iteration reached"
def test_broyden2():
ad_test = autograd.AD(2,2,1)
x0 = np.array([10,10])
x0 = np.repeat([x0], 2, axis = 0)
ad_test.add_inputs(x0)
ad_test.build_function([f1,f7])
x, it, case = rootfinder._broyden_bad(ad_test, TOL, 100, min_diff= 1e-9)
assert (abs(x - np.array([[0], [0]])) < TOL ).all()
def test_broyden2_maxreach():
ad_test = autograd.AD(2,2,1)
x0 = np.array([0,0])
x0 = np.repeat([x0], 2, axis = 0)
ad_test.add_inputs(x0)
ad_test.build_function([f3,f4])
x, it, case = rootfinder._broyden_bad(ad_test, TOL, 2, min_diff = 1e-9)
assert case == "[FAIL] maximum iteration reached"
def test_bisection_IOerror():
x0 = np.array([-5, 0])
x1 = np.array([0, -5])
functions = [f1, f2]
with pytest.raises(IOError):
rootfinder.root(x0 = x0, x1 = x1, functions = functions, max_iterations = 100, tolerance= 1e-5, method = "bisection", min_diff= 1e-6)
def test_bisection():
x0 = np.array([-5])
x1 = np.array([0])
functions = [f0]
x = rootfinder.root(x0 = x0, x1 = x1, functions = functions, max_iterations = 100, tolerance= 1e-5, method = "bisection", min_diff= 1e-6)
assert (abs(x["roots"] - np.array([-1])) < 1e-5 ).all()
def test_bisection_maxreach():
x0 = np.array([-5])
x1 = np.array([0])
functions = [f0]
x = rootfinder.root(x0 = x0, x1 = x1, functions = functions, max_iterations = 2, tolerance= 1e-5, method = "bisection", min_diff= 1e-6)
assert x.get('case') == "[FAIL] maximum iteration reached"
def test_secant():
x0 = np.array([-3])
x1 = np.array([-3])
functions = [f0]
x = rootfinder.root(x0 = x0, x1 = x1, functions = functions, max_iterations = 100, tolerance= 1e-5, method = "secant", min_diff= 1e-6)
assert (abs(x["roots"] - np.array([[-1]])) < 1e-3 ).all()
def test_secant_IOerror():
x0 = np.array([-3, 1])
x1 = np.array([-3, 1])
functions = [f1, f2]
with pytest.raises(IOError):
rootfinder.root(x0 = x0, x1 = x1, functions = functions, max_iterations = 100, tolerance= 1e-5, method = "secant", min_diff= 1e-6)
def test_secant_maxreach():
x0 = np.array([-3])
x1 = np.array([-3])
functions = [f0]
res = rootfinder.root(x0 = x0, x1 = x1, functions = functions, max_iterations = 2, tolerance= 1e-5, method = "secant", min_diff= 1e-6)
assert res.get('case') == "[FAIL] maximum iteration reached"
def test_check_zero():
a = np.array([1, 2, 5, 0, 7, 12, 0]).reshape(-1,1)
check_a = rootfinder._check_zero(a)
assert not (check_a == 0).any()
def test_prepare_bisection():
funcs = [f0]
x0 = np.array([1])
x0 = x0.reshape(len(funcs),-1)
x0 = np.expand_dims(x0, axis=0)
x0 = np.repeat(x0, len(funcs), axis = 0 )
x1 = np.array([-2])
x1 = x1.reshape(len(funcs),-1)
x1 = np.expand_dims(x1, axis=0)
x1 = np.repeat(x1, len(funcs), axis = 0 )
res1,res2 = rootfinder._prepare_bisection(x0, x1, funcs)
assert res1 == np.array([[[-2]]]) and res2 == np.array([[[1]]])
def test_prepare_bisection_IOerror():
funcs = [f0]
x0 = np.array([1])
x0 = x0.reshape(len(funcs),-1)
x0 = np.expand_dims(x0, axis=0)
x0 = np.repeat(x0, len(funcs), axis = 0 )
x1 = np.array([2])
x1 = x1.reshape(len(funcs),-1)
x1 = np.expand_dims(x1, axis=0)
x1 = np.repeat(x1, len(funcs), axis = 0 )
with pytest.raises(IOError):
rootfinder._prepare_bisection(x0, x1, funcs)
| AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/tests/test_rootfinder.py | test_rootfinder.py |
import numpy as np
from AD_Derivators.functions import autograd, tensor
import pytest
TOL = 1e-9
def input_function1(x):
return x**2+tensor.log(x)- tensor.exp(x)
def input_function2(x,y):
return x**3+tensor.sin(y)*x
def input_function3(x,y):
return x**2+tensor.log(y)- tensor.exp(x)
def input_function4(x,y,z):
return x+y+z-2
def input_function5(x,y,z):
return x*y*z
input1= tensor.Tensor(2)
input2= tensor.Tensor([-1])
input3= tensor.Tensor(np.array([3,1,2]))
ad1 = autograd.AD(2,2,1)
ad2 = autograd.AD(2,3,1)
ad3 = autograd.AD(1,2,3)
def test_build_AD_obj():
ad1 = autograd.AD(1,2,3)
assert ad1.num_functions == 1
assert ad1.num_inputs == 2
assert ad1.shape == (1,2,3)
assert ad1.function is None
assert ad1.jacobian is None
assert ad1.jvp is None
ad2 = autograd.AD(2,2)
assert ad2.num_functions == 2
assert ad2.num_inputs == 2
assert ad2.shape == (2,2,1)
def test_prepare_inputs():
mat_inputs1 = [[1,2],[3,4]]
mat_inputs2 = np.random.rand(2,3)
mat_inputs3 = np.random.rand(1,2,3)
ad1.add_inputs(mat_inputs1)
ad2.add_inputs(mat_inputs2)
ad3.add_inputs(mat_inputs3)
assert ad1.inputs== [[tensor.Tensor(1),tensor.Tensor(2)],\
[tensor.Tensor(3),tensor.Tensor(4)]]
assert ad2.inputs == [[tensor.Tensor(mat_inputs2[0,0]),tensor.Tensor(mat_inputs2[0,1]),tensor.Tensor(mat_inputs2[0,2])],\
[tensor.Tensor(mat_inputs2[1,0]),tensor.Tensor(mat_inputs2[1,1]),tensor.Tensor(mat_inputs2[1,2])]]
assert ad3.inputs == [[tensor.Tensor(mat_inputs3[0,0,:]),tensor.Tensor(mat_inputs3[0,1,:])]]
def test_build_function():
ad1.build_function([input_function2, input_function3])
ad2.build_function([input_function4, input_function5])
a1 = ad1.inputs
x1 = a1[0][0].val[0]
y1 = a1[0][1].val[0]
z1 = a1[1][0].val[0]
h1 = a1[1][1].val[0]
assert (abs(ad1.jacobian - np.array([[3*x1**2+np.sin(y1), x1*np.cos(y1)],[2*z1-np.exp(z1), 1/h1]])) < TOL).all()
def test_build_function_1():
with pytest.raises(TypeError):
ad1.build_function(input_function2, input_function3)
def test_build_function_2():
with pytest.raises(AssertionError):
ad1 = autograd.AD(0,0,0)
ad1.build_function([input_function2, input_function3])
def test_build_function_3():
with pytest.raises(ValueError):
ad1 = autograd.AD(2,2,1)
ad1.build_function([input_function2, input_function3])
def test_run():
seed1 = [[0.2,0.8]]
seed2 = np.array([[1,1]])
res1 = ad1.run(seed1)
assert (res1 == ad1.jvp).all()
assert (res1 == ad1.jacobian@np.array(seed1).T).all()
res2 = ad1.run(seed2)
assert (res2 == ad1.jvp).all()
assert (res2 == ad1.jacobian@seed2.T).all()
| AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/tests/test_autograd.py | test_autograd.py |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 18 14:41:11 2020
@author: Courage
"""
import numpy as np
#import tensor
from AD_Derivators.helper_functions.ad_utils import check_number, check_array, check_list, check_anyzero, check_tan, check_anyneg, isodd
from AD_Derivators.functions import tensor
import pytest
alpha = 2.0
beta = 3.0
a = 2.0
s = "hello"
v1 = tensor.Tensor(val = np.array([0,0,0]))
v2 = tensor.Tensor(val = np.array([np.pi/2,np.pi/2,np.pi/2]))
v3 = tensor.Tensor(val = np.array([1,1,1]))
v4 = tensor.Tensor(val = 1)
v5 = tensor.Tensor(val = np.array([-2]))
v6 = tensor.Tensor(val = np.array([np.pi]))
v7 = tensor.Tensor(val = np.array([1,2,3,4]))
v8 = tensor.Tensor(val = np.array([2,3]))
def test_sin_type_error():
with pytest.raises(TypeError):
tensor.sin(s)
def test_cos_type_error():
with pytest.raises(TypeError):
tensor.cos(s)
def test_tan_type_error():
with pytest.raises(TypeError):
tensor.tan(s)
def test_sin_result_1():
val = tensor.sin(v1).val
der = tensor.sin(v1).der
v_check = [np.sin(0),np.sin(0),np.sin(0)]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_sin_result_2():
val = tensor.sin(v6).val
der = tensor.sin(v6).der
v_check = [np.sin(np.pi)]
der_check = [-1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_sin_result_3():
val = tensor.sin([alpha,beta])
v_check = np.sin([alpha,beta])
assert np.array_equal(val,v_check)
def test_cos_result():
val = tensor.cos(v1).val
der = tensor.cos(v1).der
v_check = [np.cos(0),np.cos(0),np.cos(0)]
der_check = [0,0,0]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_cos_result_1():
val = tensor.cos([alpha,beta])
v_check = np.cos([alpha,beta])
assert np.array_equal(val,v_check)
def test_tan_result():
val = tensor.tan(v1).val
der = tensor.tan(v1).der
v_check = [np.tan(0),np.tan(0),np.tan(0)]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_tan_result_1():
val = tensor.tan([0,0])
v_check = np.tan([0,0])
assert np.array_equal(val,v_check)
def test_tan_undefined():
with pytest.raises(ValueError):
tensor.tan(v2)
def test_tan_undefined_1():
with pytest.raises(ValueError):
tensor.tan([np.pi/2,np.pi/2])
def test_asin_type_error():
with pytest.raises(TypeError):
tensor.asin(s)
def test_acos_type_error():
with pytest.raises(TypeError):
tensor.acos(s)
def test_atan_type_error():
with pytest.raises(TypeError):
tensor.atan(s)
def test_sinh_type_error():
with pytest.raises(TypeError):
tensor.sinh(s)
def test_cosh_type_error():
with pytest.raises(TypeError):
tensor.cosh(s)
def test_tanh_type_error():
with pytest.raises(TypeError):
tensor.tanh(s)
def test_asin_result_1():
val = tensor.asin(v1).val
der = tensor.asin(v1).der
v_check = [0,0,0]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_asin_result_2():
with pytest.raises(ValueError):
tensor.asin(v3)
def test_asin_result_3():
with pytest.raises(ValueError):
tensor.asin(v8)
def test_asin_result_4():
with pytest.raises(ValueError):
tensor.asin(alpha)
def test_asin_result_5():
val = tensor.asin([0,0,0])
v_check = [0,0,0]
assert np.array_equal(val,v_check)
def test_sinh_result():
val = tensor.sinh(v1).val
der = tensor.sinh(v1).der
v_check = [0,0,0]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_sinh_result_1():
val = tensor.sinh([0,0,0])
v_check = [0,0,0]
assert np.array_equal(val,v_check)
def test_acos_result_1():
val = tensor.acos(v1).val
der = tensor.acos(v1).der
v_check = [np.pi/2,np.pi/2,np.pi/2]
der_check = [-1, -1, -1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_acos_result_2():
with pytest.raises(ValueError):
tensor.acos(v3)
def test_acos_result_3():
with pytest.raises(ValueError):
tensor.acos(v8)
def test_acos_result_4():
with pytest.raises(ValueError):
tensor.acos(alpha)
def test_acos_result_5():
val = tensor.acos([0,0,0])
v_check = [np.pi/2,np.pi/2,np.pi/2]
assert np.array_equal(val,v_check)
def test_cosh_result():
val = tensor.cosh(v1).val
der = tensor.cosh(v1).der
v_check = [1,1,1]
der_check = [0,0,0]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_cosh_result_1():
val = tensor.cosh([0,0,0])
v_check = [1,1,1]
assert np.array_equal(val,v_check)
def test_atan_result():
val = tensor.atan(v1).val
der = tensor.atan(v1).der
v_check = [0,0,0]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_atan_result_1():
val = tensor.atan([0,0,0])
v_check = [0,0,0]
assert np.array_equal(val,v_check)
def test_tanh_result():
val = tensor.tanh(v1).val
der = tensor.tanh(v1).der
v_check = [0,0,0]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_tanh_result_1():
val = tensor.tanh([0,0,0])
v_check = [0,0,0]
assert np.array_equal(val,v_check)
def test_exp_result():
val = tensor.exp(v1).val
der = tensor.exp(v1).der
v_check = [1,1,1]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_exp_result_1():
val = tensor.exp([0,0,0])
v_check = [1,1,1]
assert np.array_equal(val,v_check)
def test_exp_base_type():
with pytest.raises(TypeError):
tensor.exp(v1, s)
def test_exp_base_value():
with pytest.raises(ValueError):
tensor.exp(v1, -2)
def test_exp_type_error():
with pytest.raises(TypeError):
tensor.exp(s)
def test_log_result():
val = tensor.log(v3).val
der = tensor.log(v3).der
v_check = [0,0,0]
der_check = [1,1,1]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_log_result_1():
val = tensor.log([1,1,1])
v_check = [0,0,0]
assert np.array_equal(val,v_check)
def test_log_base_type():
with pytest.raises(TypeError):
tensor.log(v3, s)
def test_log_base_value():
with pytest.raises(ValueError):
tensor.log(v3, -2)
def test_log_type_error():
with pytest.raises(TypeError):
tensor.log(s)
def test_log_zero():
with pytest.raises(ValueError):
tensor.log(v1)
def test_log_zero_1():
with pytest.raises(ValueError):
tensor.log(0)
def test_log_neg():
with pytest.raises(ValueError):
tensor.log(v5)
def test_log_neg_1():
with pytest.raises(ValueError):
tensor.log(-1)
def test_sigmoid_result():
val = tensor.sigmoid(v8).val
der = tensor.sigmoid(v8).der
v_check = np.array([1/(1+np.e**(-2)), 1/(1+np.e**(-3))])
der_check = np.array([np.e**(-2)/(1+np.e**(-2))**2, np.e**(-3)/(1+np.e**(-3))**2])
assert np.allclose(val,v_check) and np.allclose(der,der_check)
def test_sigmoid_result1():
with pytest.raises(TypeError):
tensor.sigmoid(s)
def test_sigmoid_result2():
with pytest.raises(TypeError):
tensor.sigmoid(v8, t0 = s)
def test_sigmoid_result3():
with pytest.raises(TypeError):
tensor.sigmoid(v8, L = s)
def test_sigmoid_result4():
with pytest.raises(TypeError):
tensor.sigmoid(v8, k = s)
def test_datatype():
with pytest.raises(TypeError):
tensor.Tensor(s)
def test_sqrt_result():
val = tensor.sqrt(v3).val
der = tensor.sqrt(v3).der
v_check = [1,1,1]
der_check = [0.5,0.5,0.5]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_sqrt_result_1():
val = tensor.sqrt([1,1,1])
v_check = [1,1,1]
assert np.array_equal(val,v_check)
def test_sqrt_result1():
with pytest.raises(TypeError):
tensor.sqrt(s)
def test_sqrt_result2():
with pytest.raises(ValueError):
tensor.sqrt(v5)
def test_sqrt_result3():
with pytest.raises(ValueError):
tensor.sqrt(v1)
def test_sqrt_result4():
with pytest.raises(ValueError):
tensor.sqrt(-1)
def test_add_result():
f = v3 + v3
val = f.val
der = f.der
v_check = [2,2,2]
der_check = [2,2,2]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_add_typeerror():
with pytest.raises(TypeError):
v3 + s
def test_radd_result():
f = v7 + v7
val = f.val
der = f.der
v_check = [2,4,6,8]
der_check = [2,2,2,2]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_radd_typeerror():
with pytest.raises(TypeError):
s + v3
def test_sub_result():
f = v3 - v3
val = f.val
der = f.der
v_check = [0,0,0]
der_check = [0,0,0]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_sub_typeerror():
with pytest.raises(TypeError):
v3 - s
def test_rsub_result():
f = v4 - v4
val = f.val
der = f.der
v_check = [0]
der_check = [0]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_rsub_typeerror():
with pytest.raises(TypeError):
s - v3
def test_mul_result():
f = v3 * v3
val = f.val
der = f.der
v_check = [1,1,1]
der_check = [2,2,2]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_mul_typeerror():
with pytest.raises(TypeError):
v3 * s
def test_rmul_result():
f = v3 * v3
val = f.val
der = f.der
v_check = [1,1,1]
der_check = [2,2,2]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_rmul_typeerror():
with pytest.raises(TypeError):
s * v3
def test_div_result():
f = v3 / v3
val = f.val
der = f.der
v_check = [1,1,1]
der_check = [0,0,0]
assert np.array_equal(val,v_check) and np.array_equal(der,der_check)
def test_div_typeerror():
with pytest.raises(TypeError):
v3 / s
def test_div_zero():
with pytest.raises(ZeroDivisionError):
v3 / v1
def test_rdiv_typeerror():
with pytest.raises(TypeError):
s / v3
def test_pow_posnum_result():
x = tensor.Tensor(a)
f = x**alpha
assert f.val == 4.0 and f.der == 4.0
def test_pow_negnum_result():
x = tensor.Tensor(1)
f = x**(-2)
assert f.val == 1.0 and f.der == -2.0
def test_pow_array_result():
f = v7 ** 2
val = f.val
der = f.der
v_check = [1, 4, 9, 16]
der_check = [2, 4, 6, 8]
assert np.array_equal(val,v_check) and np.array_equal(der, der_check)
def test_pow_result():
x = tensor.Tensor(a)
f = x**x
assert f.val == 4.0 and f.der == 6.772588722239782
def test_pow_typeerror():
with pytest.raises(TypeError):
(tensor.Tensor(a))**s
def test_pow_zero_valueerror():
with pytest.raises(ValueError):
x = tensor.Tensor(np.array([0]))
x**x
def test_pow_neg_valueerror():
with pytest.raises(ValueError):
v5**v5
def test_pow_zeroerror():
with pytest.raises(ZeroDivisionError):
v1**(-2)
def test_pow_zerovalue():
x = tensor.Tensor(0)
f = x**0
assert f.val == 1 and f.der == 0
def test_rpow_num_result():
x = tensor.Tensor(a)
f = alpha**x
assert f.val == 4.0 and f.der == 4 * np.log(2)
def test_rpow_array_result():
f = 3**v8
val = f.val
der = f.der
v_check = [9, 27]
der_check = [9.887510598012987, 29.662531794038966]
assert np.array_equal(val,v_check) and np.array_equal(der, der_check)
def test_rpow_num_valueerror():
with pytest.raises(ValueError):
x = tensor.Tensor(a)
(-3)**x
def test_rpow_array_valueerror():
with pytest.raises(ValueError):
(-3)**v8
def test_rpow_typeerror():
with pytest.raises(TypeError):
s**(tensor.Tensor(a))
def test_rpow_valueerror_zero():
with pytest.raises(ValueError):
x = tensor.Tensor(np.array([0]))
x**x
def test_neg__num_result():
x = tensor.Tensor(2)
f = -x
assert f.val == -2.0 and f.der == -1.0
def test_neg_array_result():
f = -v7
val = f.val
der = f.der
v_check = [-1, -2, -3, -4]
der_check = [-1, -1, -1, -1]
assert np.array_equal(val,v_check) and np.array_equal(der, der_check)
def test_lt_result():
assert (v1 < v3).all()
def test_le_result():
assert (v1 <= v3).all()
assert (v1 <= v1).all()
def test_gt_result():
assert (v3 > v1).all()
def test_ge_result():
assert (v3 >= v1).all()
assert (v3 >= v3).all()
def test_eq_result():
assert v3 == v3
def test_ne_result():
assert v3 != v1 | AD-Derivators | /AD_Derivators-0.0.2-py3-none-any.whl/tests/test_tensor.py | test_tensor.py |
cs107 Final Project - Group 28: Byte Me
=====
Members
-----
- Luo, Yanqi
- Pasco, Paolo
- Ayouch, Rayane
- Clifton, Logan
## TravisCI badge
[](https://app.travis-ci.com/cs107-byteme/cs107-FinalProject)
## Codecov badge
[](https://codecov.io/gh/cs107-byteme/cs107-FinalProject)
## Broader Impact and Inclusivity Statement
Our automatic differentiation library can lead to a number of benefits in a variety of fields. Since automatic differentiation is used in fields like physics, statistics, and machine learning, our package has the potential to accelerate advances in those fields by facilitating the key process of automatic differentiation. One potential impact to be wary of is that, while the package can be used for many important scientific methods, we don't have much control over how the package is used on the user end. This means that the burden is on the user to use our package responsibly (for example, if the user is using this package for machine learning, it's important to check that the input data is free of biases or errors; otherwise, this package will be used to perpetuate the biases present in the training data). Ultimately, this package is a tool, and like all tools, it's helpful for its intended purpose but can be misused.
In addition, we've worked to make this package as accessible and easy to understand as possible, making its use approachable for anyone regardless of programming/mathematical experience. Our workflow for this project has also been inclusive—all pull requests are reviewed by other members of the team, giving everyone a hand in contributing to the code. The biggest barrier to access (both on the contributing side and the implementation side) is getting started. While we've attempted to document the code and make it intuitive enough that any user can get started with our package, the fact remains that the steps needed to install/start contributing to the package (creating a virtual environment, or getting started with git/test suites/version control), while not impossible to learn, can be intimidating to someone without a certain level of programming experience. Our hope is that these steps are easy enough to research that this intimidation can be overcome, but we do recognize that for underrepresented populations without as much access to CS education, an intimidating first step can be enough to dissuade someone from pursuing a project further. | AD-byteme | /AD-byteme-0.1.5.tar.gz/AD-byteme-0.1.5/README.md | README.md |
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as file:
long_description = file.read()
setup(
  name = 'AD-byteme',         # distribution name of the package
  packages = ['AD-byteme'],   # package folder to include (same as the name)
  version = '0.1.5',          # increment with every release
  license='MIT',              # license under which the package is released
  description = 'Automatic differentiation library',   # short description of the library
  author = 'CS107 Group 28: ByteMe',                    # authors
  author_email = 'gcpasco@college.harvard.edu',         # contact e-mail
#long_description = long_description,
#long_description_content_type = "text/markdown",
url = 'https://github.com/cs107-byteme/cs107-FinalProject', # Provide either the link to your github or to your website
download_url = 'https://github.com/cs107-byteme/cs107-FinalProject/archive/refs/tags/alpha.tar.gz',
keywords = ['AUTOMATIC DIFFERENTIATION'], # Keywords that define your package best
install_requires=[ # All of our dependencies
'numpy'
],
classifiers=[
    'Development Status :: 3 - Alpha',      # current state of the package: "3 - Alpha", "4 - Beta" or "5 - Production/Stable"
    'Intended Audience :: Developers',      # the intended audience is developers
    'Topic :: Software Development :: Build Tools',
    'License :: OSI Approved :: MIT License',   # license classifier matching the license above
    'Programming Language :: Python :: 3',      # Python versions supported
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) | AD-byteme | /AD-byteme-0.1.5.tar.gz/AD-byteme-0.1.5/setup.py | setup.py |
from ADbase import *
import numpy as np
import math
import pytest
'''
For this milestone we only consider scalar inputs, so the name attribute simply stores the name of the 'self' instance after each operation;
the tests below therefore use the default name attribute, which is always 'x'. This will be improved in the next milestone.
For the next milestone we will store all sorted unique variable names, e.g. ['x','y','z'], and compute their corresponding derivative seed vectors, e.g. [[1,0,0],[0,1,0],[0,0,1]].
'''
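# Hedged illustration (an assumption about the planned design, not part of the test suite;
# `_seed_vectors_sketch` is a hypothetical helper): one plausible way to seed multi-variable
# derivatives for the sorted names ['x','y','z'], matching the [[1,0,0],[0,1,0],[0,0,1]] idea above.
def _seed_vectors_sketch():
    names = ['x', 'y', 'z']
    return {name: [1.0 if i == j else 0.0 for j in range(len(names))]
            for i, name in enumerate(names)}  # e.g. {'x': [1.0, 0.0, 0.0], ...}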
# Testing inputs
def test_invalid_der():
with pytest.raises(TypeError):
x = AD(0, 'hello', 'x')
def test_invalid_val():
with pytest.raises(TypeError):
x = AD('hello', 1, 'x')
def test_invalid_name():
with pytest.raises(TypeError):
x = AD(0 , 1, 1)
# Testing __str__
def test_str():
x = AD(0 , 1, 'x')
assert x.__str__() == 'Value is:0, derivative is:1, Name is:x'
# Testing __add__
def test_add():
#add constant
x = AD(0, 1, 'x')
z = x + 2
assert z.value == 2
assert z.deriv == 1
assert z.name == 'x'
#add AD
y = AD(2, 1)
k = x + y
assert k.value == 2
assert k.deriv == 2
assert k.name == 'x'
#type error
with pytest.raises(TypeError):
x+'s'
# Testing __radd__
def test_radd():
#add constant
x = AD(0, 1, 'x')
z = 2 + x
assert z.value == 2
assert z.deriv == 1
assert z.name == 'x'
#add AD
y = AD(2, 1)
k = y + x
assert k.value == 2
assert k.deriv == 2
assert k.name == 'x'
#type error
with pytest.raises(TypeError):
's'+ x
# Testing __mul__
def test_mul():
#multiply constant
x = AD(1, 1, 'x')
z = x * 3
assert z.value == 3
assert z.deriv == 3
assert z.name == 'x'
#multiply AD
y = AD(2, 1)
k = x * y
assert k.value == 2
assert k.deriv == 3
assert k.name == 'x'
#type error
with pytest.raises(TypeError):
x*'s'
    # combined expression: x * y + 4
m = x * y + 4
assert m.value == 6
assert m.deriv == 3
assert m.name == 'x'
# Testing __rmul__
def test_rmul():
#multiply constant
x = AD(1, 1, 'x')
z = 3 * x
assert z.value == 3
assert z.deriv == 3
assert z.name == 'x'
#multiply AD
y = AD(2, 1)
k = y * x
assert k.value == 2
assert k.deriv == 3
assert k.name == 'x'
#type error
with pytest.raises(TypeError):
's'*x
# Testing __neg__
def test_neg():
x = AD(1, 1, 'x')
z = -x
assert z.value == -1
assert z.deriv == -1
assert z.name == 'x'
# Testing __sub__
def test_sub():
#sub constant
x = AD(0, 1, 'x')
z = x - 2
assert z.value == -2
assert z.deriv == 1
assert z.name == 'x'
#sub AD
y = AD(2, 1)
k = x - y
assert k.value == -2
assert k.deriv == 0
assert k.name == 'x'
#type error
with pytest.raises(TypeError):
x - 's'
# Testing __rsub__
def test_rsub():
#sub constant
x = AD(0, 1, 'x')
z = 2 -x
assert z.value == 2
assert z.deriv == -1
assert z.name == 'x'
#sub AD
y = AD(2, 1)
k = y-x
assert k.value == 2
assert k.deriv == 0
assert k.name == 'x'
#type error
with pytest.raises(TypeError):
's' -x
# Testing __truediv__
def test_truediv():
#truediv constant
x = AD(2, 1, 'x')
z = x / 2
assert z.value == 1
assert z.deriv == 1/2
assert z.name == 'x'
#truediv AD
y = AD(2, 1)
k = x / y
assert k.value == 1
assert k.deriv == 0
assert k.name == 'x'
#divisor is 0
with pytest.raises(ZeroDivisionError):
x / 0
m = AD(0, 1)
with pytest.raises(ZeroDivisionError):
x / m
#type error
with pytest.raises(TypeError):
x / 's'
# Testing __rtruediv__
def test_rtruediv():
#truediv constant
x = AD(2, 1, 'x')
z = 2/x
assert z.value == 1
assert z.deriv == -1/2
assert z.name == 'x'
#truediv AD
y = AD(2, 1)
k = y/x
assert k.value == 1
assert k.deriv == 0
assert k.name == 'x'
#divisor is 0
m = AD(0, 1)
with pytest.raises(ZeroDivisionError):
2 / m
with pytest.raises(ZeroDivisionError):
x / m
#type error
with pytest.raises(TypeError):
's' / x
# Testing __pow__
def test_pow():
# pow constant
x = AD(2, 1, 'x')
z = x **2
assert z.value == 4
assert z.deriv == 4
assert z.name == 'x'
y = AD(-1, 1)
with pytest.raises(ValueError):
y ** (0.5)
k= AD(0, 1, 'k')
with pytest.raises(ZeroDivisionError):
k ** (0)
with pytest.raises(ZeroDivisionError):
k ** (1)
with pytest.raises(ZeroDivisionError):
k ** (-1)
# pow AD
m = AD(0.5, 1)
with pytest.raises(ValueError):
y ** m
with pytest.raises(ZeroDivisionError):
k ** y
with pytest.raises(ZeroDivisionError):
k ** k
l = x **x
assert l.value == 4
assert l.deriv == (1*np.log(2) + 1)*4
assert l.name == 'x'
#type error
with pytest.raises(TypeError):
x**('s')
# Testing __rpow__
def test_rpow():
# rpow constant
x = AD(2, 1, 'x')
z = 2 **x
assert z.value == 4
assert z.deriv == 4 * np.log(2)
assert z.name == 'x'
with pytest.raises(ValueError):
(-3) ** x
y = AD(-1, 1)
with pytest.raises(ZeroDivisionError):
0 ** (y)
k= AD(0, 1)
with pytest.raises(ZeroDivisionError):
0 ** (k)
#type error
with pytest.raises(TypeError):
('s')** x
# Testing __sin__
def test_sin():
x = AD(0.5, 1, 'x')
z = x.sin()
assert z.value == np.sin(0.5)
assert z.deriv == np.cos(0.5)
# Testing __sinh__
def test_sinh():
x = AD(0.5, 1, 'x')
z = x.sinh()
assert z.value == np.sinh(0.5)
assert z.deriv == np.cosh(0.5)
# Testing __arcsin__
def test_arcsin():
x = AD(0.5, 1, 'x')
z = x.arcsin()
assert z.value == np.arcsin(0.5)
assert z.deriv == ((1 - 0.5 ** 2) ** (-0.5))
k= AD(-2, 1)
with pytest.raises(ValueError):
k.arcsin()
# Testing __cos__
def test_cos():
x = AD(0.5, 1, 'x')
z = x.cos()
assert z.value == np.cos(0.5)
assert z.deriv == -np.sin(0.5)
# Testing __cosh__
def test_cosh():
x = AD(0.5, 1, 'x')
z = x.cosh()
assert z.value == np.cosh(0.5)
assert z.deriv == np.sinh(0.5)
# Testing __arccos__
def test_arccos():
x = AD(0.5, 1, 'x')
z = x.arccos()
assert z.value == np.arccos(0.5)
assert z.deriv == - ((1 - 0.5 ** 2) ** (-0.5))
k= AD(-2, 1)
with pytest.raises(ValueError):
k.arccos()
# Testing __tan__
def test_tan():
x = AD(0.5, 1, 'x')
z = x.tan()
assert z.value == np.tan(0.5)
assert z.deriv == 1 / np.power(np.cos(0.5), 2)
k= AD(1.5*np.pi, 1)
with pytest.raises(ValueError):
k.tan()
# Testing __tanh__
def test_tanh():
x = AD(0.5, 1, 'x')
z = x.tanh()
assert z.value == np.tanh(0.5)
assert z.deriv == 1 - (np.tanh(0.5))**2
# Testing __arctan__
def test_arctan():
x = AD(0.5, 1, 'x')
z = x.arctan()
assert z.value == np.arctan(0.5)
assert z.deriv == (1 + 0.5 ** 2) ** (-1)
# Testing __exp__
def test_exp():
x = AD(0.5, 1, 'x')
z = x.exp()
assert z.value == np.exp(0.5)
assert z.deriv == np.exp(0.5)
# Testing __ln__
def test_ln():
x = AD(0.5, 1, 'x')
z = x.ln()
assert z.value == np.log(0.5)
assert z.deriv == 2
k= AD(-3, 1)
with pytest.raises(ValueError):
k.ln()
# Testing __ln_base__
def test_ln_base():
x = AD(0.5, 1, 'x')
z = x.ln_base(2)
assert z.value == math.log(0.5,2)
assert z.deriv == 2 / np.log(2)
k= AD(-3, 1)
with pytest.raises(ValueError):
k.ln_base(2)
# Testing __sqrt__
def test_sqrt():
x = AD(0.5, 1, 'x')
z = x.sqrt()
assert z.value == np.sqrt(0.5)
assert z.deriv == 0.5 * 0.5 **(-0.5)
# Testing __logistic__
def test_logistic():
x = AD(0.5, 1, 'x')
z = x.logistic()
assert z.value == 1 / (1 + np.exp(-0.5))
assert z.deriv == np.exp(-0.5) / ((1 + np.exp(-0.5)) ** 2)
| AD-byteme | /AD-byteme-0.1.5.tar.gz/AD-byteme-0.1.5/test/test_ADbase.py | test_ADbase.py |
# CS207 Final Project Repository
[](https://travis-ci.com/cs207-f18-WIRS/cs207-FinalProject)
[](https://coveralls.io/github/cs207-f18-WIRS/cs207-FinalProject?branch=master)
This repository contains the Final Project Deliverable on Automatic Differentiation for the Harvard Course:
"CS 207: Systems Development for Computational Science"
- [```Github Repository```](https://github.com/cs207-f18-WIRS/cs207-FinalProject)
- [```PyPi Python Package Index distribution: 'AD-cs207'```](https://pypi.org/project/AD-cs207/)
## Documentation can be found at [```docs```](https://github.com/cs207-f18-WIRS/cs207-FinalProject/tree/master/docs):
  - [```docs/Final.ipynb:```](https://github.com/cs207-f18-WIRS/cs207-FinalProject/blob/master/docs/Final.ipynb) Automatic Differentiation package documentation.
  - [```demo/Demo and Presentation Project CS207.ipynb:```](https://github.com/cs207-f18-WIRS/cs207-FinalProject/blob/master/demo/Demo%20and%20Presentation%20Project%20CS207.ipynb) How to install the Automatic Differentiation package, demo and final presentation.
  - [```docs/How-to-package.md:```](https://github.com/cs207-f18-WIRS/cs207-FinalProject/blob/master/docs/How-to-package.md) Explanation of how the package was distributed.
  - [```Course Project description```](https://iacs-cs-207.github.io/cs207-F18/project.html) : Overview of the instructions for the project.
## Course information:
- [```Main course website```](https://iacs-cs-207.github.io/cs207-F18/) : Check this site for all course-related policies including the syllabus, course schedule, and project policies.
- [```GitHub Repo```](https://github.com/IACS-CS-207/cs207-F18) : All course materials will be released on GitHub.
## Contributors (alphabetic):
- FELDHAUS Isabelle
- JIANG Shenghao
- STRUYVEN Robbert
- WANG William
| AD-cs207 | /AD-cs207-1.0.0.tar.gz/AD-cs207-1.0.0/README.md | README.md |
import setuptools
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name='AD-cs207',
version='1.0.0',
author='Shenghao Jiang, Isabelle Feldhaus, Robbert Struyven, William Wang',
author_email=" ",
description='Automatic Differentiation Package',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=[ 'sympy>=1.3' ],
packages=setuptools.find_packages(),
keywords=['Automatic differentiation', 'gradients', 'Python'],
url='https://github.com/cs207-f18-WIRS/cs207-FinalProject',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
],
)
| AD-cs207 | /AD-cs207-1.0.0.tar.gz/AD-cs207-1.0.0/setup.py | setup.py |
import math
class FD:
""" implementation of forward AD using dual numbers """
def __init__(self, string, value, d_seed):
self.value = value
self.dual = d_seed
self.string = string
def __str__(self):
""" returns the string value of the function """
return self.string
def grad(self):
""" returns the derivative of the function based on seed """
return self.dual
""" Implementation of operators using operator overloading """
def __add__(self, other):
n = str(self) + "+" + str(other)
if not isinstance(other, FD):
z = FD(n, self.value + other, self.dual)
return z
z = FD(n, self.value + other.value, self.dual + other.dual)
return z
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
n = "(" + str(self) + ")" + "-(" + str(other) + ")"
if not isinstance(other, FD):
z = FD(n, self.value - other, self.dual)
return z
z = FD(n, self.value - other.value, self.dual - other.dual)
return z
def __rsub__(self, other):
n = str(other) + "-(" + str(self) + ")"
z = FD(n, other - self.value, -self.dual)
return z
def __mul__(self, other):
n = "(" + str(self) + ")" + "*(" + str(other) + ")"
if not isinstance(other, FD):
z = FD(n, self.value * other, self.dual*other)
return z
z = FD(n, self.value * other.value, self.value*other.dual + self.dual*other.value)
return z
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
n = "(" + str(self) + ")" + "/(" + str(other) + ")"
if not isinstance(other, FD):
z = FD(n, self.value / other, self.dual/other)
return z
z = FD(n, self.value / other.value, (other.value*self.dual - self.value*other.dual)/(other.value**2))
return z
def __rtruediv__(self, other):
n = str(other) + "/" + str(self)
z = FD(n, other / self.value, -other*self.dual / self.value**2)
return z
def __pow__(self, other):
n = "POW(" + str(self) + "," + str(other) + ")"
if not isinstance(other, FD):
z = FD(n, self.value ** other, other*self.value**(other-1)*self.dual)
return z
nd = (self.value**other.value) * ((other.value/self.value*self.dual) + (other.dual * math.log(self.value)))
z = FD(n, self.value ** other.value, nd)
return z
def __rpow__(self, other):
n = "POW(" + str(other) + "," + str(self) + ")"
z = FD(n, other ** self.value, self.dual*math.log(other)*other**self.value)
return z
""" implement unary operations for forward div """
def sin(x):
if not isinstance(x, FD):
return math.sin(x)
n = "SIN(" + str(x) + ")"
z = FD(n, math.sin(x.value), x.dual*math.cos(x.value))
return z
def cos(x):
if not isinstance(x, FD):
return math.cos(x)
n = "COS(" + str(x) + ")"
z = FD(n, math.cos(x.value), -x.dual*math.sin(x.value))
return z
def tan(x):
if not isinstance(x, FD):
return math.tan(x)
n = "TAN(" + str(x) + ")"
z = FD(n, math.tan(x.value), x.dual/(math.cos(x.value)**2))
return z
def ln(x):
if not isinstance(x, FD):
return math.log(x)
n = "ln(" + str(x) + ")"
z = FD(n, math.log(x.value), x.dual/x.value)
return z
def log(x, base):
if not isinstance(x, FD):
return math.log(x,base)
n = "log(" + str(x) + ")/log(" + str(base) + ")"
z = FD(n, math.log(x.value)/math.log(base), x.dual/(x.value*math.log(base)) )
return z
def arcsin(x):
if not isinstance(x, FD):
return math.asin(x)
n = "arcsin(" + str(x) + ")"
z = FD(n, math.asin(x.value), x.dual/math.sqrt(1.0-x.value**2))
return z
def arccos(x):
if not isinstance(x, FD):
return math.acos(x)
n = "arccos(" + str(x) + ")"
z = FD(n, math.acos(x.value), -1.0*x.dual/math.sqrt(1.0-x.value**2))
return z
def arctan(x):
if not isinstance(x, FD):
return math.atan(x)
n = "arctan(" + str(x) + ")"
z = FD(n, math.atan(x.value), x.dual/(1.0+x.value**2))
return z
def sinh(x):
if not isinstance(x, FD):
return math.sinh(x)
n = "sinh(" + str(x) + ")"
z = FD(n, math.sinh(x.value), x.dual*math.cosh(x.value))
return z
def cosh(x):
if not isinstance(x, FD):
return math.cosh(x)
n = "cosh(" + str(x) + ")"
z = FD(n, math.cosh(x.value), x.dual*math.sinh(x.value))
return z
def tanh(x):
if not isinstance(x, FD):
return math.tanh(x)
n = "tanh(" + str(x) + ")"
z = FD(n, math.tanh(x.value), x.dual*(1.0-math.tanh(x.value)**2))
return z
def sqrt(x):
if not isinstance(x, FD):
return math.sqrt(x)
n = "sqrt(" + str(x) + ")"
z = FD(n, math.sqrt(x.value), 0.5*x.dual/math.sqrt(x.value) )
return z
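
# Hedged usage sketch (an assumption about typical use, not an official demo from this package):
# forward mode propagates (value, dual) pairs, so seeding x with dual 1.0 and y with dual 0.0
# makes .grad() return df/dx directly.
if __name__ == "__main__":
    x = FD("x", 2.0, 1.0)      # differentiate with respect to x
    y = FD("y", 3.0, 0.0)      # y is treated as a constant in this pass
    f = x * y + x ** 2         # value and derivative are propagated together
    print(f)                   # built-up expression string: (x)*(y)+POW(x,2)
    print(f.value, f.grad())   # 10.0 and df/dx = y + 2x = 7.0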
| AD-cs207 | /AD-cs207-1.0.0.tar.gz/AD-cs207-1.0.0/AD/for_ad.py | for_ad.py |
import math
print('Importing Reverse Mode Automatic Differentiation')
class Var:
"""
Var class used for reverse AD
If derivative doesn't exist at a point for one of the variables, forward AD should be used
"""
def __init__(self, name, value):
self.value = value
self.children = []
self.grad_value = None
self.name = name
def __str__(self):
""" returns the string of the formula """
return self.name
def grad(self):
""" returns the gradient of the formula with respect to the variable """
if self.grad_value is None:
self.grad_value = sum(val * var.grad() for val, var in self.children)
return self.grad_value
def __add__(self, other):
""" adds the vars, returns a new formula and appens children to the variables """
n = str(self) + "+" + str(other)
if not isinstance(other, Var):
z = Var(n, self.value + other)
self.children.append((1.0, z))
return z
z = Var(n, self.value + other.value)
other.children.append((1.0, z))
self.children.append((1.0, z))
return z
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
""" subtracts the vars, returns a new formula and appens children to the variables """
n = "(" + str(self) + ")" + "-(" + str(other) + ")"
if not isinstance(other, Var):
z = Var(n, self.value - other)
self.children.append((1.0, z))
return z
z = Var(n, self.value - other.value)
self.children.append((1.0, z))
other.children.append((-1.0, z))
return z
def __rsub__(self, other):
n = str(other) + "-(" + str(self) + ")"
z = Var(n, other - self.value)
self.children.append((-1.0, z))
return z
def __mul__(self, other):
""" multiply the vars, returns a new formula and appens children to the variables """
n = "(" + str(self) + ")" + "*(" + str(other) + ")"
if not isinstance(other, Var):
z = Var(n, self.value * other)
self.children.append((other, z))
return z
z = Var(n, self.value * other.value)
self.children.append((other.value, z))
other.children.append((self.value, z))
return z
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
""" divides the vars, returns a new formula and appens children to the variables """
n = "(" + str(self) + ")" + "/(" + str(other) + ")"
if not isinstance(other, Var):
z = Var(n, self.value / other)
self.children.append((1/other, z))
return z
z = Var(n, self.value / other.value)
self.children.append((1/other.value, z))
other.children.append((-self.value/other.value**2, z))
return z
def __rtruediv__(self, other):
n = str(other) + "/" + str(self)
z = Var(n, other / self.value)
self.children.append((-other/self.value**2, z))
return z
def __pow__(self, other):
""" exponentiates the vars, returns a new formula and appens children to the variables """
n = "POW(" + str(self) + "," + str(other) + ")"
if not isinstance(other, Var):
z = Var(n, self.value ** other)
self.children.append((other*self.value**(other-1), z))
return z
z = Var(n, self.value ** other.value)
self.children.append((other.value*self.value**(other.value-1), z))
other.children.append((math.log(self.value)*self.value**other.value,z))
return z
def __rpow__(self, other):
n = "POW(" + str(other) + "," + str(self) + ")"
z = Var(n, other ** self.value)
self.children.append((math.log(other)*other**self.value,z))
return z
def sin(x):
""" calculates sin of the formula/var x """
if not isinstance(x, Var):
return math.sin(x)
n = "sin(" + str(x) + ")"
z = Var(n, math.sin(x.value))
x.children.append((math.cos(x.value), z))
return z
def cos(x):
""" calculates cos of the formula/var x """
if not isinstance(x, Var):
return math.cos(x)
n = "cos(" + str(x) + ")"
z = Var(n, math.cos(x.value))
x.children.append((-math.sin(x.value), z))
return z
def tan(x):
""" calculates tan of the formula/var x """
if not isinstance(x, Var):
return math.tan(x)
n = "tan(" + str(x) + ")"
z = Var(n, math.tan(x.value))
x.children.append((1.0+math.tan(x.value)**2, z))
return z
def sqrt(x):
""" calculates sqrt of the formula/var x """
if not isinstance(x, Var):
return math.sqrt(x)
n = "sqrt(" + str(x) + ")"
z = Var(n, math.sqrt(x.value))
x.children.append((0.5/(x.value)**0.5, z))
return z
def ln(x):
""" calculates ln of the formula/var x """
if not isinstance(x, Var):
return math.log(x)
n = "ln(" + str(x) + ")"
z = Var(n, math.log(x.value))
x.children.append((1.0/x.value, z))
return z
def log(x, base):
""" calculates log(x, base) of the formula/var x """
if not isinstance(x, Var):
return math.log(x)/math.log(base)
n = "ln(" + str(x) + ")/ln(" + str(base) + ")"
z = Var(n, math.log(x.value,base))
x.children.append((1.0/(x.value*math.log(base)), z))
return z
def arcsin(x):
""" calculates arcsin of the formula/var x """
if not isinstance(x, Var):
return math.asin(x)
n = "arcsin(" + str(x) + ")"
z = Var(n, math.asin(x.value))
x.children.append((1.0/math.sqrt(1.0-x.value**2), z))
return z
def arccos(x):
""" calculates arccos of the formula/var x """
if not isinstance(x, Var):
return math.acos(x)
n = "arccos(" + str(x) + ")"
z = Var(n, math.acos(x.value))
x.children.append((-1.0/math.sqrt(1.0-x.value**2), z))
return z
def arctan(x):
""" calculates arctan of the formula/var x """
if not isinstance(x, Var):
return math.atan(x)
n = "arctan(" + str(x) + ")"
z = Var(n, math.atan(x.value))
x.children.append((1.0/(1.0+x.value**2), z))
return z
def sinh(x):
""" calculates sinh of the formula/var x """
if not isinstance(x, Var):
return math.sinh(x)
n = "sinh(" + str(x) + ")"
z = Var(n, math.sinh(x.value))
x.children.append((math.cosh(x.value), z))
return z
def cosh(x):
""" calculates cosh of the formula/var x """
if not isinstance(x, Var):
return math.cosh(x)
n = "cosh(" + str(x) + ")"
z = Var(n, math.cosh(x.value))
x.children.append((math.sinh(x.value), z))
return z
def tanh(x):
""" calculates tanh of the formula/var x """
if not isinstance(x, Var):
return math.tanh(x)
n = "tanh(" + str(x) + ")"
z = Var(n, math.tanh(x.value))
x.children.append((1.0-math.tanh(x.value)**2, z))
return z
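
# Hedged usage sketch (an assumption about typical use, not an official demo from this package):
# reverse mode records children during the forward pass; seeding the output's grad_value with 1.0
# lets every input recover its partial derivative via grad().
if __name__ == "__main__":
    x = Var("x", 2.0)
    y = Var("y", 3.0)
    f = x * y + x * x                    # forward pass builds the computation graph
    f.grad_value = 1.0                   # seed the output adjoint
    print(f)                             # built-up formula string
    print(f.value, x.grad(), y.grad())   # 10.0, df/dx = 7.0, df/dy = 2.0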
| AD-cs207 | /AD-cs207-1.0.0.tar.gz/AD-cs207-1.0.0/AD/rev_ad.py | rev_ad.py |
import AD.interpreter as ast
import sympy
class SD():
"""
User friendly interface for the AST interpreter.
"""
def __init__(self, frmla):
self.formula = frmla
self.lexer = ast.Lexer(frmla)
self.parser = ast.Parser(self.lexer)
self.interpreter = ast.Interpreter(self.parser)
self.vd = None
def set_point(self, vd):
"""
sets the point to derive at
"""
if vd is not None:
self.vd = vd
if self.vd is None:
raise NameError("Must set point to evaluate")
def diff(self, dv, vd=None, order=1):
"""
returns numeric derivative with respect to variable dv
vd is used to set a new point
order is the order of the derivative to take
"""
self.set_point(vd)
new_interpreter = self.interpreter
for i in range(order-1):
new_frmla = new_interpreter.symbolic_diff(self.vd, dv)
new_lexer = ast.Lexer(new_frmla)
new_parser = ast.Parser(new_lexer)
new_interpreter = ast.Interpreter(new_parser)
return new_interpreter.differentiate(self.vd, dv)
def symbolic_diff(self, dv, vd=None, order=1, output='default'):
"""
returns symbolic derivative with respect to variable dv
vd is used to set a new point
order is the order of the derivative to take
"""
self.set_point(vd)
new_interpreter = self.interpreter
for i in range(order-1):
new_frmla = new_interpreter.symbolic_diff(self.vd, dv)
new_lexer = ast.Lexer(new_frmla)
new_parser = ast.Parser(new_lexer)
new_interpreter = ast.Interpreter(new_parser)
formul = new_interpreter.symbolic_diff(self.vd, dv)
simplified = self.symplify(formul, output)
return simplified
def diff_all(self, vd=None):
"""
returns numeric derivative of all variables
"""
self.set_point(vd)
return self.interpreter.diff_all(self.vd)
def val(self, vd=None):
"""
returns the value of the function at the point
"""
self.set_point(vd)
return self.interpreter.interpret(self.vd)
def new_formula(self, frmla):
"""
sets a new formula for the object
"""
self.formula = frmla
self.lexer = ast.Lexer(frmla)
self.parser = ast.Parser(self.lexer)
self.interpreter = ast.Interpreter(self.parser)
self.vd = None
def symplify(self, formul, output):
""" simplifies a formula string, output changes output format """
def POW(a, b):
return a ** b
def EXP(a):
return sympy.exp(a)
def LOG(a):
return sympy.log(a)
def COS(a):
return sympy.cos(a)
def SIN(a):
return sympy.sin(a)
def TAN(a): # Tangent Function
return sympy.tan(a)
        def SINH(a): # Hyperbolic sine
            return sympy.sinh(a)
        def COSH(a): # Hyperbolic cosine
            return sympy.cosh(a)
        def TANH(a): # Hyperbolic tangent
            return sympy.tanh(a)
def ARCSIN(a): # Inverse trigonometric functions: inverse sine or arcsine
return sympy.asin(a)
def ARCCOS(a): # Inverse trigonometric functions: inverse cosine or arccosine
return sympy.acos(a)
def ARCTAN(a): # Inverse trigonometric functions: inverse tangent or arctangent
return sympy.atan(a)
string_for_sympy=""
string_for_sympy2=""
split_variables=self.vd.split(",")
for var in split_variables:
l=var.split(":")
string_for_sympy=string_for_sympy+l[0]+" "
string_for_sympy2=string_for_sympy2+l[0]+", "
exec(string_for_sympy2[:-2] + "= sympy.symbols('" + string_for_sympy+ "')")
if output == 'default':
return sympy.simplify(eval(formul))
if output == 'latex':
return sympy.latex(sympy.simplify(eval(formul)))
if output == 'pretty':
sympy.pprint(sympy.simplify(eval(formul)))
return sympy.simplify(eval(formul))
if output == 'all':
print('\nSymbolic differentiation result:')
print(formul)
print('\nSimplified Pretty Print:\n') ; sympy.pprint(sympy.simplify(eval(formul)))
print('\nSimplified Latex code:')
print(sympy.latex(sympy.simplify(eval(formul))))
print('\nSimplified Default:')
print(sympy.simplify(eval(formul)),'\n')
return sympy.simplify(eval(formul))
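
# Hedged usage sketch (an assumption about typical use, not an official demo; assumes the AD
# package is importable and the point string follows the interpreter's "var:value" convention):
if __name__ == "__main__":
    f = SD("x*y + POW(x, 2)")
    print(f.val("x:2, y:3"))        # value of f at the point: 10.0
    print(f.diff("x"))              # numeric df/dx at the stored point: 7.0
    print(f.symbolic_diff("x"))     # simplified symbolic df/dx via sympy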
| AD-cs207 | /AD-cs207-1.0.0.tar.gz/AD-cs207-1.0.0/AD/symdif.py | symdif.py |
""" SPI - Simple Pascal Interpreter """
import copy
import math
import unicodedata
###############################################################################
# #
# LEXER #
# #
###############################################################################
# Token types
#
# EOF (end-of-file) token is used to indicate that
# there is no more input left for lexical analysis
INTEGER, PLUS, MINUS, MUL, DIV, LPAREN, RPAREN, EOF, VAR, COS, SIN, EXP,POW, LOG, COMMA, TAN, ARCSIN, ARCCOS, ARCTAN, SINH, COSH, TANH = (
'INTEGER', 'PLUS', 'MINUS', 'MUL', 'DIV', '(', ')', 'EOF', 'VAR', 'COS', 'SIN', 'EXP', 'POW', 'LOG', ',', 'TAN', 'ARCSIN', 'ARCCOS', 'ARCTAN', 'SINH', 'COSH', 'TANH'
)
def is_number(s):
""" checks if passed in variable is a float """
try:
float(s)
return True
except:
pass
return False
# Inputted strings are broken down into tokens by Lexer
class Token(object):
def __init__(self, type, value):
self.type = type
self.value = value
# Lexer takes a string and parses it into tokens
class Lexer(object):
def __init__(self, text):
# client string input, e.g. "4 + 2 * 3 - 6 / 2"
self.text = text
# self.pos is an index into self.text
self.pos = 0
self.current_char = self.text[self.pos]
def error(self):
raise NameError('Invalid character')
def advance(self):
"""Advance the `pos` pointer and set the `current_char` variable."""
self.pos += 1
if self.pos > len(self.text) - 1:
self.current_char = None # Indicates end of input
else:
self.current_char = self.text[self.pos]
def skip_whitespace(self):
""" Skips any spaces """
while self.current_char is not None and self.current_char.isspace():
self.advance()
def integer(self):
"""Return a (multidigit) float consumed from the input."""
index = 1
cur = self.text[self.pos:self.pos+index]
while(True):
rem = len(self.text) - self.pos - index
if rem > 2:
a = cur + self.text[self.pos+index:self.pos+index+1]
b = cur + self.text[self.pos+index:self.pos+index+2]
c = cur + self.text[self.pos+index:self.pos+index+3]
elif rem > 1:
a = cur + self.text[self.pos+index:self.pos+index+1]
b = cur + self.text[self.pos+index:self.pos+index+2]
c = None
elif rem > 0:
a = cur + self.text[self.pos+index:self.pos+index+1]
b = None
c = None
else:
while index > 0:
self.advance()
index -= 1
return float(cur)
if is_number(c):
# handles 1e-1
cur = c
index += 3
elif is_number(b):
# handles 1e1 / 1.1
cur = b
index += 2
elif is_number(a):
cur = a
index += 1
else:
while index > 0:
self.advance()
index -= 1
return float(cur)
def word(self):
"""Return a multichar integer consumed from the input."""
result = ''
while self.current_char is not None and self.current_char.isalpha():
result += self.current_char
self.advance()
return result
def get_next_token(self):
"""Lexical analyzer (also known as scanner or tokenizer)
This method is responsible for breaking a sentence
apart into tokens. One token at a time.
"""
while self.current_char is not None:
if self.current_char.isspace():
self.skip_whitespace()
continue
if self.current_char.isdigit() or self.current_char == ".":
return Token(INTEGER, self.integer())
# parse constants and constants
if self.current_char.isalpha():
wo = self.word()
w = wo.upper()
if(w == "E"):
return Token(INTEGER, math.e)
elif(w == "PI"):
return Token(INTEGER, math.pi)
elif(w == "COS"):
return Token(COS, self.word())
elif(w == "SIN"):
return Token(SIN, self.word())
elif(w == "EXP"):
return Token(EXP, self.word())
elif(w == "POW"):
return Token(POW, self.word())
elif(w == "LOG"):
return Token(LOG, self.word())
elif(w == "TAN"):
return Token(TAN, self.word())
elif(w == "ARCSIN"):
return Token(ARCSIN, self.word())
elif(w == "ARCCOS"):
return Token(ARCCOS, self.word())
elif(w == "ARCTAN"):
return Token(ARCTAN, self.word())
elif(w == "SINH"):
return Token(SINH, self.word())
elif(w == "COSH"):
return Token(COSH, self.word())
elif(w == "TANH"):
return Token(TANH, self.word())
else:
return Token(VAR, wo)
if self.current_char == '+':
self.advance()
return Token(PLUS, '+')
if self.current_char == '-':
self.advance()
return Token(MINUS, '-')
if self.current_char == '*':
self.advance()
return Token(MUL, '*')
if self.current_char == '/':
self.advance()
return Token(DIV, '/')
if self.current_char == '(':
self.advance()
return Token(LPAREN, '(')
if self.current_char == ')':
self.advance()
return Token(RPAREN, ')')
if self.current_char == ',':
self.advance()
return Token(COMMA, ',')
self.error()
return Token(EOF, None)
###############################################################################
# #
# PARSER #
# #
###############################################################################
# AST objects combine tokens into an abstract syntax tree
class AST(object):
pass
class BinOp(AST):
def __init__(self, left, op, right):
self.left = left
self.token = self.op = op
self.right = right
class Num(AST):
def __init__(self, token):
self.token = token
self.value = token.value
class Var(AST):
def __init__(self, token):
self.token = token
self.name = token.value
class UnaryOp(AST):
def __init__(self, op, expr):
self.token = self.op = op
self.expr = expr
# parses tokens generated by a lexer to create an abstract syntax tree
class Parser(object):
def __init__(self, lexer):
self.lexer = lexer
# set current token to the first token taken from the input
self.current_token = self.lexer.get_next_token()
def error(self):
raise NameError('Invalid syntax')
def eat(self, token_type):
# compare the current token type with the passed token
# type and if they match then "eat" the current token
# and assign the next token to the self.current_token,
# otherwise raise an exception.
if self.current_token.type == token_type:
self.current_token = self.lexer.get_next_token()
else:
self.error()
# parses "factors" which are defined using the context free grammer in the docstring
def factor(self):
"""factor : (PLUS | MINUS) factor | INTEGER | VAR | LPAREN expr RPAREN"""
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
node = UnaryOp(token, self.factor())
return node
elif token.type == MINUS:
self.eat(MINUS)
node = UnaryOp(token, self.factor())
return node
elif token.type == INTEGER:
self.eat(INTEGER)
return Num(token)
elif token.type == VAR:
self.eat(VAR)
return Var(token)
elif token.type == COS:
self.eat(COS)
self.eat(LPAREN)
x = self.expr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node
elif token.type == SIN:
self.eat(SIN)
self.eat(LPAREN)
x = self.expr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node
elif token.type == EXP:
self.eat(EXP)
self.eat(LPAREN)
x = self.expr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node
elif token.type == POW:
self.eat(POW)
self.eat(LPAREN)
x = self.expr()
self.eat(COMMA)
y = self.expr()
self.eat(RPAREN)
return BinOp(left = x, op = token, right = y)
elif token.type == LOG:
self.eat(LOG)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == TAN:
self.eat(TAN)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == ARCSIN:
self.eat(ARCSIN)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == ARCCOS:
self.eat(ARCCOS)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == ARCTAN:
self.eat(ARCTAN)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == SINH:
self.eat(SINH)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == COSH:
self.eat(COSH)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == TANH:
self.eat(TANH)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == LPAREN:
self.eat(LPAREN)
node = self.expr()
self.eat(RPAREN)
return node
else:
raise NameError('Invalid character')
# parses terms defined with the context free grammar in the docstring
def term(self):
"""term : factor ((MUL | DIV) factor)*"""
node = self.factor()
while self.current_token.type in (MUL, DIV):
token = self.current_token
if token.type == MUL:
self.eat(MUL)
elif token.type == DIV:
self.eat(DIV)
node = BinOp(left=node, op=token, right=self.factor())
return node
# parses exprs defined with the context free grammar in the docstring
def expr(self):
"""
expr : term ((PLUS | MINUS) term)*
term : factor ((MUL | DIV) factor)*
factor : (PLUS | MINUS) factor | INTEGER | LPAREN expr RPAREN
"""
node = self.term()
while self.current_token.type in (PLUS, MINUS):
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
elif token.type == MINUS:
self.eat(MINUS)
node = BinOp(left=node, op=token, right=self.term())
return node
# parses the lexer to return an abstract syntax tree
def parse(self):
node = self.expr()
if self.current_token.type != EOF:
self.error()
return node
    # similar to factor, but returns both the parsed factor and its symbolic derivative
def dfactor(self):
"""factor : (PLUS | MINUS) factor | INTEGER | VAR | LPAREN expr RPAREN"""
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
x, dx = self.dfactor()
node = UnaryOp(token, x)
dnode = UnaryOp(token, dx)
return node, dnode
elif token.type == MINUS:
self.eat(MINUS)
x, dx = self.dfactor()
node = UnaryOp(token, x)
dnode = UnaryOp(token, dx)
return node, dnode
elif token.type == INTEGER:
self.eat(INTEGER)
return Num(token), Num(Token(INTEGER, 0))
elif token.type == VAR:
self.eat(VAR)
return Var(token), Var(Token(VAR, "d_" + token.value))
elif token.type == COS:
self.eat(COS)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = UnaryOp(Token(MINUS, "-"), UnaryOp(Token(SIN, "sin"), x)), op=Token(MUL,'*'), right=dx)
elif token.type == SIN:
self.eat(SIN)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = UnaryOp(Token(COS, "cos"), x), op=Token(MUL,'*'), right=dx)
elif token.type == TAN:
self.eat(TAN)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = BinOp(left = Num(Token(INTEGER, 1)), op = Token(PLUS, '+'),right = BinOp(left = UnaryOp(Token(TAN, "tan"), x), op = Token(MUL, '*'), right = UnaryOp(Token(TAN, "tan"), x))), op=Token(MUL,'*'), right = dx)
elif token.type == ARCSIN:
self.eat(ARCSIN)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = BinOp(left = BinOp(left = Num(Token(INTEGER, 1)), op = Token(MINUS, '-'), right = BinOp(left = x, op = Token(MUL, '*'), right = x)), op = Token(POW, 'pow'), right = Num(Token(INTEGER, -0.5))), op=Token(MUL,'*'), right = dx)
elif token.type == ARCCOS:
self.eat(ARCCOS)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, UnaryOp(Token(MINUS, "-"), BinOp(left = BinOp(left = BinOp(left = Num(Token(INTEGER, 1)), op = Token(MINUS, '-'), right = BinOp(left = x, op = Token(MUL, '*'), right = x)), op = Token(POW, 'pow'), right = Num(Token(INTEGER, -0.5))), op=Token(MUL,'*'), right = dx))
elif token.type == ARCTAN:
self.eat(ARCTAN)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = BinOp(left = BinOp(left = Num(Token(INTEGER, 1)), op = Token(PLUS, '+'), right = BinOp(left = x, op = Token(MUL, '*'), right = x)), op = Token(POW, 'pow'), right = Num(Token(INTEGER, -1.0))), op=Token(MUL,'*'), right = dx)
elif token.type == SINH:
self.eat(SINH)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = UnaryOp(Token(COSH, "cosh"), x), op=Token(MUL,'*'), right=dx)
elif token.type == COSH:
self.eat(COSH)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = UnaryOp(Token(SINH, "sinh"), x), op=Token(MUL,'*'), right=dx)
elif token.type == TANH:
self.eat(TANH)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = BinOp(left = Num(Token(INTEGER, 1.0)), op = Token(MINUS, '-'), right = BinOp(left = node,op = Token(MUL, '*'), right = node)), op=Token(MUL,'*'), right=dx)
elif token.type == EXP:
self.eat(EXP)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = node, op=Token(MUL,'*'), right=dx)
elif token.type == POW:
self.eat(POW)
self.eat(LPAREN)
x_cur = copy.deepcopy(self)
x = self.expr()
dx = x_cur.dexpr()
self.eat(COMMA)
y_cur = copy.deepcopy(self)
y = self.expr()
dy = y_cur.dexpr()
self.eat(RPAREN)
node = BinOp(left = x, op = token, right = y)
return node, BinOp(left = node, op = Token(MUL, '*'), right = BinOp(left = BinOp(left = BinOp(left = y, op = Token(DIV,'/'), right = x), op = Token(MUL,'*'), right = dx), op = Token(PLUS, '+'), right = BinOp(left = dy, op = Token(MUL, '*'),right = UnaryOp(Token(LOG, 'LOG'), x))))
elif token.type == LOG:
self.eat(LOG)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = dx, op=Token(DIV,'/'), right=x)
elif token.type == LPAREN:
self.eat(LPAREN)
cur = copy.deepcopy(self)
node = self.expr()
dnode = cur.dexpr()
self.eat(RPAREN)
return node, dnode
else:
raise NameError('Invalid character')
# similar to term, but returns the symbolic derivative of a term
def dterm(self):
"""term : factor ((MUL | DIV) factor)*"""
node, dnode = self.dfactor()
while self.current_token.type in (MUL, DIV):
token = self.current_token
if token.type == MUL:
self.eat(MUL)
elif token.type == DIV:
self.eat(DIV)
rnode, rdnode = self.dfactor()
lowdhi = BinOp(left=dnode, op=Token(MUL,'*'), right=rnode)
hidlow = BinOp(left=node, op=Token(MUL,'*'), right=rdnode)
if token.type == MUL:
# chain rule
dnode = BinOp(left=lowdhi, op=Token(PLUS,'+'), right=hidlow)
node = BinOp(left=node, op=Token(MUL,'*'), right=rnode)
else:
# quotient rule
topnode = BinOp(left=lowdhi, op=Token(MINUS, '-'), right=hidlow)
botnode = BinOp(left=rnode, op=Token(MUL,'*'), right=rnode)
dnode = BinOp(left=topnode, op=Token(DIV,'/'), right=botnode)
node = BinOp(left=node, op=Token(DIV,'/'), right=rnode)
return dnode
# similar to expr, but returns the symbolic derivative of an expr
def dexpr(self):
"""
expr : term ((PLUS | MINUS) term)*
term : factor ((MUL | DIV) factor)*
factor : (PLUS | MINUS) factor | INTEGER | LPAREN expr RPAREN
"""
dnode = self.dterm()
while self.current_token.type in (PLUS, MINUS):
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
elif token.type == MINUS:
self.eat(MINUS)
dnode = BinOp(left=dnode, op=token, right=self.dterm())
return dnode
# similar to parse, but returns an abstract syntax tree representing the symbolic derivative
def dparse(self):
node = self.dexpr()
if self.current_token.type != EOF:
self.error()
return node
###############################################################################
# #
# INTERPRETER #
# #
###############################################################################
class NodeVisitor(object):
"""
determines the correct visit method for nodes in the abstract syntax tree
visit_ used to evaluate the numeric value of an abstract syntax tree
str_visit_ used to evaluate the string form of an abstract syntax tree
"""
def visit(self, node):
method_name = 'visit_' + type(node).__name__
visitor = getattr(self, method_name, self.generic_visit)
return visitor(node)
def str_visit(self, node):
method_name = 'str_visit_' + type(node).__name__
str_visitor = getattr(self, method_name, self.generic_visit)
return str_visitor(node)
def generic_visit(self, node):
raise Exception('No visit_{} method'.format(type(node).__name__))
class Interpreter(NodeVisitor):
"""
Interpreter utilizes visit_ and str_visit_ to evaluate the abstract syntax tree
"""
def __init__(self, parser):
self.parser = parser
self.dtree = copy.deepcopy(parser).dparse()
self.tree = copy.deepcopy(parser).parse()
def visit_BinOp(self, node):
if node.op.type == PLUS:
return self.visit(node.left) + self.visit(node.right)
elif node.op.type == MINUS:
return self.visit(node.left) - self.visit(node.right)
elif node.op.type == MUL:
return self.visit(node.left) * self.visit(node.right)
elif node.op.type == DIV:
return self.visit(node.left) / self.visit(node.right)
elif node.op.type == POW:
return math.pow(self.visit(node.left), self.visit(node.right))
def str_visit_BinOp(self, node):
if node.op.type == PLUS:
l = self.str_visit(node.left)
r = self.str_visit(node.right)
if l == "0":
return r
if r == "0":
return l
return "(" + l + '+' + r + ")"
elif node.op.type == MINUS:
l = self.str_visit(node.left)
r = self.str_visit(node.right)
if r == "0":
return l
if l == "0":
return "(-" + r + ")"
return "(" + self.str_visit(node.left) + '-' + self.str_visit(node.right) + ")"
elif node.op.type == MUL:
l = self.str_visit(node.left)
r = self.str_visit(node.right)
if l == "0" or r == "0":
return "0"
if l == "1":
return r
if r == "1":
return l
else:
return "(" + l + "*" + r + ")"
elif node.op.type == DIV:
return "(" + self.str_visit(node.left) + '/' + self.str_visit(node.right) + ")"
elif node.op.type == POW:
return 'POW(' + self.str_visit(node.left) + ',' + self.str_visit(node.right) + ')'
def visit_Num(self, node):
return node.value
def str_visit_Num(self, node):
return str(node.value)
def visit_Var(self, node):
if self.vardict is None:
raise NameError("no var dict passed in")
if node.name not in self.vardict:
raise NameError("var {} not in var dict".format(node.name))
return self.vardict[node.name]
def str_visit_Var(self, node):
name = node.name
if name[:2] == "d_":
if self.vardict is None:
raise NameError("no var dict passed in")
if name not in self.vardict:
raise NameError("var {} not in var dict".format(name))
return str(self.vardict[name])
else:
return str(name)
def visit_UnaryOp(self, node):
op = node.op.type
if op == PLUS:
return +self.visit(node.expr)
elif op == MINUS:
return -self.visit(node.expr)
elif op == COS:
return math.cos(self.visit(node.expr))
elif op == SIN:
return math.sin(self.visit(node.expr))
elif op == TAN:
return math.tan(self.visit(node.expr))
elif op == ARCSIN:
return math.asin(self.visit(node.expr))
elif op == ARCCOS:
return math.acos(self.visit(node.expr))
elif op == ARCTAN:
return math.atan(self.visit(node.expr))
elif op == SINH:
return math.sinh(self.visit(node.expr))
elif op == COSH:
return math.cosh(self.visit(node.expr))
elif op == TANH:
return math.tanh(self.visit(node.expr))
elif op == EXP:
return math.exp(self.visit(node.expr))
elif op == LOG:
return math.log(self.visit(node.expr))
def str_visit_UnaryOp(self, node):
op = node.op.type
if op == PLUS:
return "+" + self.str_visit(node.expr)
elif op == MINUS:
return "-" + self.str_visit(node.expr)
elif op == COS:
return "COS(" + self.str_visit(node.expr) + ")"
elif op == SIN:
return "SIN(" + self.str_visit(node.expr) + ")"
elif op == TAN:
return "TAN(" + self.str_visit(node.expr) + ")"
elif op == ARCSIN:
return "ARCSIN(" + self.str_visit(node.expr) + ")"
elif op == ARCCOS:
return "ARCCOS(" + self.str_visit(node.expr) + ")"
elif op == ARCTAN:
return "ARCTAN(" + self.str_visit(node.expr) + ")"
elif op == SINH:
return "SINH(" + self.str_visit(node.expr) + ")"
elif op == COSH:
return "COSH(" + self.str_visit(node.expr) + ")"
elif op == TANH:
return "TANH(" + self.str_visit(node.expr) + ")"
elif op == EXP:
return "EXP(" + self.str_visit(node.expr) + ")"
elif op == LOG:
return "LOG(" + self.str_visit(node.expr) + ")"
def interpret(self, vd=None):
""" numerical evaluation """
self.get_vardict(vd)
tree = self.tree
if tree is None:
return ''
return self.visit(tree)
def differentiate(self, vd=None, dv=None):
""" evaluate numerical derivative, vd is the variable to derive on """
self.get_vardict(vd)
self.get_diffvar(dv)
tree = self.dtree
if tree is None:
return ''
return self.visit(tree)
def symbolic_diff(self, vd=None, dv=None):
""" evaluate symbolic derivative (return a string), vd is the variable to derive on """
original_vd = vd
self.get_vardict(vd)
self.get_diffvar(dv)
tree = self.dtree
if tree is None:
return ''
return self.str_visit(tree)
def diff_all(self, vd=None):
""" returns all partial derivatives """
self.get_vardict(vd)
tree = self.dtree
if tree is None:
return ''
variables = list(self.vardict.keys())
ret = {}
for v in variables:
self.vardict["d_"+v] = 0
for v in variables:
self.vardict["d_"+v] = 1
ret["d_{}".format(v)]=self.visit(tree)
self.vardict["d_"+v] = 0
return ret
def get_vardict(self, vd=None):
""" expects vardict to be formatted as x:10, y:20, z:3 """
vdict = {}
if vd is None:
text = input('vardict> ')
if not text:
self.vardict = None
return
else:
text = vd
text = text.replace(" ", "")
for var in text.split(','):
vals = var.split(':')
vdict[str(vals[0])] = float(vals[1])
self.vardict = vdict
return
def get_diffvar(self, dv=None):
""" sets the variable to derive on """
if dv is None:
text = input('d_var> ')
else:
text = dv
text = text.replace(" ", "")
if text not in self.vardict.keys():
raise NameError("d_var not in vardict")
for v in list(self.vardict.keys()):
self.vardict["d_"+v]=0
self.vardict["d_"+text]=1
return
# def main():
# if run as main, can take inputs from command line
# while True:
# try:
# try:
# text = raw_input('spi> ')
# except NameError: # Python3
# text = input('spi> ')
# except EOFError:
# break
# if not text:
# continue
# lexer = Lexer(text)
# parser = Parser(lexer)
# interpreter = Interpreter(parser)
# result = interpreter.differentiate()
# print(result)
# if __name__ == '__main__':
# main()
'''
Based off of the open source tutorial: Let's Build a Simple Interpreter
https://github.com/rspivak/lsbasi/tree/master/part8/python
'''
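
# Hedged usage sketch (an assumption mirroring the commented-out main() above, not an official
# demo): the Lexer -> Parser -> Interpreter pipeline evaluates a formula and its derivatives
# at a point given as "var:value" pairs.
if __name__ == '__main__':
    lexer = Lexer("x*y + POW(x, 2)")
    parser = Parser(lexer)
    interpreter = Interpreter(parser)
    print(interpreter.interpret("x:2, y:3"))           # value: 10.0
    print(interpreter.differentiate("x:2, y:3", "x"))  # df/dx: 7.0
    print(interpreter.diff_all("x:2, y:3"))            # {'d_x': 7.0, 'd_y': 2.0}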
| AD-cs207 | /AD-cs207-1.0.0.tar.gz/AD-cs207-1.0.0/AD/interpreter.py | interpreter.py |
# CS207 Final Project Repository
[](https://travis-ci.com/cs207-f18-WIRS/cs207-FinalProject)
[](https://coveralls.io/github/cs207-f18-WIRS/cs207-FinalProject?branch=master)
This repository contains the Final Project Deliverable on Automatic Differentiation for the Harvard Course CS 207: Systems Development for Computational Science.
## Project information:
- Specific information can be found at `docs/milestone1.md`.
  - [Course Project description](https://iacs-cs-207.github.io/cs207-F18/project.html) : Overview of the instructions for the project on automatic differentiation (AD).
## Course information:
- [Main course website](https://iacs-cs-207.github.io/cs207-F18/) : Check this site for all course-related policies including the syllabus, course schedule, and project policies.
- [GitHub Repo](https://github.com/IACS-CS-207/cs207-F18) : All course materials will be released on GitHub.
## Contributors:
- FELDHAUS Isabelle
- JIANG Shenghao
- STRUYVEN Robbert
- WANG William | AD-testing-packaging-CS207 | /AD_testing_packaging_CS207-0.1.5.tar.gz/AD_testing_packaging_CS207-0.1.5/README.md | README.md |
import setuptools
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name='AD_testing_packaging_CS207',
version='0.1.5',
author='Shenghao Jiang, Isabelle Feldhaus, Robbert Struyven, William Wang',
author_email=" ",
description='Automatic Differentiation Package',
long_description=long_description,
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
keywords=['Automatic differentiation', 'gradients', 'Python'],
url='https://github.com/cs207-f18-WIRS/cs207-FinalProject',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
],
)
| AD-testing-packaging-CS207 | /AD_testing_packaging_CS207-0.1.5.tar.gz/AD_testing_packaging_CS207-0.1.5/setup.py | setup.py |
import AD.interpreter as ast
class AD():
"""
User friendly interface for the AST interpreter.
Usage
=============
import autodif
def main():
f1 = "x*y*z"
vd = "x:2,y:3,z:4"
F1 = autodif.AD(f1)
print(F1.diff_all(vd))
print(F1.diff("x"))
F1.new_formula("a+b")
vd = "a:10, b : 1"
F1.set_point(vd)
print(F1.val())
print(F1.diff_all())
"""
def __init__(self, frmla):
self.formula = frmla
self.lexer = ast.Lexer(frmla)
self.parser = ast.Parser(self.lexer)
self.interpreter = ast.Interpreter(self.parser)
self.vd = None
def set_point(self, vd):
if vd is not None:
self.vd = vd
if self.vd is None:
raise NameError("Must set point to evaluate")
def diff(self, dv, vd=None):
self.set_point(vd)
return self.interpreter.differentiate(self.vd, dv)
def diff_all(self, vd=None):
self.set_point(vd)
return self.interpreter.diff_all(self.vd)
def val(self, vd=None):
self.set_point(vd)
return self.interpreter.interpret(self.vd)
def new_formula(self, frmla):
self.formula = frmla
self.lexer = ast.Lexer(frmla)
self.parser = ast.Parser(self.lexer)
self.interpreter = ast.Interpreter(self.parser)
self.vd = None
| AD-testing-packaging-CS207 | /AD_testing_packaging_CS207-0.1.5.tar.gz/AD_testing_packaging_CS207-0.1.5/AD/autodif.py | autodif.py |
import AD.autodif as autodif
print("Import Succesful!")
| AD-testing-packaging-CS207 | /AD_testing_packaging_CS207-0.1.5.tar.gz/AD_testing_packaging_CS207-0.1.5/AD/__init__.py | __init__.py |
""" SPI - Simple Pascal Interpreter """
import copy
import math
import unicodedata
###############################################################################
# #
# LEXER #
# #
###############################################################################
# Token types
#
# EOF (end-of-file) token is used to indicate that
# there is no more input left for lexical analysis
INTEGER, PLUS, MINUS, MUL, DIV, LPAREN, RPAREN, EOF, VAR, COS, SIN, EXP,POW, LOG, COMMA = (
'INTEGER', 'PLUS', 'MINUS', 'MUL', 'DIV', '(', ')', 'EOF', 'VAR', 'COS', 'SIN', 'EXP', 'POW', 'LOG', ','
)
class Token(object):
def __init__(self, type, value):
self.type = type
self.value = value
def __str__(self):
"""String representation of the class instance.
Examples:
Token(INTEGER, 3)
Token(PLUS, '+')
Token(MUL, '*')
"""
return 'Token({type}, {value})'.format(
type=self.type,
value=repr(self.value)
)
def __repr__(self):
return self.__str__()
class Lexer(object):
def __init__(self, text):
# client string input, e.g. "4 + 2 * 3 - 6 / 2"
self.text = text
# self.pos is an index into self.text
self.pos = 0
self.current_char = self.text[self.pos]
def error(self):
raise Exception('Invalid character')
def advance(self):
"""Advance the `pos` pointer and set the `current_char` variable."""
self.pos += 1
if self.pos > len(self.text) - 1:
self.current_char = None # Indicates end of input
else:
self.current_char = self.text[self.pos]
def skip_whitespace(self):
while self.current_char is not None and self.current_char.isspace():
self.advance()
def is_number(self, s):
try:
float(s)
return True
except ValueError:
pass
try:
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def integer(self):
"""Return a (multidigit) integer consumed from the input."""
index = 0
while(self.is_number(self.text[self.pos:len(self.text)-index])==False):
index += 1
number = self.text[self.pos:len(self.text)-index]
index = 0
while(index < len(number)):
self.advance()
index += 1
return float(number)
def word(self):
"""Return a multichar integer consumed from the input."""
result = ''
while self.current_char is not None and self.current_char.isalpha():
result += self.current_char
self.advance()
return result
def get_next_token(self):
"""Lexical analyzer (also known as scanner or tokenizer)
This method is responsible for breaking a sentence
apart into tokens. One token at a time.
"""
while self.current_char is not None:
if self.current_char.isspace():
self.skip_whitespace()
continue
if self.current_char.isdigit():
return Token(INTEGER, self.integer())
if self.current_char.isalpha():
w = self.word()
if(w.upper() == "COS"):
return Token(COS, self.word())
elif(w.upper() == "SIN"):
return Token(SIN, self.word())
elif(w.upper() == "EXP"):
return Token(EXP, self.word())
elif(w.upper() == "POW"):
return Token(POW, self.word())
elif(w.upper() == "LOG"):
return Token(LOG, self.word())
else:
return Token(VAR, w)
if self.current_char == '+':
self.advance()
return Token(PLUS, '+')
if self.current_char == '-':
self.advance()
return Token(MINUS, '-')
if self.current_char == '*':
self.advance()
return Token(MUL, '*')
if self.current_char == '/':
self.advance()
return Token(DIV, '/')
if self.current_char == '(':
self.advance()
return Token(LPAREN, '(')
if self.current_char == ')':
self.advance()
return Token(RPAREN, ')')
if self.current_char == ',':
self.advance()
return Token(COMMA, ',')
self.error()
return Token(EOF, None)
###############################################################################
# #
# PARSER #
# #
###############################################################################
class AST(object):
pass
class BinOp(AST):
def __init__(self, left, op, right):
self.left = left
self.token = self.op = op
self.right = right
class Num(AST):
def __init__(self, token):
self.token = token
self.value = token.value
class Var(AST):
def __init__(self, token):
self.token = token
self.name = token.value
class UnaryOp(AST):
def __init__(self, op, expr):
self.token = self.op = op
self.expr = expr
class Parser(object):
def __init__(self, lexer):
self.lexer = lexer
# set current token to the first token taken from the input
self.current_token = self.lexer.get_next_token()
def error(self):
raise Exception('Invalid syntax')
def eat(self, token_type):
# compare the current token type with the passed token
# type and if they match then "eat" the current token
# and assign the next token to the self.current_token,
# otherwise raise an exception.
if self.current_token.type == token_type:
self.current_token = self.lexer.get_next_token()
else:
self.error()
def factor(self):
"""factor : (PLUS | MINUS) factor | INTEGER | VAR | LPAREN expr RPAREN"""
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
node = UnaryOp(token, self.factor())
return node
elif token.type == MINUS:
self.eat(MINUS)
node = UnaryOp(token, self.factor())
return node
elif token.type == INTEGER:
self.eat(INTEGER)
return Num(token)
elif token.type == VAR:
self.eat(VAR)
return Var(token)
elif token.type == COS:
self.eat(COS)
self.eat(LPAREN)
x = self.expr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node
elif token.type == SIN:
self.eat(SIN)
self.eat(LPAREN)
x = self.expr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node
elif token.type == EXP:
self.eat(EXP)
self.eat(LPAREN)
x = self.expr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node
elif token.type == POW:
self.eat(POW)
self.eat(LPAREN)
x = self.expr()
self.eat(COMMA)
y = self.expr()
self.eat(RPAREN)
return BinOp(left = x, op = token, right = y)
elif token.type == LOG:
self.eat(LOG)
self.eat(LPAREN)
x = self.expr()
self.eat(RPAREN)
return UnaryOp(token, x)
elif token.type == LPAREN:
self.eat(LPAREN)
node = self.expr()
self.eat(RPAREN)
return node
def term(self):
"""term : factor ((MUL | DIV) factor)*"""
node = self.factor()
while self.current_token.type in (MUL, DIV):
token = self.current_token
if token.type == MUL:
self.eat(MUL)
elif token.type == DIV:
self.eat(DIV)
node = BinOp(left=node, op=token, right=self.factor())
return node
def expr(self):
"""
expr : term ((PLUS | MINUS) term)*
term : factor ((MUL | DIV) factor)*
factor : (PLUS | MINUS) factor | INTEGER | LPAREN expr RPAREN
"""
node = self.term()
while self.current_token.type in (PLUS, MINUS):
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
elif token.type == MINUS:
self.eat(MINUS)
node = BinOp(left=node, op=token, right=self.term())
return node
def parse(self):
node = self.expr()
if self.current_token.type != EOF:
self.error()
return node
def dfactor(self):
"""factor : (PLUS | MINUS) factor | INTEGER | VAR | LPAREN expr RPAREN"""
token = self.current_token
        if token.type == PLUS:
            self.eat(PLUS)
            node, dnode = self.dfactor()
            return UnaryOp(token, node), UnaryOp(token, dnode)
        elif token.type == MINUS:
            self.eat(MINUS)
            node, dnode = self.dfactor()
            return UnaryOp(token, node), UnaryOp(token, dnode)
elif token.type == INTEGER:
self.eat(INTEGER)
return Num(token), Num(Token(INTEGER, 0))
elif token.type == VAR:
self.eat(VAR)
return Var(token), Var(Token(VAR, "d_" + token.value))
elif token.type == COS:
self.eat(COS)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = UnaryOp(Token(MINUS, "-"), UnaryOp(Token(SIN, "sin"), x)), op=Token(MUL,'*'), right=dx)
elif token.type == SIN:
self.eat(SIN)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = UnaryOp(Token(COS, "cos"), x), op=Token(MUL,'*'), right=dx)
elif token.type == EXP:
self.eat(EXP)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = node, op=Token(MUL,'*'), right=dx)
elif token.type == POW:
self.eat(POW)
self.eat(LPAREN)
x_cur = copy.deepcopy(self)
x = self.expr()
dx = x_cur.dexpr()
self.eat(COMMA)
y_cur = copy.deepcopy(self)
y = self.expr()
dy = y_cur.dexpr()
self.eat(RPAREN)
node = BinOp(left = x, op = token, right = y)
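            # generalized power rule: d(x**y) = x**y * ((y/x)*dx + log(x)*dy)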
return node, BinOp(left = node, op = Token(MUL, '*'), right = BinOp(left = BinOp(left = BinOp(left = y, op = Token(DIV,'/'), right = x), op = Token(MUL,'*'), right = dx), op = Token(PLUS, '+'), right = BinOp(left = dy, op = Token(MUL, '*'),right = UnaryOp(Token(LOG, 'LOG'), x))))
elif token.type == LOG:
self.eat(LOG)
self.eat(LPAREN)
cur = copy.deepcopy(self)
x = self.expr()
dx = cur.dexpr()
node = UnaryOp(token, x)
self.eat(RPAREN)
return node, BinOp(left = dx, op=Token(DIV,'/'), right=x)
elif token.type == LPAREN:
self.eat(LPAREN)
cur = copy.deepcopy(self)
node = self.expr()
dnode = cur.dexpr()
self.eat(RPAREN)
return node, dnode
def dterm(self):
"""term : factor ((MUL | DIV) factor)*"""
node, dnode = self.dfactor()
while self.current_token.type in (MUL, DIV):
token = self.current_token
if token.type == MUL:
self.eat(MUL)
elif token.type == DIV:
self.eat(DIV)
rnode, rdnode = self.dfactor()
lowdhi = BinOp(left=dnode, op=Token(MUL,'*'), right=rnode)
hidlow = BinOp(left=node, op=Token(MUL,'*'), right=rdnode)
if token.type == MUL:
                # product rule
dnode = BinOp(left=lowdhi, op=Token(PLUS,'+'), right=hidlow)
node = BinOp(left=node, op=Token(MUL,'*'), right=rnode)
else:
# quotient rule
topnode = BinOp(left=lowdhi, op=Token(MINUS, '-'), right=hidlow)
botnode = BinOp(left=rnode, op=Token(MUL,'*'), right=rnode)
dnode = BinOp(left=topnode, op=Token(DIV,'/'), right=botnode)
node = BinOp(left=node, op=Token(DIV,'/'), right=rnode)
return dnode
def dexpr(self):
"""
expr : term ((PLUS | MINUS) term)*
term : factor ((MUL | DIV) factor)*
factor : (PLUS | MINUS) factor | INTEGER | LPAREN expr RPAREN
"""
dnode = self.dterm()
while self.current_token.type in (PLUS, MINUS):
token = self.current_token
if token.type == PLUS:
self.eat(PLUS)
elif token.type == MINUS:
self.eat(MINUS)
dnode = BinOp(left=dnode, op=token, right=self.dterm())
return dnode
def dparse(self):
node = self.dexpr()
if self.current_token.type != EOF:
self.error()
return node
###############################################################################
# #
# INTERPRETER #
# #
###############################################################################
class NodeVisitor(object):
def visit(self, node):
method_name = 'visit_' + type(node).__name__
visitor = getattr(self, method_name, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
raise Exception('No visit_{} method'.format(type(node).__name__))
class Interpreter(NodeVisitor):
def __init__(self, parser):
self.parser = parser
self.dtree = copy.deepcopy(parser).dparse()
self.tree = copy.deepcopy(parser).parse()
def visit_BinOp(self, node):
if node.op.type == PLUS:
return self.visit(node.left) + self.visit(node.right)
elif node.op.type == MINUS:
return self.visit(node.left) - self.visit(node.right)
elif node.op.type == MUL:
return self.visit(node.left) * self.visit(node.right)
elif node.op.type == DIV:
return self.visit(node.left) / self.visit(node.right)
elif node.op.type == POW:
return math.pow(self.visit(node.left), self.visit(node.right))
def visit_Num(self, node):
return node.value
def visit_Var(self, node):
if self.vardict is None:
raise NameError("no var dict passed in")
if node.name not in self.vardict:
raise NameError("var {} not in var dict".format(node.name))
return self.vardict[node.name]
def visit_UnaryOp(self, node):
op = node.op.type
if op == PLUS:
return +self.visit(node.expr)
elif op == MINUS:
return -self.visit(node.expr)
elif op == COS:
return math.cos(self.visit(node.expr))
elif op == SIN:
return math.sin(self.visit(node.expr))
elif op == EXP:
return math.exp(self.visit(node.expr))
elif op == LOG:
return math.log(self.visit(node.expr))
def interpret(self, vd=None):
self.get_vardict(vd)
tree = self.tree
if tree is None:
return ''
return self.visit(tree)
def differentiate(self, vd=None, dv=None):
self.get_vardict(vd)
self.get_diffvar(dv)
tree = self.dtree
if tree is None:
return ''
return self.visit(tree)
def diff_all(self, vd=None):
self.get_vardict(vd)
tree = self.dtree
if tree is None:
return ''
variables = list(self.vardict.keys())
ret = {}
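        # zero every seed variable d_v, then set each to 1 in turn to collect all partial derivatives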
for v in variables:
self.vardict["d_"+v] = 0
for v in variables:
self.vardict["d_"+v] = 1
ret["d_{}".format(v)]=self.visit(tree)
self.vardict["d_"+v] = 0
return ret
def get_vardict(self, vd=None):
""" expects vardict to be formatted as x:10, y:20, z:3 """
vdict = {}
if vd is None:
text = input('vardict> ')
if not text:
self.vardict = None
return
else:
text = vd
text = text.replace(" ", "")
for var in text.split(','):
vals = var.split(':')
vdict[str(vals[0])] = float(vals[1])
self.vardict = vdict
return
def get_diffvar(self, dv=None):
if dv is None:
text = input('d_var> ')
else:
text = dv
text = text.replace(" ", "")
if text not in self.vardict.keys():
raise NameError("d_var not in vardict")
for v in list(self.vardict.keys()):
self.vardict["d_"+v]=0
self.vardict["d_"+text]=1
return
# def main():
# if run as main, can take inputs from command line
# while True:
# try:
# try:
# text = raw_input('spi> ')
# except NameError: # Python3
# text = input('spi> ')
# except EOFError:
# break
# if not text:
# continue
# lexer = Lexer(text)
# parser = Parser(lexer)
# interpreter = Interpreter(parser)
# result = interpreter.differentiate()
# print(result)
# if __name__ == '__main__':
# main()
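# Example sketch (hypothetical formula and values) of using the classes above directly:
#     lexer = Lexer("x*x + sin(x)")
#     parser = Parser(lexer)
#     interpreter = Interpreter(parser)
#     interpreter.interpret(vd="x:2")                # value of the expression at x = 2
#     interpreter.differentiate(vd="x:2", dv="x")    # derivative with respect to x at x = 2
#     interpreter.diff_all(vd="x:2")                 # dict of all partial derivatives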
'''
Based on the open-source tutorial: Let's Build a Simple Interpreter
https://github.com/rspivak/lsbasi/tree/master/part8/python
'''
| AD-testing-packaging-CS207 | /AD_testing_packaging_CS207-0.1.5.tar.gz/AD_testing_packaging_CS207-0.1.5/AD/interpreter.py | interpreter.py |
AD20 package by Group 20:
Lindsey Brown
Xinyue Wang
Kevin Yoon
For documentation, see https://github.com/CS207-AD20/cs207-FinalProject.
| AD20 | /AD20-0.0.2.tar.gz/AD20-0.0.2/README.md | README.md |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="AD20",
version="0.0.2",
author="Lindsey Brown, Xinyue Wang, Kevin Yoon",
author_email=" ",
description="Automatic Differentiation package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/CS207-AD20/cs207-FinalProject",
packages=setuptools.find_packages(),
install_requires=['numpy==1.15.2',
'pandas==0.23.4',
'networkx==2.2',
'matplotlib==3.0.2',
'pandastable==0.11.0',
'scipy==1.1.0',
'pytest-timeout==1.2.1',
'pytest==3.4.2',
'pytest-cov==2.5.1',
'pytest-dependency==0.2'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | AD20 | /AD20-0.0.2.tar.gz/AD20-0.0.2/setup.py | setup.py |
import pytest
import AD20
import numpy as np
import networkx as nx
import matplotlib
import pandas
from AD20.ADnum import ADnum
from AD20 import ADmath as ADmath
from AD20 import ADgraph
#ADnum unit tests
def test_ADnum_init():
x = ADnum(100, der = -2)
assert x.val == 100
assert x.der == -2
assert len(x.graph)==0
def test_ADnum_valtype():
with pytest.raises(ValueError):
z = ADnum('zoo', der = 1)
def test_ADnum_dertype():
with pytest.raises(ValueError):
z = ADnum(3.0, der = 'zebra')
def test_ADnum_nodernoins():
with pytest.raises(Exception):
z = ADnum(4, ind = 0)
def test_ADnum_nodernoind():
with pytest.raises(Exception):
z = ADnum(4, ins = 3)
def test_ADnum_nodernothing():
with pytest.raises(Exception):
z = ADnum(3)
def test_ADnum_derconsistent():
with pytest.raises(ValueError):
z = ADnum(3, der = np.array([1, 3]), ins = 5)
def test_graphiput():
z = ADnum(1, der = 1, graph = {'a': 1})
assert z.graph == {'a':1}
def test_neg():
x = ADnum(4, der =1)
f = -x
assert f.val == -4.0
assert f.der == -1.0
assert len(f.graph)==1
def test_ADnum_mul():
x = ADnum(3.0, der = 1)
f = x*2.0
assert f.val == 6.0
assert f.der == 2.0
assert len(f.graph) == 2
def test_ADnum_rmul():
x = ADnum(3.0, der = 1)
f = 2.0*x
assert f.val == 6.0
assert f.der == 2.0
assert len(f.graph) == 2
def test_ADnum_add():
x = ADnum(3.0, der = 1)
f = x+2.0
assert f.val == 5.0
assert f.der == 1.0
assert len(f.graph) == 2
def test_ADnum_radd():
x = ADnum(3.0, der = 1)
f = 2.0+x
assert f.val == 5.0
assert f.der == 1.0
assert len(f.graph) == 2
def test_ADnum_sub():
x = ADnum(5.0, der = 1)
f = x-2.0
assert f.val == 3.0
assert f.der == 1.0
assert len(f.graph) == 2
def test_ADnum_rsub():
x = ADnum(3.0, der = 1)
f = 5.0-x
assert f.val == 2.0
assert f.der == -1.0
assert len(f.graph) == 2
def test_ADnum_div():
x = ADnum(3.0, der = 1)
f = x/1.5
assert f.val == 2.0
assert f.der == 1/1.5
assert len(f.graph) == 2
def test_ADnum_rdiv():
x = ADnum(3.0, der = 1)
f = 6/x
assert f.val == 2.0
assert f.der == -2/3
assert len(f.graph) == 2
def test_ADnum_pow():
x = ADnum(3.0, der = 1)
f = x**2.0
assert f.val == 9.0
assert f.der == 6.0
assert len(f.graph) == 2
def test_ADnum_rpow():
x = ADnum(3.0, der = 1)
f = 4**x
assert f.val == 64
assert f.der == 64*np.log(4.0)
assert len(f.graph) == 2
# ADmath unit tests
def test_ADmath_sin():
X = ADnum(np.pi, der = 1)
Y = ADmath.sin(X)
assert Y.val == np.sin(np.pi)
assert Y.der == np.cos(np.pi)
assert len(Y.graph) == 1
def test_ADmath_cos():
f = ADmath.cos(ADnum(4, der = 1))
assert f.val == np.cos(4)
assert f.der == -np.sin(4)
assert len(f.graph) == 1
def test_ADmath_tan():
f = ADmath.tan(ADnum(4, der = 1))
assert f.val == np.tan(4)
assert f.der == (1/np.cos(4))**2
assert len(f.graph) == 1
def test_ADmath_csc():
f = ADmath.csc(ADnum(5, der = 1))
assert f.val == 1/np.sin(5)
assert f.der == (-1/np.tan(5))*(1/np.sin(5))
assert len(f.graph) == 1
def test_ADmath_sec():
f = ADmath.sec(ADnum(6, der = 1))
assert f.val == 1/np.cos(6)
assert f.der == np.tan(6)/np.cos(6)
assert len(f.graph) == 1
def test_ADmath_cot():
f = ADmath.cot(ADnum(1, der = 1))
assert f.val == 1/np.tan(1)
assert f.der == -1/(np.sin(1)**2)
assert len(f.graph) == 1
def test_ADmath_arcsin():
f = ADmath.arcsin(ADnum(.2, der = 1))
assert f.val == np.arcsin(.2)
assert f.der == 1/(np.sqrt(1-.2**2))
assert len(f.graph) == 1
def test_ADmath_arccos():
f = ADmath.arccos(ADnum(.3, der = 1))
assert f.val == np.arccos(.3)
assert f.der == -1/(np.sqrt(1-.3**2))
assert len(f.graph) == 1
def test_ADmath_arctan():
f = ADmath.arctan(ADnum(1, der = 1))
assert f.val == np.arctan(1)
assert f.der == .5
assert len(f.graph) == 1
def test_ADmath_sinh():
f = ADmath.sinh(ADnum(2, der = 1))
assert f.val == np.sinh(2)
assert f.der == np.cosh(2)
assert len(f.graph) == 1
def test_ADmath_cosh():
f = ADmath.cosh(ADnum(3, der = 1))
assert f.val == np.cosh(3)
assert f.der == np.sinh(3)
assert len(f.graph) == 1
def test_ADmath_tanh():
f = ADmath.tanh(ADnum(-5, der = 1))
assert f.val == np.tanh(-5)
assert f.der == 1/(np.cosh(-5)**2)
assert len(f.graph) == 1
def test_ADmath_exp():
f = ADmath.exp(ADnum(-3, der = 1))
assert f.val == np.exp(-3)
assert f.der == np.exp(-3)
assert len(f.graph) == 1
def test_ADmath_log():
f = ADmath.log(ADnum(72, der = 1))
assert f.val == np.log(72)
assert f.der == 1/72
assert len(f.graph) == 1
def test_ADmath_logistic():
f = ADmath.logistic(ADnum(0, der=1))
assert f.val == .5
assert f.der == .25
def test_ADmath_sqrt():
f = ADmath.sqrt(ADnum(40, der = 1))
assert f.val == np.sqrt(40)
assert f.der == 1/(2*np.sqrt(40))
assert len(f.graph) == 1
def test_ADmath_sinr():
X = np.pi
Y = ADmath.sin(X)
assert Y == np.sin(np.pi)
def test_ADmath_cosr():
f = ADmath.cos(4)
assert f == np.cos(4)
def test_ADmath_tanr():
f = ADmath.tan(4)
assert f == np.tan(4)
def test_ADmath_cscr():
f = ADmath.csc(5)
assert f == 1/np.sin(5)
def test_ADmath_secr():
f = ADmath.sec(6)
assert f == 1/np.cos(6)
def test_ADmath_cotr():
f = ADmath.cot(1)
assert f == 1/np.tan(1)
def test_ADmath_arcsinr():
f = ADmath.arcsin(.2)
assert f == np.arcsin(.2)
def test_ADmath_arccosr():
f = ADmath.arccos(.3)
assert f == np.arccos(.3)
def test_ADmath_arctanr():
f = ADmath.arctan(1)
assert f == np.arctan(1)
def test_ADmath_sinhr():
f = ADmath.sinh(2)
assert f == np.sinh(2)
def test_ADmath_coshr():
f = ADmath.cosh(3)
assert f == np.cosh(3)
def test_ADmath_tanhr():
f = ADmath.tanh(-5)
assert f == np.tanh(-5)
def test_ADmath_expr():
f = ADmath.exp(-3)
assert f == np.exp(-3)
def test_ADmath_logr():
f = ADmath.log(72)
assert f == np.log(72)
def test_ADmath_sqrtr():
f = ADmath.sqrt(40)
assert f == np.sqrt(40)
# More advanced tests
def test_12x():
x = ADnum(1, der = 1)
f = 1/(1-2*x)
assert f.val == 1/(1-2)
assert f.der == 2/(1-2)**2
def test_xex():
x = ADnum(2, der = 1)
f = x * ADmath.exp(x)
assert f.val == 2.0 * np.exp(2.0)
assert f.der == np.exp(2.0) + 2.0*np.exp(2.0)
def test_5x2lnx():
x = ADnum(1, der = 1)
f = 5 * x**2 * ADmath.log(x)
assert f.val == 0.0
assert f.der == 10 * 1.0 * np.log(1.0) + 5*1.0
def test_sinxcosx():
x = ADnum(0, der = 1)
f = ADmath.sin(x) * ADmath.cos(x)
assert f.val == np.sin(0) * np.cos(0)
assert f.der == -(np.sin(0) ** 2) + np.cos(0) **2
def test_2xe2x():
x = ADnum(2, der = 1)
f = 2 * x * ADmath.exp(2*x)
assert f.val == 4 * np.exp(4)
assert f.der == 2 * np.exp(4.0) + 8 * np.exp(4)
def test_multivar():
x = ADnum(3, ins = 2, ind = 0)
y = ADnum(4, ins = 2, ind= 1)
f = 2 * y + 2*x**2
assert f.val == 2 * 4 + 2 * 3**2
assert f.der.all() == np.all(np.array([12, 2]))
def test_vecinput():
x = ADnum([1, 2, 3], ins = 1, ind = 0)
assert np.array_equal(x.val, np.array([1., 2., 3.]))
assert np.array_equal(x.der, np.array([1., 1., 1.]))
def test_vecinput_multi():
x = ADnum([1, 2, 3], ins =2 , ind =0)
assert np.array_equal(x.der, np.array([[1., 1., 1.],[0., 0., 0.]]))
#Graph testing
def test_gen_graph():
d = {'y': [('x', 'test')]}
Y = ADnum(1, der = 1, graph = d)
G= ADgraph.gen_graph(Y)
assert 'y' in G
assert 'x' in G
assert type(G) == nx.classes.digraph.DiGraph
assert G.number_of_nodes()==2
assert G.number_of_edges()==1
def test_reverse_graph():
d = {'y': [('x', 'test')]}
rd = {'x': [('y', 'test')]}
Y = ADnum(1, der =1, graph = d)
rg = ADgraph.reverse_graph(Y)
assert rd == rg
def test_get_labels():
X = ADnum(1, der =1)
Y = ADmath.sin(X)+3
labs = ADgraph.get_labels(Y)
assert labs[X] == 'X0'
assert labs[Y] == 'X2'
assert len(labs) == 4
def test_get_colorsandsizes():
X = ADnum(1, der =1)
Y = ADmath.sin(X)+3
labs = ADgraph.get_labels(Y)
G = ADgraph.gen_graph(Y)
cols = ADgraph.get_colors(G, Y)
sizes = ADgraph.get_sizes(G, Y, labs)
assert len(cols)== 4
assert len(sizes) == 4
def test_draw_graph():
X = ADnum(1, der =1)
Y = ADmath.sin(X)+3
fig = ADgraph.draw_graph(Y)
assert type(fig) == matplotlib.figure.Figure
def test_gen_table():
X = ADnum(1, der =1)
Y = ADmath.sin(X)+3
dat = ADgraph.gen_table(Y)
assert type(dat) == pandas.core.frame.DataFrame
def test_plot_ADnum():
X = ADnum(1, der =1)
def Y(x):
return ADmath.sin(x)
fig = ADgraph.plot_ADnum(Y)
assert type(fig)==matplotlib.figure.Figure
| AD20 | /AD20-0.0.2.tar.gz/AD20-0.0.2/Tests/test_AD20.py | test_AD20.py |
# cs107-FinalProject
[](https://codecov.io/gh/ZLYEPJ20/cs107-FinalProject)
[](https://travis-ci.com/ZLYEPJ20/cs107-FinalProject)
Group #20
<ul>
<li> Zhufeng Kang - zhk877@g.harvard.edu</li>
<li> Yuxi Liu - yul864@mail.harvard.edu</li>
<li> Esther Brown - estherbrown@g.harvard.edu</li>
</ul>
| AD2020 | /AD2020-0.0.2.tar.gz/AD2020-0.0.2/README.md | README.md |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="AD2020",
version="0.0.2",
author="Yuxi Liu, Zhufeng Kang, Esther Brown",
author_email="yuxiliu@mail.harvard.edu, zhk877@g.harvard.edu, estherbrown@g.harvard.edu",
description="An automatic differentiation package",
long_description="AD2020 is a Python package for automatic differentiation (AD). Differentiation is one of the key components in numerical computing and is ubiquitous in machine learning, optimization, and numerical methods. By repeatedly applying chain rule to elementary arithmetic operations and functions, AD is powerful to efficiently compute derivatives for complicated functions to machine precision.",
long_description_content_type="text/markdown",
url="https://github.com/ZLYEPJ20/cs107-FinalProject",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| AD2020 | /AD2020-0.0.2.tar.gz/AD2020-0.0.2/setup.py | setup.py |
# Test suite for AD2020 module
import sys
sys.path.append('./AD2020')
import pytest
import numpy as np
from AD2020 import AD2020
def test_AD2020_init():
    '''Test initialization'''
def test_scalar_input():
x = AD2020(2, 1)
assert x.value == [2]
assert x.derivative == [1]
x = AD2020([2])
assert x.value == [2]
assert x.derivative == [1]
x = AD2020([2], [3])
assert x.value == [2]
assert x.derivative == [3]
def test_vector_input():
x = AD2020([1],[1,0])
y = AD2020([2],[0,1])
f = AD2020([x, y])
assert np.all(f.value == [1,2])
assert np.all(f.derivative == [[1,0],[0,1]])
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f = AD2020([x, y, z])
assert np.all(f.value == [1,2,3])
assert np.all(f.derivative == [[1,0,0],[0,1,0],[0,0,1]])
def test_repr():
x = AD2020([2],[1])
f = 2 * x + 1
assert f.value == [5]
assert f.derivative == [2]
assert repr(f) == 'An AD2020 Object\nValues:\n{}\nJacobian:\n{}'.format(f.value, f.derivative)
test_scalar_input()
test_vector_input()
test_repr()
def test_AD2020_comparisons():
'''Test comparison methods'''
def test_eq():
x = AD2020([1])
y = AD2020([2])
z = AD2020([1])
assert x == z
assert not x == y
def test_nq():
x = AD2020([1])
y = AD2020([2])
z = AD2020([1])
assert not x != z
assert x != y
def test_lt():
x = AD2020([1])
y = AD2020([2])
assert x < 2
assert 0 < x
assert not x < 1
assert not y < 2
assert x < y
def test_gt():
x = AD2020([1])
y = AD2020([2])
assert x > 0
assert 2 > x
assert not x > 1
assert not y > 2
assert y > x
def test_ge():
x = AD2020([1])
y = AD2020([2])
assert x >= 0
assert 1 >= x
assert 2 >= x
assert not x >= 2
assert y >= x
def test_le():
x = AD2020([1])
y = AD2020([2])
assert x <= 1
assert 0 <= x
assert 1 <= x
assert not x <= 0
assert x <= y
test_eq()
test_nq()
test_lt()
test_gt()
test_ge()
test_le()
def test_AD2020_scalar():
'''Test scalar functions'''
def test_add():
x = AD2020(2, 1)
f1 = x + x
f2 = x + 2
f3 = x + 3.0 + x
assert f1 == AD2020(4, 2)
assert f2 == AD2020(4, 1)
assert f3 == AD2020(7.0, 2)
def test_radd():
x = AD2020(5, 1)
f1 = x + x
f2 = 2 + x
f3 = 2.0 + x + 1.0 + x
assert f1 == AD2020(10, 2)
assert f2 == AD2020(7, 1)
assert f3 == AD2020(13.0, 2)
def test_sub():
x = AD2020(4, 1)
f1 = x - x
f2 = x - 4
f3 = x - 2.0 - 1.0 - x + x
assert f1 == AD2020(0, 0)
assert f2 == AD2020(0, 1)
assert f3 == AD2020(1.0, 1)
def test_rsub():
x = AD2020(4, 1)
f1 = x - x - x
f2 = 4 - x
f3 = 4.0 - x + 1.0 - x + x
assert f1 == AD2020(-4, -1)
assert f2 == AD2020(0, -1)
assert f3 == AD2020(1.0, -1)
def test_mul():
x = AD2020(3, 1)
f1 = x * x
f2 = x * 2
f3 = x * x * 2.0
assert f1 == AD2020(9, 6)
assert f2 == AD2020(6, 2)
assert f3 == AD2020(18.0, 12.0)
def test_rmul():
x = AD2020(6, 1)
f1 = x * x * x
f2 = 2 * x
f3 = 2.0 * x
assert f1 == AD2020(216, 108)
assert f2 == AD2020(12, 2)
assert f3 == AD2020(12.0, 2.0)
def test_truediv():
x = AD2020(2, 1)
f1 = x / x
f2 = x / 2
f3 = x / 2.0 / 2.0 / x * x
assert f1 == AD2020(1.0, 0.0)
assert f2 == AD2020(1, 0.5)
assert f3 == AD2020(0.5, 0.25)
with pytest.raises(ZeroDivisionError):
x0 = AD2020(2, 1)
f0 = x0 / 0
def test_rtruediv():
x = AD2020(2, 1)
f1 = x / x / x
f2 = 1 / x
f3 = 1.0 / x / 2.0 / x * x
assert f1 == AD2020(0.5, -0.25)
assert f2 == AD2020(0.5, -0.25)
assert f3 == AD2020(0.25, -0.125)
with pytest.raises(ZeroDivisionError):
x0 = AD2020(0, 1)
f0 = 2 / x0
def test_pow():
x = AD2020(2, 1)
f1 = x ** (2 * x)
f2 = x ** 3
f3 = x ** (1/2)
assert f1.value == 16
assert np.round(f1.derivative, 2) == 54.18
assert f2 == AD2020(8, 12)
assert np.round(f3.value, 2) == 1.41
assert np.round(f3.derivative, 2) == 0.35
with pytest.raises(ValueError):
x0 = AD2020(-2, 1)
f0 = x0 ** (1/2)
with pytest.raises(ZeroDivisionError):
x0 = AD2020(0, 1)
f0 = x0 ** (1/2)
def test_rpow():
x = AD2020(4, 1)
f1 = 2 ** x
f2 = 0 ** x
assert f1.value == 16
assert np.round(f1.derivative, 2) == 11.09
assert f2 == AD2020(0, 0)
with pytest.raises(ValueError):
x0 = AD2020(2, 1)
f0 = (-4) ** x
with pytest.raises(ZeroDivisionError):
x0 = AD2020(-2, 1)
f0 = 0 ** x0
def test_neg():
x = AD2020(3, 1)
f1 = x * x
f2 = -f1
assert f1 == AD2020(9, 6)
assert f2 == AD2020(-9, -6)
test_add()
test_radd()
test_sub()
test_rsub()
test_mul()
test_rmul()
test_truediv()
test_rtruediv()
test_pow()
test_rpow()
test_neg()
def test_AD2020_vector():
'''Test vector functions'''
def test_add():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = x + y + z
f2 = AD2020([x+y, y+3, z])
f3 = AD2020([x+1, y, y+z])
assert f1 == AD2020(6, [1,1,1])
assert np.all(f2.value == [3,5,3])
assert np.all(f2.derivative == [[1,1,0],[0,1,0],[0,0,1]])
assert np.all(f3.value == [2,2,5])
assert np.all(f3.derivative == [[1,0,0],[0,1,0],[0,1,1]])
def test_radd():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = x + y + z
f2 = AD2020([x+y, 1+y, z])
f3 = AD2020([5+x, y, y+z])
assert f1 == AD2020(6, [1,1,1])
assert np.all(f2.value == [3,3,3])
assert np.all(f2.derivative == [[1,1,0],[0,1,0],[0,0,1]])
assert np.all(f3.value == [6,2,5])
assert np.all(f3.derivative == [[1,0,0],[0,1,0],[0,1,1]])
def test_sub():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = x - y + z
f2 = AD2020([x-y, y-2, z])
f3 = AD2020([x-1, y, y-z])
assert f1 == AD2020(2, [1,-1,1])
assert np.all(f2.value == [-1,0,3])
assert np.all(f2.derivative == [[1,-1,0],[0,1,0],[0,0,1]])
assert np.all(f3.value == [0,2,-1])
assert np.all(f3.derivative == [[1,0,0],[0,1,0],[0,1,-1]])
def test_rsub():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = y - x + z
f2 = AD2020([y-x, 2-y, z])
f3 = AD2020([1-x, y, z-y])
assert f1 == AD2020(4, [-1,1,1])
assert np.all(f2.value == [1,0,3])
assert np.all(f2.derivative == [[-1,1,0],[0,-1,0],[0,0,1]])
assert np.all(f3.value == [0,2,1])
assert np.all(f3.derivative == [[-1,0,0],[0,1,0],[0,-1,1]])
def test_mul():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = x * y + z
f2 = AD2020([x*y, y*2, z])
f3 = AD2020([x, y, y*z])
assert f1 == AD2020(5, [2,1,1])
assert np.all(f2.value == [2,4,3])
assert np.all(f2.derivative == [[2,1,0],[0,2,0],[0,0,1]])
assert np.all(f3.value == [1,2,6])
assert np.all(f3.derivative == [[1,0,0],[0,1,0],[0,3,2]])
def test_rmul():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = 2 * x * y * z
f2 = AD2020([y*x, 3*y, z])
f3 = AD2020([z*x, y, z*y])
assert f1 == AD2020(12, [12,6,4])
assert np.all(f2.value == [2,6,3])
assert np.all(f2.derivative == [[2,1,0],[0,3,0],[0,0,1]])
assert np.all(f3.value == [3,2,6])
assert np.all(f3.derivative == [[3,0,1],[0,1,0],[0,3,2]])
def test_truediv():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = x * z / y
f2 = AD2020([x/y, y*z, z/y])
f3 = AD2020([x/2, y/x, z/3])
assert f1 == AD2020(1.5, [1.5,-0.75,0.5])
assert np.all(f2.value == [0.5,6,1.5])
assert np.all(f2.derivative == [[0.5,-0.25,0],[0,3,2],[0,-0.75,0.5]])
assert np.all(f3.value == [0.5,2,1])
assert np.all(f3.derivative == [[0.5,0,0],[-2,1,0],[0,0,1/3]])
with pytest.raises(ZeroDivisionError):
f0 = x / 0
def test_rtruediv():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = y / x * z
f2 = AD2020([x/y, y*z/x, z/y])
f3 = AD2020([2/x, x/y, 3/z])
assert f1 == AD2020(6, [-6,3,2])
assert np.all(f2.value == [0.5,6,1.5])
assert np.all(f2.derivative == [[0.5,-0.25,0],[-6,3,2],[0,-0.75,0.5]])
assert np.all(f3.value == [2,0.5,1])
assert np.all(f3.derivative == [[-2,0,0],[0.5,-0.25,0],[0,0,-1/3]])
with pytest.raises(ZeroDivisionError):
x0 = AD2020([0],[1,0,0])
f0 = 2 / x0
def test_pow():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = x**3 + y + z
f2 = AD2020([x**2, y*z/x, z/y])
f3 = AD2020([x, x/y, z**2])
assert f1 == AD2020(6, [3,1,1])
assert np.all(f2.value == [1,6,1.5])
assert np.all(f2.derivative == [[2,0,0],[-6,3,2],[0,-0.75,0.5]])
assert np.all(f3.value == [1,0.5,9])
assert np.all(f3.derivative == [[1,0,0],[0.5,-0.25,0],[0,0,6]])
with pytest.raises(ValueError):
x0 = AD2020([-2],[1,0,0])
f0 = x0 ** (1/2)
with pytest.raises(ZeroDivisionError):
x0 = AD2020([0],[1,0,0])
f0 = x0 ** (1/2)
def test_rpow():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = 2**x + y + z
f2 = AD2020([2**(2*x), 2**(y-1), 2**(z**2)])
assert f1.value == [7]
assert np.all(np.round(f1.derivative, 2) == [1.39,1,1])
assert np.all(f2.value == [4,2,512])
assert np.all(np.round(f2.derivative, 2) == [[5.55,0,0],[0,1.39,0],[0,0,2129.35]])
with pytest.raises(ValueError):
f0 = (-4) ** x
with pytest.raises(ZeroDivisionError):
x0 = AD2020([-2],[1,0,0])
f0 = 0 ** x0
def test_neg():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = 2**x + y + z
f2 = -f1
assert f1.value == [7]
assert np.all(np.round(f1.derivative, 2) == [1.39,1,1])
assert f2.value == [-7]
assert np.all(np.round(f2.derivative, 2) == [-1.39,-1,-1])
test_add()
test_radd()
test_sub()
test_rsub()
test_mul()
test_rmul()
test_truediv()
test_rtruediv()
test_pow()
test_rpow()
test_neg()
test_AD2020_init()
test_AD2020_comparisons()
test_AD2020_scalar()
test_AD2020_vector()
print('Passed All Tests!')
| AD2020 | /AD2020-0.0.2.tar.gz/AD2020-0.0.2/tests/test_AD2020.py | test_AD2020.py |
# Test suite for AD2020Root module
import sys
sys.path.append('./AD2020')
import pytest
import numpy as np
from AD2020 import AD2020
from AD2020Fun import AD2020Fun
from AD2020Root import Find
def test_AD2020Root_univ():
'''Test for 1 to 1'''
def test_1():
def f(x):
return (x - 1) ** 2
x0 = AD2020(2)
root = Find(f, x0)
assert np.isclose(root, 1)
def test_2():
def f(x):
return - ((x + 1) ** 2) + 1
x0 = AD2020(4)
root = Find(f, x0)
assert np.isclose(root, 0)
x0 = AD2020(-4)
root = Find(f, x0)
assert np.isclose(root, -2)
def test_3():
def f(x):
return (x - 1) ** 3 + 4
x0 = AD2020(0)
root = Find(f, x0)
assert np.isclose(root, np.cbrt(-4) + 1)
def test_4():
def f(x):
return (x + 1) ** 4 - 2
x0 = AD2020(0)
root = Find(f, x0)
assert np.isclose(root, np.power(2, 1/4) - 1)
x0 = AD2020(-2)
root = Find(f, x0)
assert np.isclose(root, - np.power(2, 1/4) - 1)
def test_5():
def f(x):
return x + AD2020Fun.sin(AD2020Fun.exp(x)) - 1
x0 = AD2020(0)
root = Find(f, x0)
assert np.isclose(root, 0.10432071)
x0 = AD2020(1)
root = Find(f, x0)
assert np.isclose(root, 1.20986396)
x0 = AD2020(2)
root = Find(f, x0)
assert np.isclose(root, 1.70490978)
test_1()
test_2()
test_3()
test_4()
test_5()
def test_AD2020Root_multv():
'''Test for m to 1'''
def test_1():
def f(var):
x, y = var
return x + y
x0 = AD2020(2, [1,0])
y0 = AD2020(5, [0,1])
vars = [x0, y0]
root = Find(f, vars)
assert root[0] == - root[1]
def test_2():
def f(var):
x, y = var
return x ** 2 - y ** 2
x0 = AD2020(-2, [1,0])
y0 = AD2020(5, [0,1])
vars = [x0, y0]
root = Find(f, vars)
assert abs(root[0]) == abs(root[1])
def test_3():
def f(var):
x, y = var
return (x ** 2 + y ** 2) - 1
x0 = AD2020(-2, [1,0])
y0 = AD2020(1, [0,1])
vars = [x0, y0]
root = Find(f, vars)
assert np.isclose(root[0] ** 2 + root[1] ** 2, 1)
def test_4():
def f(var):
x, y, z = var
return (x - 1) ** 2 + (y + 2) ** 2 + (z * 2) ** 2
x0 = AD2020(-2, [1,0,0])
y0 = AD2020(1, [0,1,0])
z0 = AD2020(1, [0,0,1])
vars = [x0, y0, z0]
root = Find(f, vars)
assert np.all(np.round(root,2) == [[1.],[-2.],[0.]])
def test_5():
def f(var):
x, y, z = var
return (x - y) ** 2 - z
x0 = AD2020(-2, [1,0,0])
y0 = AD2020(1, [0,1,0])
z0 = AD2020(1, [0,0,1])
vars = [x0, y0, z0]
root = Find(f, vars)
assert np.isclose(abs(root[0] - root[1]), np.sqrt(root[2]))
test_1()
test_2()
test_3()
test_4()
test_5()
def test_AD2020Root_excep():
'''Test for exception'''
with pytest.raises(Exception):
def f(x):
return (x - 1) ** 2
x0 = 2
root = Find(f, x0)
test_AD2020Root_univ()
test_AD2020Root_multv()
test_AD2020Root_excep()
print('Passed All Tests!')
| AD2020 | /AD2020-0.0.2.tar.gz/AD2020-0.0.2/tests/test_AD2020Root.py | test_AD2020Root.py |
# Test suite for AD2020Fun module
import sys
sys.path.append('./AD2020')
import pytest
import numpy as np
from AD2020 import AD2020
from AD2020Fun import AD2020Fun
def test_AD2020Fun_scalar():
def test_exp():
x = AD2020(2, 1)
f1 = 2 * AD2020Fun.exp(x) + 1
f2 = 2 * AD2020Fun.exp(x,3) + 1
assert np.round(f1.value, 2) == 15.78
assert np.round(f1.derivative, 2) == 14.78
assert np.round(f2.value, 2) == 19
assert np.round(f2.derivative, 2) == 19.78
with pytest.raises(ValueError):
f0 = AD2020Fun.exp(x,-4)
with pytest.raises(ZeroDivisionError):
x0 = AD2020(-2, 1)
f0 = AD2020Fun.exp(x0,0)
def test_log():
x = AD2020(2, 1)
f1 = 2 * AD2020Fun.log(x) + 1
f2 = 2 * AD2020Fun.log(x,10) + 1
assert np.round(f1.value, 2) == 2.39
assert np.round(f1.derivative, 2) == 1
assert np.round(f2.value, 2) == 1.60
assert np.round(f2.derivative, 2) == 0.43
with pytest.raises(ValueError):
x0 = AD2020(0, 1)
f0 = 2 * AD2020Fun.log(x0) + 1
with pytest.raises(ValueError):
x0 = AD2020(-2, 1)
f0 = 2 * AD2020Fun.log(x0) + 1
def test_sin():
x = AD2020(2, 1)
f = 2 * AD2020Fun.sin(x) + 1
assert np.round(f.value, 2) == 2.82
assert np.round(f.derivative, 2) == -0.83
def test_cos():
x = AD2020(2, 1)
f = 2 * AD2020Fun.cos(x) + 1
assert np.round(f.value, 2) == 0.17
assert np.round(f.derivative, 2) == -1.82
def test_tan():
x = AD2020(2, 1)
f = 2 * AD2020Fun.tan(x) + 1
assert np.round(f.value, 2) == -3.37
assert np.round(f.derivative, 2) == 11.55
with pytest.raises(ValueError):
x0 = AD2020(np.pi/2, 1)
f0 = 2 * AD2020Fun.tan(x0) + 1
def test_arcsin():
x = AD2020(0, 1)
f = 2 * AD2020Fun.arcsin(x) + 1
assert f.value == 1
assert f.derivative == 2
with pytest.raises(ValueError):
x0 = AD2020(-2, 1)
f0 = 2 * AD2020Fun.arcsin(x0) + 1
def test_arccos():
x = AD2020(0, 1)
f = 2 * AD2020Fun.arccos(x) + 1
assert f.value == 1 + np.pi
assert f.derivative == -2
with pytest.raises(ValueError):
x0 = AD2020(-2, 1)
f0 = 2 * AD2020Fun.arccos(x0) + 1
def test_arctan():
x = AD2020(2, 1)
f = 2 * AD2020Fun.arctan(x) + 1
assert np.round(f.value, 2) == 3.21
assert np.round(f.derivative, 2) == 0.4
def test_sinh():
x = AD2020(2, 1)
f = 2 * AD2020Fun.sinh(x) + 1
assert np.round(f.value, 2) == 8.25
assert np.round(f.derivative, 2) == 7.52
def test_cosh():
x = AD2020(2, 1)
f = 2 * AD2020Fun.cosh(x) + 1
assert np.round(f.value, 2) == 8.52
assert np.round(f.derivative, 2) == 7.25
def test_tanh():
x = AD2020(2, 1)
f = 2 * AD2020Fun.tanh(x) + 1
assert np.round(f.value, 2) == 2.93
assert np.round(f.derivative, 2) == 0.14
def test_logistic():
x = AD2020(2, 1)
f = 2 * AD2020Fun.logistic(x) + 1
assert np.round(f.value, 2) == 2.76
assert np.round(f.derivative, 2) == 0.21
def test_sqrt():
x = AD2020(2, 1)
f = 2 * AD2020Fun.sqrt(x) + 1
assert np.round(f.value, 2) == 3.83
assert np.round(f.derivative, 2) == 0.71
with pytest.raises(ValueError):
x0 = AD2020(-2, 1)
f0 = AD2020Fun.sqrt(x0)
with pytest.raises(ZeroDivisionError):
x0 = AD2020(0, 1)
f0 = AD2020Fun.sqrt(x0)
test_exp()
test_log()
test_sin()
test_cos()
test_tan()
test_arcsin()
test_arccos()
test_arctan()
test_sinh()
test_cosh()
test_tanh()
test_logistic()
test_sqrt()
def test_AD2020Fun_vector():
def test_exp():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.exp(x) + 2*y + z
f2 = AD2020([AD2020Fun.exp(x), AD2020Fun.exp(x+y), AD2020Fun.exp(x+z)])
f3 = AD2020([AD2020Fun.exp(x,3), x+y, z*2])
assert f1 == AD2020(np.e+7, [np.e,2,1])
assert np.all(np.round(f2.value,2) == np.round([np.e**1,np.e**3,np.e**4],2))
assert np.all(np.round(f2.derivative,2) == np.round([[np.e**1,0,0],[np.e**3,np.e**3,0],[np.e**4,0,np.e**4]],2))
assert np.all(f3.value == [3,3,6])
assert np.all((np.round(f3.derivative,2) == [[3.30,0,0],[1,1,0],[0,0,2]]))
with pytest.raises(ValueError):
f0 = AD2020Fun.exp(x,-4)
with pytest.raises(ZeroDivisionError):
x0 = AD2020(-2, [1,0,0])
f0 = AD2020Fun.exp(x0,0)
def test_log():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.log(x) + 2*y + z
f2 = AD2020([AD2020Fun.log(x), AD2020Fun.log(x+y), AD2020Fun.log(x+z)])
f3 = AD2020([AD2020Fun.log(x,10), AD2020Fun.log(x+y,10), z*2])
assert f1 == AD2020(7, [1,2,1])
assert np.all(np.round(f2.value,2) == np.round([0,np.log(3),np.log(4)],2))
assert np.all(np.round(f2.derivative,2) == np.round([[1,0,0],[1/3,1/3,0],[0.25,0,0.25]],2))
assert np.all(np.round(f3.value,2) == [0,0.48,6])
assert np.all((np.round(f3.derivative,2) == [[0.43,0,0],[0.14,0.14,0],[0,0,2]]))
with pytest.raises(ValueError):
x0 = AD2020([-1],[1,0,0])
f0 = AD2020Fun.log(x0)
with pytest.raises(ZeroDivisionError):
f0 = AD2020Fun.log(x,0)
def test_sin():
x = AD2020([np.pi/2],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.sin(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.sin(x), AD2020Fun.sin(x+y), AD2020Fun.sin(x+z)])
assert f1.value == 8
assert np.all(np.round(f1.derivative,2) == [0,2,1])
assert np.all(np.round(f2.value,2) == [2,-0.42,-0.99])
assert np.all(np.round(f2.derivative,2) == [[0,0,0],[-0.91,-0.91,0],[-0.14,0,-0.14]])
def test_cos():
x = AD2020([np.pi/2],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.cos(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.cos(x), AD2020Fun.cos(x+y), AD2020Fun.cos(x+z)])
assert f1 == AD2020(7, [-1,2,1])
assert np.all(np.round(f2.value,2) == [0,-0.91,-0.14])
assert np.all(np.round(f2.derivative,2) == [[-2,0,0],[0.42,0.42,0],[0.99,0,0.99]])
def test_tan():
x = AD2020([np.pi/3],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.tan(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.tan(x), AD2020Fun.tan(x+y), AD2020Fun.tan(2*x+z)])
assert np.round(f1.value,2) == 8.73
assert np.all(np.round(f1.derivative,2) == [4,2,1])
assert np.all(np.round(f2.value,2) == [3.46,-0.09,-2.49])
assert np.all(np.round(f2.derivative,2) == [[8,0,0],[1.01,1.01,0],[14.39,0,7.20]])
with pytest.raises(ValueError):
x0 = AD2020(np.pi/2, [1,0,0])
f0 = 2 * AD2020Fun.tan(x0) + 1
def test_arcsin():
x = AD2020([0],[1,0,0])
y = AD2020([-0.5],[0,1,0])
z = AD2020([0.5],[0,0,1])
f1 = AD2020Fun.arcsin(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.arcsin(x), AD2020Fun.arcsin(x+y), AD2020Fun.arcsin(2*x+z)])
assert np.round(f1.value,2) == -0.5
assert np.all(np.round(f1.derivative,2) == [1,2,1])
assert np.all(np.round(f2.value,2) == [0,-0.52,0.52])
assert np.all(np.round(f2.derivative,2) == [[2,0,0],[1.15,1.15,0],[2.31,0,1.15]])
with pytest.raises(ValueError):
x0 = AD2020(-2, [1,0,0])
f0 = 2 * AD2020Fun.arcsin(x0) + 1
def test_arccos():
x = AD2020([0],[1,0,0])
y = AD2020([-0.5],[0,1,0])
z = AD2020([0.5],[0,0,1])
f1 = AD2020Fun.arccos(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.arccos(x), AD2020Fun.arccos(x+y), AD2020Fun.arccos(2*x+z)])
assert np.round(f1.value,2) == 1.07
assert np.all(np.round(f1.derivative,2) == [-1,2,1])
assert np.all(np.round(f2.value,2) == [3.14,2.09,1.05])
assert np.all(np.round(f2.derivative,2) == [[-2,0,0],[-1.15,-1.15,0],[-2.31,0,-1.15]])
with pytest.raises(ValueError):
x0 = AD2020(-2, [1,0,0])
f0 = 2 * AD2020Fun.arccos(x0) + 1
def test_arctan():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.arctan(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.arctan(x), AD2020Fun.arctan(x+y), AD2020Fun.arctan(2*x+z)])
assert np.round(f1.value,2) == 7.79
assert np.all(np.round(f1.derivative,2) == [0.5,2,1])
assert np.all(np.round(f2.value,2) == [1.57,1.25,1.37])
assert np.all(np.round(f2.derivative,2) == [[1,0,0],[0.1,0.1,0],[0.08,0,0.04]])
def test_sinh():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.sinh(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.sinh(x), AD2020Fun.sinh(x+y), AD2020Fun.sinh(2*x+z)])
assert np.round(f1.value,2) == 8.18
assert np.all(np.round(f1.derivative,2) == [1.54,2,1])
assert np.all(np.round(f2.value,2) == [2.35,10.02,74.20])
assert np.all(np.round(f2.derivative,2) == [[3.09,0,0],[10.07,10.07,0],[148.42,0,74.21]])
def test_cosh():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.cosh(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.cosh(x), AD2020Fun.cosh(x+y), AD2020Fun.cosh(2*x+z)])
assert np.round(f1.value,2) == 8.54
assert np.all(np.round(f1.derivative,2) == [1.18,2,1])
assert np.all(np.round(f2.value,2) == [3.09,10.07,74.21])
assert np.all(np.round(f2.derivative,2) == [[2.35,0,0],[10.02,10.02,0],[148.41,0,74.20]])
def test_tanh():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.tanh(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.tanh(x), AD2020Fun.tanh(x+y), AD2020Fun.tanh(2*x+z)])
assert np.round(f1.value,2) == 7.76
assert np.all(np.round(f1.derivative,2) == [0.42,2,1])
assert np.all(np.round(f2.value,2) == [1.52,1.00,1.00])
assert np.all(np.round(f2.derivative,5) == [[0.83995,0,0],[0.00987,0.00987,0],[0.00036,0,0.00018]])
def test_logistic():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.logistic(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.logistic(x), AD2020Fun.logistic(x+y), AD2020Fun.logistic(2*x+z)])
assert np.round(f1.value,2) == 7.73
assert np.all(np.round(f1.derivative,2) == [0.20,2,1])
assert np.all(np.round(f2.value,2) == [1.46,0.95,0.99])
assert np.all(np.round(f2.derivative,3) == [[0.393,0,0],[0.045,0.045,0],[0.013,0,0.007]])
def test_sqrt():
x = AD2020([1],[1,0,0])
y = AD2020([2],[0,1,0])
z = AD2020([3],[0,0,1])
f1 = AD2020Fun.sqrt(x) + 2*y + z
f2 = AD2020([2*AD2020Fun.sqrt(x), AD2020Fun.sqrt(x+y), AD2020Fun.sqrt(2*x+z)])
assert np.round(f1.value,2) == 8
assert np.all(np.round(f1.derivative,2) == [0.5,2,1])
assert np.all(np.round(f2.value,2) == [2,1.73,2.24])
assert np.all(np.round(f2.derivative,2) == [[1,0,0],[0.29,0.29,0],[0.45,0,0.22]])
with pytest.raises(ValueError):
x0 = AD2020([-1],[1,0,0])
f0 = AD2020Fun.sqrt(x0)
with pytest.raises(ZeroDivisionError):
x0 = AD2020([0],[1,0,0])
f0 = AD2020Fun.sqrt(x0)
test_exp()
test_log()
test_sin()
test_cos()
test_tan()
test_arcsin()
test_arccos()
test_arctan()
test_sinh()
test_cosh()
test_tanh()
test_logistic()
test_sqrt()
test_AD2020Fun_scalar()
test_AD2020Fun_vector()
print('Passed All Tests!')
| AD2020 | /AD2020-0.0.2.tar.gz/AD2020-0.0.2/tests/test_AD2020Fun.py | test_AD2020Fun.py |
import json
with open('coverage.json') as f:
report = json.load(f)
percent_covered = report['totals']['percent_covered']
if percent_covered < 90:
raise Exception('The total code coverage was less than 90%')
else:
print(f'The tests covered {percent_covered:.2f}% of the code.')
| AD27 | /ad27-0.0.1-py3-none-any.whl/check_coverage.py | check_coverage.py |
#!/usr/bin/env python3
import numpy as np
from autodiff.dual import Dual
class Node:
"""
    Node class to implement reverse mode auto differentiation. Elementary operations are overloaded to create the tree structure
    that represents the function. A forward pass (`_eval`) computes node values and local partial derivatives; a reverse pass (`_sens`) accumulates the sensitivities (adjoints) of the inputs.
"""
_supported_scalars = (int, float, np.float64)
def __init__(self, key, *, value = None, left_partial = None , right_partial = None, operation = None, left = None, right = None, sensitivity = 0):
self.key = key
self.left = left
self.right = right
self.value = value
        self.left_partial = left_partial # note: storing partials on the node itself does not account for reused (recycled) nodes unless leaf nodes are redefined
self.right_partial = right_partial
self.operation = operation # the elementary operation performed at each node
self.sensitivity = sensitivity
self._eval()
def __add__(self, other):
"""
overload addition operation
"""
#self.partial = 1 #### Calculate partial at the creation step will not work for sin, cos, etc!!!
#other.partial = 1
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
if isinstance(other, self._supported_scalars):
operation = lambda x: x + other
return Node('add', left = self, right = None, operation = operation)
else:
operation = lambda x,y: x+y
return Node('add', left = self, right = other, operation = operation)
def __radd__(self, other):
"""
overload reverse addition operation
"""
return self.__add__(other)
def __sub__(self, other):
#self.partial = 1
#other.partial = -1
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
if isinstance(other, self._supported_scalars):
operation = lambda x: x - other
return Node('sub', left = self, right = None, operation = operation)
else:
operation = lambda x,y: x-y
return Node('sub', left = self, right = other, operation = operation)
def __rsub__(self, other):
"""
overload reverse subtraction operation
"""
return -self.__sub__(other)
def __mul__(self, other):
#self.partial = other.value
#other.partial = self.value
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
if isinstance(other, self._supported_scalars):
operation = lambda x: x*other
return Node('mul', left = self, right = None, operation = operation)
else:
operation = lambda x,y: x*y
return Node('mul', left = self, right = other, operation = operation)
def __rmul__(self, other):
"""
overload reverse multiplication operation
"""
return self.__mul__(other)
def __truediv__(self, other):
"""
overload division operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
if isinstance(other, self._supported_scalars):
operation = lambda x: x/other
return Node('div', left = self, right = None, operation = operation)
else:
operation = lambda x,y: x/y
return Node('div', left = self, right = other, operation = operation)
def __rtruediv__(self, other):
"""
overload reverse division operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
else:
operation = lambda x: other/x
return Node('div', left = self, right = None, operation = operation)
def __pow__(self, other):
"""
overload the power operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
if isinstance(other, self._supported_scalars):
operation = lambda x: x**other
return Node('pow', left = self, right = None, operation = operation)
else:
operation = lambda x,y: x**y
return Node('pow', left = self, right = other, operation = operation)
def __rpow__(self, other):
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
else:
operation = lambda x: other**x
return Node('exp', left = self, right = None, operation = operation)
def __neg__(self):
"""
overload the unary negation operation
"""
operation = lambda x: -x
return Node('neg', left = self, right = None, operation = operation)
def __lt__(self, other):
"""
overload the < operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
elif isinstance(other, Node):
return self.value < other.value
else:
return self.value < other
def __gt__(self, other):
"""
overload the > operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
elif isinstance(other, Node):
return self.value > other.value
else:
return self.value > other
def __eq__(self, other):
"""
overload the = operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
elif isinstance(other, Node):
return self.value == other.value and self.sensitivity == other.sensitivity
else:
return self.value == other
def __ne__(self, other):
"""
overload the != operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
elif isinstance(other, Node):
return self.value != other.value or self.sensitivity != other.sensitivity
else:
return self.value != other
def __le__(self, other):
"""
overload the <= operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
elif isinstance(other, Node):
return self.value <= other.value
else:
return self.value <= other
def __ge__(self, other):
"""
overload the >= operation
"""
if not isinstance(other, (*self._supported_scalars, Node)):
raise TypeError(f'Type not supported for reverse mode auto differentiation')
elif isinstance(other, Node):
return self.value >= other.value
else:
return self.value >= other
def __str__(self):
return self._pretty(self)
def _eval(self):
"""
Forward pass of the reverse mode auto differentiation.
Calculate the value of all nodes of the tree, as well as the partial derivative of the current node wrt all child nodes.
"""
if (self.left is None) and (self.right is None):
return self.value
elif self.value is not None:
return self.value
elif self.right is None:
dual = self.operation(Dual(self.left._eval())) # real part evaluates the current node, dual part evaluates the partial derivative
self.value = dual.real
self.left_partial = dual.dual
return self.value
else:
self.left._eval()
self.right._eval()
dual1 = Dual(self.left.value, 1)
dual2 = Dual(self.right.value, 0)
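            # seed the dual part with 1 on the left operand and 0 on the right to extract the partial w.r.t. the left child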
dual = self.operation(dual1, dual2)
self.value = dual.real
self.left_partial = dual.dual
self.right_partial = self.operation(Dual(self.left.value, 0), Dual(self.right.value, 1)).dual
return self.value
def _sens(self):
"""
Reverse pass of the reverse mode auto differentiation.
Calculate the sensitivity (adjoint) of all child nodes with respect to the current node
"""
if (self.left is None) and (self.right is None):
pass
elif self.right is None:
self.left.sensitivity += self.sensitivity*self.left_partial
self.left._sens()
else:
self.left.sensitivity += self.sensitivity*self.left_partial
self.right.sensitivity += self.sensitivity*self.right_partial
self.left._sens()
self.right._sens()
def _reset(self):
"""
        Reset the sensitivity of all child nodes to zero to allow reverse mode auto differentiation of the next component of a vector function.
"""
if (self.left is None) and (self.right is None):
pass
elif self.right is None:
self.left.sensitivity = 0
self.left._reset()
else:
self.left.sensitivity = 0
self.right.sensitivity = 0
self.left._reset()
self.right._reset()
@staticmethod
def _pretty(node):
"""Pretty print the expression tree (called recursively)"""
if node.left is None and node.right is None:
return f'{node.key}' + f': value = {node.value}'
if node.left is not None and node.right is None:
return f'{node.key}({node._pretty(node.left)})' + f': value = {node.value}'
return f'{node.key}({node._pretty(node.left)}, {node._pretty(node.right)})' + f': value = {node.value}'
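# Example sketch (hypothetical values) of reverse-mode differentiation with the Node class above:
#     x = Node('x', value=2.0)       # leaf node with a concrete value
#     f = x * x + 3 * x              # forward pass runs eagerly in __init__ (f.value == 10.0)
#     f.sensitivity = 1
#     f._sens()                      # reverse pass accumulates adjoints into the leaves
#     x.sensitivity                  # df/dx = 2*x + 3 = 7.0 at x = 2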
| AD27 | /ad27-0.0.1-py3-none-any.whl/autodiff/reverse.py | reverse.py |
#!/usr/bin/env python3
import numpy as np
from autodiff.dual import Dual
from autodiff.reverse import Node
def sin(x):
"""
overwrite sine function
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.sin(x.real), np.cos(x.real)*x.dual)
elif type(x) is Node:
return Node('sin', left = x, operation = lambda x:sin(x))
else:
return np.sin(x)
def cos(x):
"""
overwrite cosine function
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.cos(x.real), -np.sin(x.real)*x.dual)
elif type(x) is Node:
return Node('cos', left = x, operation = lambda x:cos(x))
else:
return np.cos(x)
def tan(x):
"""
overwrite tangent
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.tan(x.real), 1/(np.cos(x.real))**2*x.dual)
elif type(x) is Node:
return Node('tan', left = x, operation = lambda x:tan(x))
else:
return np.tan(x)
def log(x):
"""
overwrite log
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.log(x.real), 1/x.real*x.dual)
elif type(x) is Node:
return Node('log', left = x, operation = lambda x:log(x))
else:
return np.log(x)
def log2(x):
"""
    overwrite base-2 logarithm
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.log2(x.real), (1/(x.real*np.log(2)))*x.dual)
elif type(x) is Node:
return Node('log2', left = x, operation = lambda x:log2(x))
else:
return np.log2(x)
def log10(x):
"""
overwrite log10
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.log10(x.real), (1/(x.real*np.log(10)))*x.dual)
elif type(x) is Node:
return Node('log10', left = x, operation = lambda x:log10(x))
else:
return np.log10(x)
def sinh(x):
"""
    overwrite hyperbolic sine
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.sinh(x.real), np.cosh(x.real) * x.dual)
elif type(x) is Node:
return Node('sinh', left = x, operation = lambda x:sinh(x))
else:
return np.sinh(x)
def cosh(x):
"""
    overwrite hyperbolic cosine
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.cosh(x.real), np.sinh(x.real) * x.dual)
elif type(x) is Node:
return Node('cosh', left = x, operation = lambda x:cosh(x))
else:
return np.cosh(x)
def tanh(x):
"""
    overwrite hyperbolic tangent
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.tanh(x.real), x.dual / np.cosh(x.real)**2)
elif type(x) is Node:
return Node('tanh', left = x, operation = lambda x:tanh(x))
else:
return np.tanh(x)
def exp(x):
"""
overwrite exponential
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.exp(x.real), np.exp(x.real) * x.dual)
elif type(x) is Node:
return Node('exp', left = x, operation = lambda x:exp(x))
else:
return np.exp(x)
def sqrt(x):
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.sqrt(x.real), 1/2/np.sqrt(x.real) * x.dual)
elif type(x) is Node:
return Node('sqrt', left = x, operation = lambda x:sqrt(x))
else:
return np.sqrt(x)
def power(x, other):
if type(x) is Node:
        return Node('pow', left = x, operation = lambda x: power(x, other))
else:
return x.__pow__(other)
def arcsin(x):
"""
overwrite arc sine
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.arcsin(x.real), 1 / np.sqrt(1 - x.real ** 2) * x.dual)
elif type(x) is Node:
return Node('arcsin', left = x, operation = lambda x:arcsin(x))
else:
return np.arcsin(x)
def arccos(x):
"""
overwrite arc cosine
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.arccos(x.real), -1 / np.sqrt(1 - x.real**2) * x.dual)
elif type(x) is Node:
return Node('arccos', left = x, operation = lambda x:arccos(x))
else:
return np.arccos(x)
def arctan(x):
"""
overwrite arc tangent
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.arctan(x.real), 1 / (1 + x.real**2) * x.dual)
elif type(x) is Node:
return Node('arctan', left = x, operation = lambda x:arctan(x))
else:
return np.arctan(x)
def logist(x, loc=0, scale=1):
"""
overwrite logistic
    loc and scale default to 0 and 1
"""
supported_types = (int, float, np.float64, Dual, Node)
if type(x) not in supported_types:
raise TypeError('type of input argument not supported')
elif type(x) is Dual:
return Dual(np.exp((loc-x.real)/scale)/(scale*(1+np.exp((loc-x.real)/scale))**2),
np.exp((loc-x.real)/scale)/(scale*(1+np.exp((loc-x.real)/scale))**2)/ \
(scale*(1+np.exp((loc-x.real)/scale))**2)**2* \
((-1/scale)*(scale*(1+np.exp((loc-x.real)/scale))**2)- \
((loc-x.real)/scale)*(scale*2*(1+np.exp((loc-x.real)/scale)))*np.exp((loc-x.real)/scale)*(-1)/scale)*x.dual)
elif type(x) is Node:
        return Node('logist', left = x, operation = lambda x: logist(x, loc, scale))
else:
return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2) | AD27 | /ad27-0.0.1-py3-none-any.whl/autodiff/trig.py | trig.py |
#!/usr/bin/env python3
"""Dual number implementation for AD forward mode.
This module contains dunder methods to overload built-in Python operators.
"""
import numpy as np
class Dual:
_supported_scalars = (int, float, np.float64)
def __init__(self, real, dual = 1):
self.real = real
self.dual = dual
def __add__(self, other):
"""
overload add operation
"""
if not isinstance(other, (*self._supported_scalars, Dual)):
raise TypeError(f'Type not supported for Dual number operations')
if isinstance(other, self._supported_scalars):
return Dual(self.real + other, self.dual)
else:
return Dual(self.real + other.real, self.dual + other.dual)
def __radd__(self, other):
"""
        overload reverse addition operation
"""
return self.__add__(other)
def __sub__(self, other):
"""
overload subtraction operation
"""
if not isinstance(other, (*self._supported_scalars, Dual)):
raise TypeError(f'Type not supported for Dual number operations')
if isinstance(other, self._supported_scalars):
return Dual(self.real - other, self.dual)
else:
return Dual(self.real - other.real, self.dual - other.dual)
def __rsub__(self, other):
"""
overload reverse subtraction operation
"""
return Dual(other - self.real, -self.dual)
def __mul__(self, other):
"""
overwrite multiplication operation
"""
if not isinstance(other, (*self._supported_scalars, Dual)):
raise TypeError(f'Type not supported for Dual number operations')
if isinstance(other, self._supported_scalars):
return Dual(other*self.real, other*self.dual)
else:
return Dual(self.real*other.real, self.dual*other.real + self.real*other.dual)
def __rmul__(self, other):
"""
overwrite reverse multiplication operation
"""
return self.__mul__(other)
def __pow__(self, other):
"""
overwrite power law operation
"""
if not isinstance(other, self._supported_scalars):
raise TypeError(f'Type not supported for Dual number operations')
if isinstance(other, self._supported_scalars):
return Dual(self.real**other, other*self.real**(other - 1)*self.dual)
def __rpow__(self, other):
"""
overwrite reverse power law operation
"""
if not isinstance(other, self._supported_scalars):
raise TypeError(f'Type not supported for Dual number operations')
if isinstance(other, self._supported_scalars):
return Dual(other**self.real, np.log(other)*other**self.real*self.dual)
def __truediv__(self, other):
"""
Overload the division operator (/) to handle Dual class
"""
if not isinstance(other, (*self._supported_scalars, Dual)):
raise TypeError(f'Type not supported for Dual number operations')
if isinstance(other, self._supported_scalars):
return Dual(self.real/other,self.dual/other)
else:
return Dual(self.real/other.real, self.dual/other.real - self.real*other.dual/other.real/other.real)
def __rtruediv__(self, other):
"""
Overload the reverse division operator (/) to handle Dual class
"""
return Dual(other/self.real, -other*self.dual/self.real/self.real )
def __neg__(self):
"""
Overload the negative operator to handle Dual class
"""
return Dual(-self.real, -self.dual)
    def __ne__(self, other):
"""
Overload the inequality operator (!=) to handle Dual class
"""
if isinstance(other, Dual):
return self.real != other.real
return self.real != other
def __lt__(self, other):
"""
Overload the less than operator to handle Dual class
"""
if isinstance(other, Dual):
return self.real < other.real
return self.real < other
def __gt__(self, other):
"""
Overload the greater than operator to handle Dual class
"""
if isinstance(other, Dual):
return self.real > other.real
return self.real > other
def __le__(self, other):
"""
Overload the <= operator to handle Dual class
"""
if isinstance(other, Dual):
return self.real <= other.real
return self.real <= other
def __ge__(self, other):
"""
Overload the >= operator to handle Dual class
"""
if isinstance(other, Dual):
return self.real >= other.real
return self.real >= other
def __repr__(self):
"""
Print class definition
"""
return f'Dual({self.real},{self.dual})'
def __str__(self):
"""
prettier string representation
"""
return f'Forward mode dual number object(real: {self.real}, dual: {self.dual})'
def __len__(self):
"""
        Return 1 when both the real and dual parts are plain scalars, 0 otherwise
"""
return (type(self.real) in (int, float)) and (type(self.dual) in (int, float))
def __eq__(self,other):
if isinstance(other, Dual):
return (self.real == other.real and self.dual == other.dual)
return self.real == other
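

# A quick, self-contained sanity check of the overloads above; a minimal sketch, not part
# of the library API.  For f(x) = x**2 + 3*x at x = 2 (dual part 1), forward mode should
# give f(2) = 10 and f'(2) = 7.
if __name__ == "__main__":
    x = Dual(2.0)            # real part 2.0, dual part defaults to 1
    y = x ** 2 + 3 * x
    print(y)                 # Forward mode dual number object(real: 10.0, dual: 7.0)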
| AD27 | /ad27-0.0.1-py3-none-any.whl/autodiff/dual.py | dual.py |
#!/usr/bin/env python3
import numpy as np
import autodiff.trig as tr
from autodiff.dual import Dual
from autodiff.reverse import Node
class ForwardDiff:
def __init__(self, f):
self.f = f
def derivative(self, x, p=[1]):
"""
Parameters
==========
x : constant associated with each component of vector x
        p : direction at which the directional derivative is evaluated
Returns
=======
        the dual part of a Dual number
Example:
=======
z_i = Dual(x_i, p_i)
f(z).real = f(x)
f(z).dual = D_p_{f}
"""
scalars = [float, int, np.float64]
if type(x) in scalars:
z = Dual(x)
elif isinstance(x, list) or isinstance(x, np.ndarray):
if len(p)!=len(x):
raise Exception('length of p should be the same as length of x')
if len(x)==1:
z=Dual(x[0])
else:
z = [0] * len(x)
for i in range(len(x)):
z[i] = Dual(x[i], p[i])
else:
raise TypeError(f'Unsupported type for derivative function. X is of type {type(x)}')
if type(self.f(z)) is Dual:
return self.f(z).dual
else:
output=[]
for i in self.f(z):
output.append(i.dual)
return output
def Jacobian(self, x):
# construct a dual number
deri_array = []
for i in range(len(x)):
p = np.zeros(len(x))
p[i] = 1
deri_array.append(self.derivative(x, p))
return np.array(deri_array).T
class ReverseDiff:
def __init__(self, f):
self.f = f
def Jacobian(self, vector):
iv_nodes = [Node(1-k) for k in range(len(vector))] #nodes of independent variables, key value numbering according to vs
for i, iv_node in enumerate(iv_nodes):
iv_node.value = vector[i]
tree = self.f([*iv_nodes])
print(type(tree))
if type(tree) is Node:
#tree._eval()
#print(tree)
tree.sensitivity = 1
tree._sens()
return [iv_node.sensitivity for iv_node in iv_nodes]
else:
deri_array = []
for line in tree:
#line._eval()
line._reset()
line.sensitivity=1
line._sens()
line_partials = [iv_node.sensitivity for iv_node in iv_nodes]
deri_array.append(line_partials )
return deri_array
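

# A minimal usage sketch; the target function below is an assumed example, not part of the
# package.  ForwardDiff.derivative returns a directional derivative, and Jacobian assembles
# the gradient one coordinate direction at a time.
if __name__ == "__main__":
    def f(v):
        return v[0] * v[1] + tr.exp(v[0])
    fd = ForwardDiff(f)
    print(fd.derivative([1.0, 2.0], p=[1, 0]))   # df/dx0 = x1 + exp(x0) ~ 4.7183
    print(fd.Jacobian([1.0, 2.0]))               # ~ [4.7183, 1.0]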
| AD27 | /ad27-0.0.1-py3-none-any.whl/autodiff/autoDiff.py | autoDiff.py |
from setuptools import setup, find_packages
setup(
name='ADA-sdk',
version='2.9',
packages=find_packages(),
description='Python SDK for Automation data analytics API',
include_package_data=True,
install_requires=[
'requests',
]
)
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/setup.py | setup.py |
import json
import requests
from ada.config import ADA_API_URL, DEFAULT_HEADERS
class Request(object):
def __init__(self, token):
self.token = token
def send_request(self, api_path, data, method):
url = ADA_API_URL + api_path + "?api-key=" + self.token
main_data = json.dumps(data)
headers = DEFAULT_HEADERS
if method == "POST":
response = requests.post(
url, data=main_data, headers=headers
)
elif method == "PATCH":
response = requests.patch(
url, data=main_data, headers=headers
)
else:
raise Exception("method is not supported")
if response.status_code == 201 or response.status_code == 200:
return response.json()
raise Exception(response.text)
def send_request_form(self, api_path, folder, method, files):
url = ADA_API_URL + api_path + folder + "/upload/?api-key=" + self.token
if method == "POST":
response = requests.post(
url, files=files
)
elif method == "PATCH":
response = requests.patch(
url, files=files
)
else:
raise Exception("method is not supported")
if response.status_code == 201 or response.status_code == 200:
return response.json()
raise Exception(response.text)
class BaseADA(Request):
url = None
def create(self, kwargs):
reps = self.send_request(self.url, data=kwargs, method="POST")
return reps["_id"]["$oid"]
def create_form(self, folder, files):
reps = self.send_request_form(self.url, folder=folder, method="POST", files=files)
return reps
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/base.py | base.py |
import time
import os
import re
from ada.features import Execution, TestCase, UploadImage
def get_keyword_failed(data, keyword=""):
for func in data:
if func["status"] != "PASS":
if keyword:
keyword += "."
keyword += func["kwname"]
keyword = get_keyword_failed(func["functions"], keyword)
break
return keyword
class BaseListener:
ROBOT_LISTENER_API_VERSION = 2
API_KEY = ""
PROJECT_ID = ""
def __init__(self):
"""Is used to init variables, objects, ... to support for generating report
Args:
sampleVar: TBD
Returns:
NA
"""
self.execution = None
self.dict_exe = {}
self.arr_exe = []
self.step = {}
def start_suite(self, name, attrs):
"""This event will be trigger at the beginning of test suite.
Args:
name: TCs name
attrs: Attribute of test case can be query as dictionary type
Returns:
NA
"""
self.image = []
self.name = name
self.index = -1
parent = None
if self.arr_exe:
functions = []
id_parent = self.arr_exe[-1]
parent = self.dict_exe[id_parent]
if id_parent in self.step and self.step[id_parent]:
functions = self.step[id_parent][0]
try:
Execution(self.API_KEY).up_log(
parent, functions=functions
)
except Exception:
pass
try:
self.execution = Execution(self.API_KEY).create(
attrs["longname"], self.PROJECT_ID, attrs["totaltests"],
parent=parent, starttime=attrs["starttime"], endtime="",
doc=attrs["doc"], source=attrs["source"]
)
except Exception as e:
pass
self.dict_exe[attrs["id"]] = self.execution
self.step[attrs["id"]] = {}
self.arr_exe.append(attrs["id"])
def end_suite(self, name, attrs):
function = []
if attrs["id"] in self.step and self.step[attrs["id"]]:
function = self.step[attrs["id"]][0]
try:
Execution(self.API_KEY).up_log(
self.dict_exe[attrs["id"]], log_teardown="this is log teardown",
status="complete", functions=function, duration=attrs["elapsedtime"],
endtime=attrs["endtime"]
)
except Exception:
pass
del self.arr_exe[-1]
def start_test(self, name, attrs):
self.image = []
self.step[name] = {}
self.index = -1
self.start_time = time.time()
self.arr_exe.append(name)
def end_test(self, name, attrs):
failed_keyword = ""
if attrs['status'] == 'PASS':
status = "passed"
else:
status = "failed"
failed_keyword = get_keyword_failed(self.step[name][0])
result = None
try:
if not result:
result = TestCase(self.API_KEY).create(
name, status, attrs["elapsedtime"], self.execution,
failed_reason=attrs["message"], functions=self.step[name][0],
starttime=attrs["starttime"], endtime=attrs["endtime"],
failed_keyword=failed_keyword
)
if result and self.image:
UploadImage(self.API_KEY).create(result, self.image)
except Exception:
pass
self.step[name] = {}
del self.arr_exe[-1]
self.image = []
def start_keyword(self, name, attrs):
self.log_test = []
self.index += 1
self.step[self.arr_exe[-1]].setdefault(self.index, [])
def end_keyword(self, name, attrs):
# print("end key ", attrs)
attrs["functions"] = []
attrs["log"] = self.log_test
index = self.index + 1
key = self.arr_exe[-1]
if index in self.step[key] and self.step[key][index]:
attrs["functions"] = self.step[key][index]
self.step[key][index] = []
self.step[key][self.index].append(attrs)
self.index -= 1
self.log_test = []
self.check = True
def log_message(self, msg):
message = msg["message"]
result = re.search("(([<]([\w\W\-.\/]+\.(png|jpg))[>])|([\w-]+\.(png|jpg)))", message)
real_image = None
if result:
data = result.group(1).strip("<>")
if "/" in data or "\\" in data:
image_path = data
if "/" in data:
image = data.split("/")[-1]
else:
image = data.split("\\")[-1]
else:
image_path = os.path.join(os.getcwd(), data.strip())
image = data
try:
if os.path.isfile(image_path):
self.image.append(('screenshot', open(image_path, "rb")))
real_image = image
except:
pass
msg["image"] = real_image
self.log_test.append(msg)
# def message(self, msg):
# print('\n Listener detect message: %s' %(msg))
def close(self):
print('\n Close Suite') | ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/listener.py | listener.py |
import os
ADA_API_URL = os.getenv("ADA_API_URL", "http://10.128.220.229:86/api/")
DEFAULT_HEADERS = {
'content-type': "application/json"
}
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/config/api_config.py | api_config.py |
from .api_config import ADA_API_URL
from .api_config import DEFAULT_HEADERS
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/config/__init__.py | __init__.py |
from ada.base import BaseADA
class UploadImage(BaseADA):
url = "test-case/"
def create(self, folder, files):
return super(UploadImage, self).create_form(folder, files)
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/features/upload_image.py | upload_image.py |
from ada.base import BaseADA
class Execution(BaseADA):
url = "execution/"
def create(self, test_suite, project_id, total_test_cases=None, parent=None, **kwargs):
data = {
'test_suite': test_suite,
'project_id': project_id,
'total_test_cases': total_test_cases or 0,
"parent": parent
}
data.update(kwargs)
return super(Execution, self).create(data)
def up_log(self, execution_id, **kwargs):
url = "{}{}".format(self.url, execution_id)
reps = self.send_request(url, data=kwargs, method="PATCH")
return reps
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/features/execution.py | execution.py |
from ada.base import BaseADA
class TestCase(BaseADA):
url = "test-case/"
def create(self, name, status, duration, execution_id, **kwargs):
data = {
'name': name,
'status': status,
'duration': duration,
'execution': execution_id,
}
data.update(kwargs)
return super(TestCase, self).create(data)
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/features/testcase.py | testcase.py |
from .execution import Execution
from .testcase import TestCase
from .upload_image import UploadImage
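
# A minimal usage sketch (the API key and project id below are placeholders, not real values):
#
#     from ada.features import Execution, TestCase
#     execution_id = Execution("<api-key>").create("My suite", "<project-id>", total_test_cases=2)
#     TestCase("<api-key>").create("test_login", "passed", 1200, execution_id)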
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/features/__init__.py | __init__.py |
class APIException(Exception):
def __init__(self, message):
super(APIException, self).__init__(message)
| ADA-sdk | /ADA-sdk-2.9.tar.gz/ADA-sdk-2.9/ada/utils/APIException.py | APIException.py |
import torch
import torch.nn as nn
import ADAFMNoiseReducer.models.modules.architecture as arch
# Generator
def define_G(gpu_ids, network_G):
opt_net = network_G
which_model = opt_net['which_model_G']
if which_model == 'adaptive_resnet':
netG = arch.AdaResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'],
nb=opt_net['nb'], norm_type=opt_net['norm_type'], act_type='relu',
upsample_mode='pixelshuffle', adafm_ksize=opt_net['adafm_ksize'])
else:
raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))
if gpu_ids:
assert torch.cuda.is_available()
netG = nn.DataParallel(netG)
return netG | ADAFMNoiseReducer | /models/networks.py | networks.py |
import torch
import torch.nn as nn
class BaseModel():
def __init__(self, gpu_ids, finetune_norm, _pretrain_model_G, is_train=False):
self.device = torch.device('cuda' if gpu_ids is not None else 'cpu')
self.is_train = is_train
self._pretrain_model_G = _pretrain_model_G
self.finetune_norm = finetune_norm
self.schedulers = []
self.optimizers = []
def feed_data(self, data):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
pass
def get_current_losses(self):
pass
def save(self, label):
pass
def load(self):
pass
def get_network_description(self, network):
'''Get the string and total parameters of the network'''
if isinstance(network, nn.DataParallel):
network = network.module
s = str(network)
n = sum(map(lambda x: x.numel(), network.parameters()))
return s, n
def load_network(self, load_path, network, strict=True):
if isinstance(network, nn.DataParallel):
network = network.module
network.load_state_dict(torch.load(load_path), strict=strict)
| ADAFMNoiseReducer | /models/base_model.py | base_model.py |
from collections import OrderedDict
import torch
import torch.nn as nn
import ADAFMNoiseReducer.models.networks as networks
from ADAFMNoiseReducer.models.base_model import BaseModel
class SRModel(BaseModel):
def __init__(self,gpu_ids, network_G, finetune_norm, _pretrain_model_G):
super(SRModel, self).__init__(gpu_ids, finetune_norm, _pretrain_model_G)
# define network and load pretrained models
self.netG = networks.define_G(gpu_ids, network_G).to(self.device)
self.load()
def feed_data(self, data, need_HR=False):
self.var_L = data.to(self.device) # LR
def test(self):
self.netG.eval()
with torch.no_grad():
self.fake_H = self.netG(self.var_L)
self.netG.train()
def get_current_visuals(self, need_HR=False):
out_dict = OrderedDict()
out_dict['LR'] = self.var_L.detach()[0].float().cpu()
out_dict['SR'] = self.fake_H.detach()[0].float().cpu()
return out_dict
def load(self):
load_path_G = self._pretrain_model_G
if load_path_G is not None:
if self.finetune_norm:
self.load_network(load_path_G, self.netG, strict=False)
else:
self.load_network(load_path_G, self.netG)
def update(self, new_model_dict):
if isinstance(self.netG, nn.DataParallel):
network = self.netG.module
network.load_state_dict(new_model_dict)
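

# A minimal construction sketch; the option values below are assumed placeholders showing the
# shape of the network_G dict that networks.define_G expects for the 'adaptive_resnet' generator.
if __name__ == "__main__":
    network_G = {'which_model_G': 'adaptive_resnet', 'in_nc': 3, 'out_nc': 3,
                 'nf': 64, 'nb': 16, 'norm_type': 'adafm', 'adafm_ksize': 1}
    model = SRModel(gpu_ids=None, network_G=network_G, finetune_norm=False,
                    _pretrain_model_G=None)   # no checkpoint is loaded when the path is None
    print(model.netG)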
| ADAFMNoiseReducer | /models/SR_model.py | SR_model.py |
import torch.nn as nn
from . import block as B
class AdaResNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, nb, norm_type='batch', act_type='relu',
res_scale=1, upsample_mode='upconv', adafm_ksize=1):
super(AdaResNet, self).__init__()
norm_layer = B.get_norm_layer(norm_type, adafm_ksize)
fea_conv = B.conv_block(in_nc, nf, stride=2, kernel_size=3, norm_layer=None, act_type=None)
resnet_blocks = [B.ResNetBlock(nf, nf, nf, norm_layer=norm_layer, act_type=act_type, res_scale=res_scale)
for _ in range(nb)]
LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_layer=norm_layer, act_type=None)
if upsample_mode == 'upconv':
upsample_block = B.upconv_blcok
elif upsample_mode == 'pixelshuffle':
upsample_block = B.pixelshuffle_block
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
upsampler = upsample_block(nf, nf, act_type=act_type)
HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_layer=None, act_type=act_type)
HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_layer=None, act_type=None)
self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*resnet_blocks, LR_conv)),
upsampler, HR_conv0, HR_conv1)
def forward(self, x):
x = self.model(x)
return x | ADAFMNoiseReducer | /models/modules/architecture.py | architecture.py |
from collections import OrderedDict
import functools
import torch.nn as nn
####################
# Basic blocks
####################
def get_norm_layer(norm_type, adafm_ksize=1):
# helper selecting normalization layer
if norm_type == 'batch':
layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'basic':
layer = functools.partial(Basic)
elif norm_type == 'adafm':
layer = functools.partial(AdaptiveFM, kernel_size=adafm_ksize)
else:
raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type))
return layer
def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1):
# helper selecting activation
# neg_slope: for leakyrelu and init of prelu
# n_prelu: for p_relu num_parameters
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'leakyrelu':
layer = nn.LeakyReLU(neg_slope, inplace)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type))
return layer
def pad(pad_type, padding):
# helper selecting padding layer
# if padding is 'zero', do by conv layers
pad_type = pad_type.lower()
if padding == 0:
return None
if pad_type == 'reflect':
layer = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
layer = nn.ReplicationPad2d(padding)
else:
raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type))
return layer
def get_valid_padding(kernel_size, dilation):
kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
padding = (kernel_size - 1) // 2
return padding
class ShortcutBlock(nn.Module):
#Elementwise sum the output of a submodule to its input
def __init__(self, submodule):
super(ShortcutBlock, self).__init__()
self.sub = submodule
def forward(self, x):
output = x + self.sub(x)
return output
def __repr__(self):
tmpstr = 'Identity + \n|'
modstr = self.sub.__repr__().replace('\n', '\n|')
tmpstr = tmpstr + modstr
return tmpstr
def sequential(*args):
# Flatten Sequential. It unwraps nn.Sequential.
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError('sequential does not support OrderedDict input.')
return args[0] # No sequential is needed.
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, \
pad_type='zero', norm_layer=None, act_type='relu'):
'''
Conv layer with padding, normalization, activation
'''
padding = get_valid_padding(kernel_size, dilation)
p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
padding = padding if pad_type == 'zero' else 0
c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, \
dilation=dilation, bias=bias, groups=groups)
a = act(act_type) if act_type else None
n = norm_layer(out_nc) if norm_layer else None
return sequential(p, c, n, a)
####################
# Useful blocks
####################
class ResNetBlock(nn.Module):
'''
ResNet Block, 3-3 style
'''
def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, stride=1, dilation=1, groups=1, \
bias=True, pad_type='zero', norm_layer=None, act_type='relu', res_scale=1):
super(ResNetBlock, self).__init__()
conv0 = conv_block(in_nc, mid_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
norm_layer, act_type)
act_type = None
conv1 = conv_block(mid_nc, out_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
norm_layer, act_type)
self.res = sequential(conv0, conv1)
self.res_scale = res_scale
def forward(self, x):
res = self.res(x).mul(self.res_scale)
return x + res
####################
# AdaFM
####################
class AdaptiveFM(nn.Module):
def __init__(self, in_channel, kernel_size):
super(AdaptiveFM, self).__init__()
padding = get_valid_padding(kernel_size, 1)
self.transformer = nn.Conv2d(in_channel, in_channel, kernel_size,
padding=padding, groups=in_channel)
def forward(self, x):
return self.transformer(x) + x
class Basic(nn.Module):
def __init__(self, in_channel):
super(Basic, self).__init__()
self.in_channel = in_channel
def forward(self, x):
return x
####################
# Upsampler
####################
def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \
pad_type='zero', norm_layer=None, act_type='relu'):
'''
Pixel shuffle layer
(Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
Neural Network, CVPR17)
'''
conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias, \
pad_type=pad_type, norm_layer=None, act_type=None)
pixel_shuffle = nn.PixelShuffle(upscale_factor)
n = norm_layer(out_nc) if norm_layer else None
a = act(act_type) if act_type else None
return sequential(conv, pixel_shuffle, n, a)
def upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \
pad_type='zero', norm_layer=None, act_type='relu', mode='nearest'):
# Up conv
# described in https://distill.pub/2016/deconv-checkerboard/
upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode)
conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias, \
pad_type=pad_type, norm_layer=norm_layer, act_type=act_type)
return sequential(upsample, conv)
| ADAFMNoiseReducer | /models/modules/block.py | block.py |
from ADB_Easy_Control import touch_event
from ADB_Easy_Control import data_model
def point_touch_withobj(position: data_model.Point, sleep_time: float):
touch_event.point_touch(position.position_x, position.position_y, sleep_time)
def point_swipe_withobj(start_position: data_model.Point, end_position: data_model.Point, swipe_time: float,
sleep_time: float):
touch_event.point_swipe(start_position.position_x, start_position.position_y, end_position.position_x,
end_position.position_y, swipe_time, sleep_time)
def point_longtime_touch_withobj(position: data_model.Point, touch_time: float, sleep_time: float):
touch_event.point_longtime_touch(position.position_x, position.position_y, touch_time, sleep_time)
def rectangle_area_touch_withobj(area: data_model.RectangleArea, sleep_time: float):
touch_event.rectangle_area_touch(area.beginarea_x, area.finisharea_x, area.beginarea_y, area.finisharea_y,
sleep_time)
def rectangle_area_longtime_touch_withobj(area: data_model.RectangleArea, touch_time: float, sleep_time: float):
touch_event.rectangle_area_longtime_touch(area.beginarea_x, area.finisharea_x, area.beginarea_y, area.finisharea_y,
touch_time, sleep_time)
def rectangle_area_swipe_withobj(start_area: data_model.RectangleArea, end_area: data_model.RectangleArea,
swipe_time: float,
sleep_time: float):
touch_event.rectangle_area_swipe(start_area.beginarea_x, start_area.finisharea_x, start_area.beginarea_y,
start_area.finisharea_y, end_area.beginarea_x, end_area.finisharea_x,
end_area.beginarea_y, end_area.finisharea_y, swipe_time, sleep_time)
def rectangle_inarea_rand_swipe_withobj(area: data_model.RectangleArea, min_swipe_distance: int,
max_swipe_distance: int,
swipe_time: float, sleep_time: float):
touch_event.rectangle_inarea_rand_swipe(area.beginarea_x, area.finisharea_x, area.beginarea_y, area.finisharea_y,
min_swipe_distance, max_swipe_distance, swipe_time, sleep_time)
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/touch_event_withobj.py | touch_event_withobj.py |
import os
def get_dpi() -> str:
dpi_string = os.popen("adb" + multi_devices_helper() + " shell wm density").read().split(" ")[2][:-1]
return dpi_string
def get_size() -> str:
size_string = os.popen("adb" + multi_devices_helper() + " shell wm size").read().split(" ")[2][:-1]
return size_string
def get_size_x() -> str:
size_x_string = os.popen("adb" + multi_devices_helper() + " shell wm size").read().split(" ")[2][:-1].split("x")[0]
return size_x_string
def get_size_y() -> str:
size_y_string = os.popen("adb" + multi_devices_helper() + " shell wm size").read().split(" ")[2][:-1].split("x")[1]
return size_y_string
def reboot():
os.system("adb" + multi_devices_helper() + " shell reboot")
def shutdown():
os.system("adb" + multi_devices_helper() + " shell reboot -p")
def turn_on_wifi():
os.system("adb" + multi_devices_helper() + " shell svc wifi enable")
def turn_off_wifi():
os.system("adb" + multi_devices_helper() + " shell svc wifi disable")
def wifi_prefer():
os.system("adb" + multi_devices_helper() + " shell svc wifi prefer")
def turn_on_data():
os.system("adb" + multi_devices_helper() + " shell svc data enable")
def turn_off_data():
os.system("adb" + multi_devices_helper() + " shell svc data disable")
def data_prefer():
os.system("adb" + multi_devices_helper() + " shell svc data prefer")
def power_stay_on(mode: str):
os.system("adb" + multi_devices_helper() + " shell svc power stayon " + format(mode))
def kill_adb_server():
os.system("adb kill-server")
def start_adb_server():
os.system("adb start_server")
def get_connected_devices() -> list:
devices_info = os.popen("adb devices -l").read().split()[4:]
devices_list = []
for i in range(0, len(devices_info), 7):
devices_list.append(devices_info[i])
return devices_list
def get_connected_device_info() -> list:
devices_info = os.popen("adb devices -l").read().split()[4:]
return devices_info
is_multi_devices = 0
current_device = ""
def multi_devices_helper() -> str:
if is_multi_devices == 1 and not current_device == "":
return format(" -s " + current_device)
else:
return ""
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/device_assistant.py | device_assistant.py |
import os
import datetime
from ADB_Easy_Control import device_assistant
def screen_capture() -> str:
now_time = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
os.system("adb" + device_assistant.multi_devices_helper() + " shell screencap -p /sdcard/screencap.png")
if not os.path.exists(format(os.getcwd()) + "/ScreenCapture"):
os.mkdir(format(os.getcwd()) + "/ScreenCapture/")
os.system(
"adb" + device_assistant.multi_devices_helper() + " pull /sdcard/screencap.png" + " " + format(
os.getcwd()) + "/ScreenCapture/" + format(now_time) + ".png")
return format(now_time) + ".png"
def custompath_screen_capture(filename: str, path: str) -> str:
os.system("adb" + device_assistant.multi_devices_helper() + " shell screencap -p /sdcard/screencap.png")
os.system("adb" + device_assistant.multi_devices_helper() + " pull /sdcard/screencap.png" + " " + format(
path) + "/ScreenCapture/" + format(filename) + ".png")
return format(filename) + ".png"
def screen_record(time_limit: float) -> str:
now_time = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
os.system("adb" + device_assistant.multi_devices_helper() + " shell screenrecord --time-limit " + format(
time_limit) + " /sdcard/screenrecord.mp4")
if not os.path.exists(format(os.getcwd()) + "/ScreenRecord"):
os.mkdir(format(os.getcwd()) + "/ScreenRecord/")
os.system(
"adb" + device_assistant.multi_devices_helper() + " pull /sdcard/screenrecord.mp4" + " " + format(
os.getcwd()) + "/ScreenRecord/" + format(now_time) + ".mp4")
return format(now_time) + ".mp4"
def custompath_screen_record(time_limit: float, filename: str, path: str) -> str:
os.system("adb" + device_assistant.multi_devices_helper() + " shell screenrecord --time-limit " + format(
time_limit) + " /sdcard/screenrecord.mp4")
os.system(
"adb" + device_assistant.multi_devices_helper() + " pull /sdcard/screenrecord.mp4" + " " + format(
path) + "/ScreenRecord/" + format(filename) + ".mp4")
return format(filename) + ".mp4"
def custom_screen_record(time_limit: float, size: str, bit_rate: int, filename: str, path: str):
os.system("adb" + device_assistant.multi_devices_helper() + " shell screenrecord --time-limit " + format(
time_limit) + " --size " + format(size) + " --bit-rate " + format(bit_rate) + " /sdcard/screenrecord.mp4")
os.system(
"adb" + device_assistant.multi_devices_helper() + " pull /sdcard/screenrecord.mp4" + " " + format(
path) + "/ScreenRecord/" + format(filename) + ".mp4")
return format(filename) + ".mp4"
def pull_file_to_computer(droid_path: str, computer_path: str):
os.system("adb" + device_assistant.multi_devices_helper() + " pull " + droid_path + " " + computer_path)
def push_file_to_droid(computer_path: str, droid_path: str):
os.system("adb" + device_assistant.multi_devices_helper() + " push " + computer_path + " " + droid_path)
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/screen_and_file.py | screen_and_file.py |
import os
import time
import random
import math
from ADB_Easy_Control import data_model, device_assistant
from functools import singledispatch
@singledispatch
def point_touch(position_x: int, position_y: int, sleep_time: float):
os.system('adb' + device_assistant.multi_devices_helper() + ' shell input tap ' + format(position_x) + ' ' + format(
position_y))
time.sleep(sleep_time)
@point_touch.register(data_model.Point)
def _(position: data_model.Point, sleep_time: float):
point_touch(position.position_x, position.position_y, sleep_time)
@singledispatch
def point_swipe(start_position_x: int, start_position_y: int, end_position_x: int, end_position_y: int,
swipe_time: float, sleep_time: float):
if swipe_time == 0:
os.system('adb' + device_assistant.multi_devices_helper() + ' shell input swipe ' + format(
start_position_x) + ' ' + format(start_position_y) + ' ' + format(
end_position_x) + ' ' + format(end_position_y))
if swipe_time != 0:
if swipe_time > 5:
print('You may have entered too long a slide time of ' + format(
swipe_time) + ' seconds.\nNote that the sliding time is in seconds and not milliseconds.')
os.system('adb' + device_assistant.multi_devices_helper() + ' shell input swipe ' + format(
start_position_x) + ' ' + format(start_position_y) + ' ' + format(
            end_position_x) + ' ' + format(end_position_y) + ' ' + format(int(swipe_time * 1000)))
time.sleep(sleep_time)
@point_swipe.register(data_model.Point)
def _(start_position: data_model.Point, end_position: data_model.Point, swipe_time: float,
sleep_time: float):
point_swipe(start_position.position_x, start_position.position_y, end_position.position_x,
end_position.position_y, swipe_time, sleep_time)
@singledispatch
def point_longtime_touch(position_x: int, position_y: int, touch_time: float, sleep_time: float):
if touch_time > 5:
print('You may have entered too long a touch time of ' + format(
touch_time) + ' seconds.\nNote that the touching time is in seconds and not milliseconds.')
os.system(
'adb' + device_assistant.multi_devices_helper() + ' shell input swipe ' + format(position_x) + ' ' + format(
position_y) + ' ' + format(
            position_x) + ' ' + format(position_y) + ' ' + format(int(touch_time * 1000)))
time.sleep(sleep_time)
@point_longtime_touch.register(data_model.Point)
def _(position: data_model.Point, touch_time: float, sleep_time: float):
point_longtime_touch(position.position_x, position.position_y, touch_time, sleep_time)
@singledispatch
def rectangle_area_touch(beginarea_x: int, finisharea_x: int, beginarea_y: int, finisharea_y: int, sleep_time: float):
rand_position_x = random.randint(beginarea_x, finisharea_x)
rand_position_y = random.randint(beginarea_y, finisharea_y)
os.system(
'adb' + device_assistant.multi_devices_helper() + ' shell input tap ' + format(rand_position_x) + ' ' + format(
rand_position_y))
time.sleep(sleep_time)
@rectangle_area_touch.register(data_model.RectangleArea)
def _(area: data_model.RectangleArea, sleep_time: float):
rectangle_area_touch(area.beginarea_x, area.finisharea_x, area.beginarea_y, area.finisharea_y,
sleep_time)
@singledispatch
def rectangle_area_longtime_touch(beginarea_x: int, finisharea_x: int, beginarea_y: int, finisharea_y: int,
touch_time: float, sleep_time: float):
rand_position_x = random.randint(beginarea_x, finisharea_x)
rand_position_y = random.randint(beginarea_y, finisharea_y)
os.system(
        'adb' + device_assistant.multi_devices_helper() + ' shell input swipe ' + format(rand_position_x) + ' ' + format(
            rand_position_y) + ' ' + format(
            rand_position_x) + ' ' + format(rand_position_y) + ' ' + format(int(touch_time * 1000)))
time.sleep(sleep_time)
@rectangle_area_longtime_touch.register(data_model.RectangleArea)
def _(area: data_model.RectangleArea, touch_time: float, sleep_time: float):
rectangle_area_longtime_touch(area.beginarea_x, area.finisharea_x, area.beginarea_y, area.finisharea_y,
touch_time, sleep_time)
@singledispatch
def rectangle_area_swipe(start_beginarea_x: int, start_finisharea_x: int, start_beginarea_y: int,
start_finisharea_y: int, end_beginarea_x: int, end_finisharea_x: int, end_beginarea_y: int,
end_finisharea_y: int, swipe_time: float, sleep_time: float):
rand_start_position_x = random.randint(start_beginarea_x, start_finisharea_x)
rand_start_position_y = random.randint(start_beginarea_y, start_finisharea_y)
rand_end_position_x = random.randint(end_beginarea_x, end_finisharea_x)
rand_end_position_y = random.randint(end_beginarea_y, end_finisharea_y)
point_swipe(rand_start_position_x, rand_start_position_y, rand_end_position_x, rand_end_position_y, swipe_time,
sleep_time)
@rectangle_area_swipe.register(data_model.RectangleArea)
def _(start_area: data_model.RectangleArea, end_area: data_model.RectangleArea,
swipe_time: float,
sleep_time: float):
rectangle_area_swipe(start_area.beginarea_x, start_area.finisharea_x, start_area.beginarea_y,
start_area.finisharea_y, end_area.beginarea_x, end_area.finisharea_x,
end_area.beginarea_y, end_area.finisharea_y, swipe_time, sleep_time)
@singledispatch
def rectangle_inarea_rand_swipe(beginarea_x: int, finisharea_x: int, beginarea_y: int, finisharea_y: int,
min_swipe_distance: int, max_swipe_distance: int, swipe_time: float, sleep_time: float):
if min_swipe_distance > max_swipe_distance:
print("最小滑动距离" + format(min_swipe_distance) + "大于最大滑动距离" + format(max_swipe_distance))
return
diagonal_distance = math.hypot(finisharea_x - beginarea_x, finisharea_y - beginarea_y)
if max_swipe_distance > diagonal_distance:
print("设定的最大滑动距离" + format(max_swipe_distance) + "大于区域的对角线距离" + format(diagonal_distance))
max_swipe_distance = diagonal_distance
if min_swipe_distance > max_swipe_distance:
print("设定的最小滑动距离" + format(min_swipe_distance) + "大于区域的对角线距离" + format(diagonal_distance))
min_swipe_distance = max_swipe_distance
rand_distance = random.randint(min_swipe_distance, max_swipe_distance)
rand_degree = random.randint(0, 90)
x_move_distance = math.cos(math.radians(rand_degree)) * rand_distance
y_move_distance = math.sin(math.radians(rand_degree)) * rand_distance
rand_direction = random.randint(1, 4)
if rand_direction == 1:
rand_start_position_x = random.randint(beginarea_x, int(finisharea_x - x_move_distance))
rand_start_position_y = random.randint(beginarea_y, int(finisharea_y - y_move_distance))
rand_end_position_x = rand_start_position_x + x_move_distance
rand_end_position_y = rand_start_position_y + y_move_distance
elif rand_direction == 2:
rand_start_position_x = random.randint(beginarea_x, int(finisharea_x - x_move_distance))
rand_start_position_y = random.randint(int(beginarea_y + y_move_distance), finisharea_y)
rand_end_position_x = rand_start_position_x + x_move_distance
rand_end_position_y = rand_start_position_y - y_move_distance
elif rand_direction == 3:
rand_start_position_x = random.randint(int(beginarea_x + x_move_distance), finisharea_x)
rand_start_position_y = random.randint(beginarea_y, int(finisharea_y - y_move_distance))
rand_end_position_x = rand_start_position_x - x_move_distance
rand_end_position_y = rand_start_position_y + y_move_distance
else:
rand_start_position_x = random.randint(int(beginarea_x + x_move_distance), finisharea_x)
rand_start_position_y = random.randint(int(beginarea_y + y_move_distance), finisharea_y)
rand_end_position_x = rand_start_position_x - x_move_distance
rand_end_position_y = rand_start_position_y - y_move_distance
point_swipe(rand_start_position_x, rand_start_position_y, int(rand_end_position_x), int(rand_end_position_y),
swipe_time, sleep_time)
@rectangle_inarea_rand_swipe.register(data_model.RectangleArea)
def _(area: data_model.RectangleArea, min_swipe_distance: int, max_swipe_distance: int,
swipe_time: float, sleep_time: float):
rectangle_inarea_rand_swipe(area.beginarea_x, area.finisharea_x, area.beginarea_y, area.finisharea_y,
min_swipe_distance, max_swipe_distance, swipe_time, sleep_time)
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/touch_event.py | touch_event.py |
import os
from ADB_Easy_Control import device_assistant
# Only English (ASCII) characters can be entered
def input_text(text: str):
os.system("adb" + device_assistant.multi_devices_helper() + " shell input text " + format(text))
def input_by_keycode(keycode: int):
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent " + format(keycode))
def home_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 3")
def back_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 4")
def volume_up():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 24")
def volume_down():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 25")
def power_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 26")
def space_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 62")
def tab_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 61")
def enter_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 66")
def backspace_delete_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 67")
def escape_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 111")
def forward_delete_button():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 112")
def screen_off():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 223")
def screen_on():
os.system("adb" + device_assistant.multi_devices_helper() + " shell input keyevent 224")
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/key_event.py | key_event.py |
class Point(object):
def __init__(self, position_x: int, position_y: int):
self.position_x = position_x
self.position_y = position_y
class RectangleArea(object):
def __init__(self, beginarea_x: int, finisharea_x: int, beginarea_y: int, finisharea_y: int):
self.beginarea_x = beginarea_x
self.finisharea_x = finisharea_x
self.beginarea_y = beginarea_y
self.finisharea_y = finisharea_y
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/data_model.py | data_model.py |
from ADB_Easy_Control import app_assistant
from ADB_Easy_Control import data_model
from ADB_Easy_Control import device_assistant
from ADB_Easy_Control import key_event
from ADB_Easy_Control import screen_and_file
from ADB_Easy_Control import touch_event
from ADB_Easy_Control import touch_event_withobj
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/__init__.py | __init__.py |
import os
from ADB_Easy_Control import device_assistant
def get_grep_or_findstr() -> str:
if os.name == "nt":
return "findstr"
else:
return "grep"
def get_current_activity() -> str:
package_and_activity_string = os.popen(
"adb" + device_assistant.multi_devices_helper() + " shell dumpsys activity activities | " + get_grep_or_findstr() + " mCurrentFocus").read().split(
" ")[4]
separator = "/"
activity_string = package_and_activity_string[package_and_activity_string.index(separator) + 1:-2]
return activity_string
def get_current_package() -> str:
package_and_activity_string = os.popen(
"adb" + device_assistant.multi_devices_helper() + " shell dumpsys activity activities | " + get_grep_or_findstr() + " mCurrentFocus").read().split(
" ")[4]
separator = "/"
package_string = package_and_activity_string[:package_and_activity_string.index(separator)]
return package_string
def start_activity(target_package: str, target_activity: str):
os.system(
"adb" + device_assistant.multi_devices_helper() + " shell am start -n " + target_package + "/" + target_activity)
def start_activity_with_parameter(target_package: str, target_activity: str, parameter: str):
os.system(
"adb" + device_assistant.multi_devices_helper() + " shell am start -n " + target_package + "/" + target_activity + " -d " + parameter)
def start_activity_by_action(target_intent_action: str):
os.system("adb" + device_assistant.multi_devices_helper() + " shell am start -a " + target_intent_action)
def start_activity_by_action_parameter(target_intent_action: str, parameter: str):
os.system(
"adb" + device_assistant.multi_devices_helper() + " shell am start -a " + target_intent_action + " -d " + parameter)
def start_service(target_package: str, target_service: str):
os.system(
"adb" + device_assistant.multi_devices_helper() + " shell am startservice -n " + target_package + "/" + target_service)
def start_service_with_parameter(target_package: str, target_service: str, parameter: str):
os.system(
"adb" + device_assistant.multi_devices_helper() + " shell am start -n " + target_package + "/" + target_service + " -d " + parameter)
def send_broadcast(parameter_and_action: str):
os.system("adb" + device_assistant.multi_devices_helper() + " shell am broadcast " + parameter_and_action)
def stop_app(target_package: str):
os.system("adb" + device_assistant.multi_devices_helper() + " shell am force-stop " + target_package)
| ADB-Easy-Control | /ADB_Easy_Control-1.0.1-py3-none-any.whl/ADB_Easy_Control/app_assistant.py | app_assistant.py |
# ADB-Wifi
[](https://badge.fury.io/py/ADB-Wifi)
A script to automatically connect android devices in debug mode using WIFI
<p align="center">
<img src="extras/example.gif" width="100%" />
</p>
## Motivation
Every day I need to connect a lot of different devices to my computer.
Some devices have Micro-USB ports and others USB Type-C ports, and I lose time plugging in the devices and waiting for ADB.
So I created this script to automatically connect a device using WIFI.
**The difference from other scripts and plugins:** this script saves the connections in a configuration file so it can try to reconnect when you boot your computer or when your device loses the WIFI connection.
## Requirements
* Python 3
* ADB
## Installation
Using pip you can install ```adb-wifi```
### Linux and macOS:
```$ sudo pip install adb-wifi```
## Usage
1. Run ```$ adb-wifi```
You can add ```adb-wifi``` to your startup applications.
2. Connect the devices to your computer and authorize the debugging.
**Attention:** If your device turns off (dead battery, etc.), you need to plug the device into the computer again, because adb needs to open the ```tcpip``` port!
If your device is rooted you can use this [application](https://play.google.com/store/apps/details?id=com.ttxapps.wifiadb)
to turn on the ```tcpip``` port and ignore this step.
## Created & Maintained By
[Jorge Costa](https://github.com/extmkv)
| ADB-Wifi | /ADB-Wifi-0.4.2.tar.gz/ADB-Wifi-0.4.2/README.md | README.md |
from setuptools import setup
setup(
name='ADB-Wifi',
version='0.4.2',
scripts=['adb-wifi'],
install_requires=[
'python-nmap',
'netifaces',
'netaddr'
]
)
| ADB-Wifi | /ADB-Wifi-0.4.2.tar.gz/ADB-Wifi-0.4.2/setup.py | setup.py |
# ADB Wrapper
```
python3 -m pip install ADBWrapper
```
```
import time

from ADBWrapper import ADBWrapper
if __name__ == "__main__":
adb = ADBWrapper( { "ip": "192.168.4.57" , "port": "5555" } )
adb.take_screen_shot()
adb.screen_shot.show()
adb.open_uri( "https://www.youtube.com/watch?v=naOsvWxeYgo&list=PLcW8xNfZoh7fCLYJi0m3JXLs0LdcAsc0R&index=1" )
adb.press_key_sequence( [ 22 , 22 , 22 , 22 ] )
time.sleep( 10 )
adb.press_keycode( "KEYCODE_MEDIA_PAUSE" )
adb.press_keycode( "KEYCODE_MEDIA_FAST_FORWARD" )
adb.press_keycode( "KEYCODE_MEDIA_PLAY" )
``` | ADBWrapper | /ADBWrapper-0.0.3.tar.gz/ADBWrapper-0.0.3/README.md | README.md |
import setuptools
import pathlib
import pkg_resources
# pipreqs ./XDoToolWrapper
# https://stackoverflow.com/a/59971469
with pathlib.Path( "./ADBWrapper/requirements.txt" ).open() as requirements_txt:
install_requires = [
str( requirement )
for requirement
in pkg_resources.parse_requirements( requirements_txt )
]
setuptools.setup(
name="ADBWrapper",
version="0.0.3",
author="7435171",
author_email="48723247842@protonmail.com",
description="ADB Wrapper",
url="https://github.com/48723247842/ADBWrapper",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
include_package_data = True ,
include=[ "keycode_enum.py" ] ,
python_requires='>=3.6',
install_requires=install_requires
) | ADBWrapper | /ADBWrapper-0.0.3.tar.gz/ADBWrapper-0.0.3/setup.py | setup.py |
def add_numbers(num1, num2):
    return num1 + num2
| ADC-Python-Library | /ADC%20Python%20Library-0.0.1.tar.gz/ADC Python Library-0.0.1/_init_.py | _init_.py |
# %%
from setuptools import setup, find_packages
# %%
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Operating System :: Microsoft :: Windows :: Windows 10',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
setup(
name='ADC Python Library',
version='0.0.1',
description='A very basic test ',
long_description=open('README.txt').read() + '\n\n' + open('CHANGELOG.txt').read(),
url='',
author='Yang Wu',
author_email='yang.wu@ontario.ca',
license='MIT',
classifiers=classifiers,
keywords='a basic function',
packages=find_packages(),
install_requires=['']
) | ADC-Python-Library | /ADC%20Python%20Library-0.0.1.tar.gz/ADC Python Library-0.0.1/setup.py | setup.py |
This function cleans the address data.
| ADC-Python-Library | /ADC%20Python%20Library-0.0.1.tar.gz/ADC Python Library-0.0.1/readme.txt | readme.txt |
# ADCPy - code to work with ADCP data from the raw binary using python 3.x
[](https://adcpy.readthedocs.io/en/latest/?badge=latest)
[](https://travis-ci.org/mmartini-usgs/ADCPy)
### Purpose
This code prepares large amounts of single ping ADCP data from the raw binary for use with xarray by converting it to netCDF.
### Motivation
The code was written for the TRDI ADCP when I discovered that TRDI's Velocity software could not easily export single ping data. While there are other packages out there, at the time of writing this code I had yet to find one that saved the data in netCDF format (so it can be accessed with xarray and dask), could be run on linux, windows and mac, and did not load it into memory (the files I have are > 2GB).
The code is written as a module of functions rather than classes, and ensemble information is stored as nested dicts, in order to be more readable and to make the structure of the raw data (particularly for the TRDI instruments) understandable.
### Status
As the code stands now, a 3.5 GB, single ping Workhorse ADCP .pd0 file with 3 million ensembles will take 4-5 hours to convert. I live with this, because I can just let the conversion happen overnight on such large data sets, and once my data is in netCDF, everything else is convenient and fast. I suspect that more speed might be achieved by making use of xarray and dask to write the netCDF output, and I may do this if time allows, and I invite an enterprising soul to beat me to it. I use this code myself on a routine basis in my work, and continue to make it better as I learn more about python.
At USGS Coastal and Marine Geology we use the PMEL EPIC convention for netCDF, as we started doing this back in the early 1990's. Downstream we do convert to more current CF conventions; however, our diagnostic and other legacy code for processing instrument data from binary and other raw formats depends on the EPIC convention for time, so you will see time (Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968) and time2 (msec since 0:00 GMT) variables created by default. This may confuse your code. If you want the more python friendly CF time (seconds since 1970-01-01T00:00:00 UTC), set timetype to CF.
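
For example, the Nortek Signature converter can be driven directly from Python; a minimal sketch (the file names are placeholders, and `[0, -1]` means "all ensembles"):

```python
from adcpy.Nortekstuff.Norteknc2USGScdf import doNortekRawFile

# MIDAS-exported Burst file in, USGS raw .cdf out; pass '' for the IBurst file with MIDAS exports
doNortekRawFile("1108sigB.nc", "", "1108sig001.cdf", [0, -1], timetype="CF")
```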
Use at your own risk - this is a work in progress and a python learning project.
Enjoy,
Marinna
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/README.md | README.md |
# this setup modelled on
# https://github.com/pypa/sampleproject/blob/master/setup.py
import setuptools
# Get the long description from the README file
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='ADCPy',
version='0.1.1',
author='Marinna Martini',
author_email='mmartini@usgs.gov',
description='read ADCP data from TRDI and Nortek instruments',
long_description=long_description, # read from README.md above
long_description_content_type='text/markdown',
url='https://github.com/mmartini-usgs/ADCPy',
packages=setuptools.find_packages(exclude=('tests', 'docs')),
classifiers=['Programming Language :: Python :: 3',
'License :: Public Domain',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
],
python_requires='>=3.5',
keywords='acoustic doppler profiler ADCP',
)
# TODO - include data for demos
# # If there are data files included in your packages that need to be
# # installed, specify them here.
# #
# # If using Python 2.6 or earlier, then these have to be included in
# # MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
#
# # Although 'package_data' is the preferred approach, in some case you may
# # need to place data files outside of your packages. See:
# # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# #
# # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/setup.py | setup.py |
from . import TRDIstuff
from . import Nortekstuff
from . import EPICstuff
from .EPICstuff import EPICmisc, ADCPcdf2ncEPIC, repopulateEPIC, reshapeEPIC
from .TRDIstuff import TRDIpd0tonetcdf, pd0splitter
from .Nortekstuff import Norteknc2USGScdf
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/__init__.py | __init__.py |
"""
Norteknc2USGScdf
================
Converts Nortek exported netcdf files of Signature data to raw current profile data to a netCDF4 file.
Data are taken from the "Burst" group of "Data"
Usage:
python Norteknc2USGScdf.py [path] infileBName outfileName
:param str path: is a path to prepend to the following
:param str infileBName: is path of netCDF4 input Burst file from a Nortek Signature
:param str infileIName: is path of netCDF4 input IBurst file from a Nortek Signature
:param str outfileName: is path of a netcdf4 output file
:param int start: ensemble at which to start exporting
:param in end: ensemble at which to stop exporting
Notes:
time and time2, the EPIC convention for netCDF, is not used here so that
the resulting very large files generated can be reduced using existing
python too ls such as xarrays
Files exported by the Contour program have differently named attributes
and this program may be updated to handle them at a later date.
"""
# TODO note that the ADCPcdf2ncEPIC keys off attributes for "burst" and there may be burst1, avg, avg1 etc.
# in a Nortek file. This may need to be dealt with as separate .cdf files
# TODO change ('time') instances to ('time',) to make sure they are passed as single element tuples, and test
# 10/4/2018 MM remove valid_range as it causes too many downstream problems
import sys, math
from netCDF4 import Dataset
from netCDF4 import num2date
import datetime as dt
# noinspection PyUnresolvedReferences
# import adcpy
from adcpy.TRDIstuff.TRDIpd0tonetcdf import julian
from adcpy.EPICstuff.EPICmisc import cftime2EPICtime
def doNortekRawFile(infileBName, infileIName, outfileName, goodens, timetype="CF"):
"""
Converts Nortek exported netcdf files of Signature data to raw current profile data to a netCDF4 file.
:param str infileBName: netCDF4 input Burst file from a Nortek Signature
:param str infileIName: is path of netCDF4 input IBurst file from a Nortek Signature
:param str outfileName: is path of a netcdf4 output file
:param list[int] goodens: ensemble numbers to export
:param str timetype: "CF" or "EPIC" time format to use for the "time" variable
"""
    midasdata = (infileIName == '')  # an empty IBurst file name indicates a MIDAS export
# TODO do this more elegantly, and make the default all ensembles
# this is necessary so that this function does not change the value
# in the calling function
ens2process = goodens[:]
nc = Dataset(infileBName, mode='r', format='NETCDF4')
# so the MIDAS output and the Contour output are different, here we hope
# to handle both, since MIDAS has been more tolerant of odd data
if midasdata:
ncI = nc
else:
ncI = Dataset(infileIName, mode='r', format='NETCDF4')
maxens = len(nc['Data']['Burst']['time'])
print('%s has %d ensembles' % (infileBName,maxens))
# TODO - ens2process[1] has the file size from the previous file run when multiple files are processed!
if ens2process[1] < 0:
ens2process[1] = maxens
# we are good to go, get the output file ready
print('Setting up netCDF output file %s' % outfileName)
# set up some pointers to the netCDF groups
config = nc['Config']
data = nc['Data']['Burst']
if 'IBurstHR' in ncI['Data'].groups:
idata = ncI['Data']['IBurstHR']
HRdata = True
else:
idata = ncI['Data']['IBurst']
HRdata = False
# TODO - pay attention to the possible number of bursts.
# we are assuming here that the first burst is the primary sample set of
# the four slant beams
# note that
# f4 = 4 byte, 32 bit float
# maxfloat = 3.402823*10**38;
intfill = -32768
floatfill = 1E35
nens = ens2process[1]-ens2process[0]
print('creating netCDF file %s with %d records' % (outfileName, nens))
cdf = Dataset(outfileName, 'w', clobber=True, format='NETCDF4')
# dimensions, in EPIC order
cdf.createDimension('time', nens)
if midasdata:
cdf.createDimension('depth', config.burst_nCells)
else:
cdf.createDimension('depth', config.Instrument_burst_nCells)
cdf.createDimension('lat', 1)
cdf.createDimension('lon', 1)
# write global attributes
cdf.history = "translated to USGS netCDF by Norteknc2USGScdf.py"
cdf.sensor_type = 'Nortek'
if midasdata:
cdf.serial_number = config.serialNumberDoppler
else:
cdf.serial_number = config.Instrument_serialNumberDoppler
    # TODO - reduce the number of attributes we copy from the nc file
    # building a dictionary of global attributes is a faster way to load attributes
# into a netcdf file http://unidata.github.io/netcdf4-python/#netCDF4.Dataset.setncatts
# put the "sensor_type" in front of the attributes that come directly
# from the instrument data
Nortek_config = dictifyatts(config, 'Nortek_')
cdf.setncatts(Nortek_config)
# it's not yet clear which way to go with this. python tools like xarray
# and panoply demand that time be a CF defined time.
# USGS CMG MATLAB tools need time and time2
# create the datetime object from the CF time
tobj = num2date(data['time'][:], data['time'].units, calendar=data['time'].calendar)
CFcount = data['time'][:]
CFunits = data['time'].units
EPICtime, EPICtime2 = cftime2EPICtime(CFcount,CFunits)
print('CFcount[0] = %f, CFunits = %s' % (CFcount[0], CFunits))
print('EPICtime[0] = %f, EPICtime2[0] = %f' % (EPICtime[0], EPICtime2[0]))
elapsed_sec = []
for idx in range(len(tobj)):
tdelta = tobj[idx]-tobj[0] # timedelta
elapsed_sec.append(tdelta.total_seconds())
# from the datetime object convert to time and time2
jd = []
time = []
time2 = []
# using u2 rather than u4 here because when EPIC time is written from this
# cdf to the nc file, it was getting messed up
# file is 1108sig001.cdf
# EPIC first time stamp = 25-Sep-2017 15:00:00
# seconds since 1970-01-01T00:00:00 UTC
# CF first time stamp = 25-Sep-2017 15:00:00
# file is 1108sig001.nc
# EPIC first time stamp = 08-Oct-5378 00:01:04
# seconds since 1970-01-01T00:00:00 UTC
# CF first time stamp = 25-Sep-2017 15:00:00
timevartype = 'u2'
for idx in range(len(tobj)):
j = julian(tobj[idx].year, tobj[idx].month, tobj[idx].day,
tobj[idx].hour, tobj[idx].minute, tobj[idx].second,
math.floor(tobj[idx].microsecond/10))
jd.append(j)
time.append(int(math.floor(j)))
time2.append(int((j - math.floor(j))*(24*3600*1000)))
if timetype == 'CF':
# cf_time for cf compliance and use by python packages like xarray
# if f8, 64 bit is not used, time is clipped
# TODO test this theory, because downstream 64 bit time is a problem
# for ADCP fast sampled, single ping data, need millisecond resolution
# cf_time = data['time'][:]
# cdf.createVariable('time','f8',('time'))
# cdf['time'].setncatts(dictifyatts(data['time'],''))
# cdf['time'][:] = cf_time[:]
varobj = cdf.createVariable('time', 'f8', ('time'))
varobj.setncatts(dictifyatts(data['time'], ''))
varobj[:] = data['time'][:]
varobj = cdf.createVariable('EPIC_time', timevartype,('time'))
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
# varobj[:] = time[:]
varobj[:] = EPICtime[:]
varobj = cdf.createVariable('EPIC_time2', timevartype, ('time'))
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
# varobj[:] = time2[:]
varobj[:] = EPICtime2[:]
else:
# we include cf_time for cf compliance and use by python packages like xarray
# if f8, 64 bit is not used, time is clipped
# for ADCP fast sampled, single ping data, need millisecond resolution
varobj = cdf.createVariable('cf_time', 'f8', ('time'))
varobj.setncatts(dictifyatts(data['time'], ''))
varobj[:] = data['time'][:]
# we include time and time2 for EPIC compliance
varobj = cdf.createVariable('time', timevartype, ('time'))
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
# varobj[:] = time[:]
varobj[:] = EPICtime[:]
varobj = cdf.createVariable('time2', timevartype, ('time'))
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
# varobj[:] = time2[:]
varobj[:] = EPICtime2[:]
cdf.start_time = '%s' % num2date(data['time'][0], data['time'].units)
cdf.stop_time = '%s' % num2date(data['time'][-1], data['time'].units)
print('times from the input file')
print(cdf.start_time)
print(cdf.stop_time)
print('times from the output file')
print('%s' % num2date(cdf['time'][0], cdf['time'].units))
print('%s' % num2date(cdf['time'][-1], cdf['time'].units))
varobj = cdf.createVariable('Rec', 'u4', ('time'), fill_value=intfill)
varobj.units = "count"
varobj.long_name = "Ensemble Count for each burst"
varobj[:] = data['EnsembleCount'][:]
varobj = cdf.createVariable('sv', 'f4', ('time'), fill_value=floatfill)
varobj.units = "m s-1"
varobj.long_name = "sound velocity (m s-1)"
varobj[:] = data['SpeedOfSound'][:]
    # get the number of bins and the bin distances
# there are separate Amplitude_Range, Correlation_Range and Velocity_Range
# we will pass on Velocity_Range as bindist
varobj = cdf.createVariable('bindist', 'f4', ('depth'), fill_value=floatfill)
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "bindist")
varobj.units = "m"
varobj.long_name = "bin distance from instrument for slant beams"
varobj.epic_code = 0
varobj.NOTE = "distance is not specified by Nortek as along beam or vertical"
if midasdata:
vardata = data.variables['Velocity Range'][:] # in raw data
else:
# because this is a coordinate variable, one can't just say data['Burst_Velocity_Beam_Range'][:]
try:
vardata = data.variables['Burst Velocity_Range'][:] # in raw data
except:
vardata = data.variables['Burst Velocity Beam_Range'][:] # in processed data
varobj[:] = vardata
nbbins = vardata.size
# map the Nortek beams onto TRDI order since later code expects TRDI order
TRDInumber = [3, 1, 4, 2]
for i in range(4):
varname = "vel%d" % TRDInumber[i]
if midasdata:
key = 'VelocityBeam%d' % (i+1)
else:
key = 'Vel_Beam%d' % (i+1)
varobj = cdf.createVariable(varname, 'f4', ('time', 'depth'), fill_value=floatfill)
varobj.units = "m s-1"
varobj.long_name = "Beam %d velocity (m s-1)" % TRDInumber[i]
varobj.epic_code = 1277+i
varobj.NOTE = "beams reordered from Nortek 1-2-3-4 to TRDI 3-1-4-2, as viewed clockwise from compass 0 " + \
"degree reference, when instrument is up-looking"
varobj[:, :] = data[key][:, :]
for i in range(4):
varname = "cor%d" % (i+1)
if midasdata:
key = 'CorrelationBeam%d' % (i+1)
else:
key = 'Cor_Beam%d' % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "percent"
varobj.long_name = "Beam %d correlation" % (i+1)
# varobj.epic_code = 1285+i
varobj[:, :] = data[key][:, :]
for i in range(4):
varname = "att%d" % (i+1)
if midasdata:
key = 'AmplitudeBeam%d' % (i+1)
else:
key = 'Amp_Beam%d' % (i+1)
varobj = cdf.createVariable(varname, 'f4', ('time', 'depth'), fill_value=intfill)
varobj.units = "dB"
# varobj.epic_code = 1281+i
varobj.long_name = "ADCP amplitude of beam %d" % (i+1)
varobj[:, :] = data[key][:, :]
varname = 'Heading'
varobj = cdf.createVariable('Hdg', 'f4', ('time'), fill_value=floatfill)
varobj.units = "degrees"
varobj.long_name = "INST Heading"
varobj.epic_code = 1215
# TODO can we tell on a Signature if a magvar was applied at deployment?
# no metadata found in the .nc file global attributes
# varobj.NOTE_9 = "no heading bias was applied during deployment"
varobj[:] = data[varname][:]
varname = 'Pitch'
varobj = cdf.createVariable('Ptch', 'f4', ('time'), fill_value=floatfill)
varobj.units = "degrees"
varobj.long_name = "INST Pitch"
varobj.epic_code = 1216
varobj[:] = data[varname][:]
varname = 'Roll'
varobj = cdf.createVariable('Roll', 'f4', ('time'), fill_value=floatfill)
varobj.units = "degrees"
varobj.long_name = "INST Roll"
varobj.epic_code = 1217
varobj[:] = data[varname][:]
# The Signature records magnetometer data we are not converting at this time
varname = 'WaterTemperature'
varobj = cdf.createVariable('Tx', 'f4', ('time'), fill_value=floatfill)
varobj.units = "degrees"
varobj.long_name = "Water temperature at ADCP"
# TODO - verify if Signature is IPTS-1990
# 20:T :TEMPERATURE (C) :temp:C:f10.2:IPTS-1990 standard
# varobj.epic_code = 28
varobj[:] = data[varname][:]
varname = 'Pressure'
varobj = cdf.createVariable('Pressure', 'f4', ('time'), fill_value=floatfill)
varobj.units = "dbar"
varobj.long_name = "ADCP Transducer Pressure"
varobj.epic_code = 4
varobj[:] = data[varname][:]
# TODO - Signature can bottom track, and we don't have an example yet
    # we will want to model it on the TRDI ADCP format that already exists
# it is possible in a Signature for the vertical beam data to be on a
# different time base. Test for this. If it is the same time base we can
# include it now. If it isn't we will have to add it later by some other
# code. 5th beam Signature data is stored under the IBurst group
# it is also possible for the number of bins to be different
if midasdata:
vrkey = 'Velocity Range'
else:
vrkey = 'IBurstHR Velocity_Range'
if data['time'].size == idata['time'].size:
if nbbins == idata.variables[vrkey].size:
varobj = cdf.createVariable("vel5", 'f4', ('time', 'depth'), fill_value=floatfill)
varobj.units = "m s-1"
varobj.long_name = "Beam 5 velocity (m s-1)"
# varobj.valid_range = [-32767, 32767]
varobj[:, :] = idata['VelocityBeam5'][:, :]
# varobj[:,:] = idata['Vel_Beam5'][:,:] # contour version
varobj = cdf.createVariable("cor5", 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "percent"
varobj.long_name = "Beam 5 correlation"
# varobj.valid_range = [0, 100]
            varobj[:, :] = idata['CorrelationBeam5'][:, :]
# varobj[:,:] = idata['Cor_Beam5'][:,:] # contour version
varobj = cdf.createVariable("att5", 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "dB"
varobj.long_name = "ADCP amplitude of beam 5"
# varobj.valid_range = [0, 255]
varobj[:, :] = idata['AmplitudeBeam5'][:,:]
# varobj[:,:] = idata['Amp_Beam5'][:,:] # contour version
else:
print("Vertical beam data found with different number of cells.")
cdf.Nortek_VBeam_note = "Vertical beam data found with different number of cells. Vertical beam data " + \
"not exported to netCDF"
print("Vertical beam data not exported to netCDF")
else:
print("Vertical beam data found with different number of ensembles.")
cdf.Nortek_VBeam_note = "Vertical beam data found with different number of ensembles. Vertical beam data " + \
"not exported to netCDF"
print("Vertical beam data not exported to netCDF")
nc.close()
cdf.close()
print("%d ensembles copied" % maxens)
def dictifyatts(varptr, tag):
"""
read netcdf attributes and return them as a dict
:param varptr: pointer to a netcdf variable object
:param tag: string to add to the beginning of the keys in the dict
:return: the attributes as a dict where the keys are the attribute names and the values are the attribute data
"""
theDict = {}
for key in varptr.ncattrs():
if key.startswith('Instrument_'):
# we need to strip the 'Instrument_' off the beginning
n = key.find('_')
newkey = tag+key[n+1:]
else:
newkey = tag+key
theDict[newkey] = varptr.getncattr(key)
return theDict
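# Usage sketch: given an open netCDF4 group such as nc['Config'], dictifyatts(config, 'Nortek_')
# returns something like {'Nortek_serialNumberDoppler': 123456, ...}; any leading 'Instrument_'
# is stripped before the tag is prepended, and the result can be passed to Dataset.setncatts().
# (The attribute name and value shown are hypothetical.)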
# TODO add - and -- types of command line arguments
def __main():
print('%s running on python %s' % (sys.argv[0], sys.version))
if len(sys.argv) < 3:
print("%s usage:" % sys.argv[0])
print("Norteknc2USGScdf infileBName infileIName outfilename [startingensemble endingensemble]" )
sys.exit(1)
try:
infileBName = sys.argv[1]
except:
print('error - Burst input file name missing')
sys.exit(1)
try:
infileIName = sys.argv[2]
except:
print('error - IBurst input file name missing')
sys.exit(1)
try:
outfileName = sys.argv[3]
except:
print('error - output file name missing')
sys.exit(1)
print('Converting %s to %s' % (infileBName, outfileName))
try:
goodens = [int(sys.argv[4]), int(sys.argv[5])]
except:
print("No starting and ending ensembles specified, processing entire file")
goodens = [0, -1]
try:
timetype = sys.argv[6]
except:
print('Time type will be CF')
timetype = "CF"
print('Start file conversion at ', dt.datetime.now())
doNortekRawFile(infileBName, infileIName, outfileName, goodens, timetype)
print('Finished file conversion at ', dt.datetime.now())
if __name__ == "__main__":
__main()
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/Nortekstuff/Norteknc2USGScdf.py | Norteknc2USGScdf.py |
ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/Nortekstuff/__init__.py | __init__.py |
|
# -*- coding: utf-8 -*-
"""
reshapeEPIC
===========
apportion a continuous time series file into bursts (e.g. reshape)
Notes:
* the expected dimensions are [time, depth, lat, lon] for continuous data in EPIC
* we are reshaping to [time, sample, depth, lat, lon]
* for ADCP files, beams are expressed as variables vel1, vel2, ... veln
* if there is a shape problem then the data file might not be properly understood.
It might be that this code won't work, this problem will become evident if an error is produced and operation
returns to the keyboard in debug mode. If this happens, check the shapes of the variables.
WARNING: time may not be monotonically increasing within bursts (e.g. along the sample dimension)
this means that if the number of samples per burst is inconsistent, or if
there are gaps in time, the end of a burst may be fill_value, including time values
Marinna Martini for the USGS in Woods Hole, 9/20/2018
originally coded for MATLAB as reshapeEPIC
https://cmgsoft.repositoryhosting.com/trac/cmgsoft_m-cmg/browser/trunk/MMstuff/reshapeEPIC.m
Created on Thu Sep 20 14:52:42 2018
@author: mmartini
https://github.com/mmartini-usgs
"""
import sys
import os
import datetime as dt
import netCDF4 as nc
from netCDF4 import num2date
import numpy as np
import math
# noinspection PyUnboundLocalVariable,SpellCheckingInspection
def reshapeEPIC(cont_file, burst_file, burst_length, dim='time', edges=None, drop=None,
variable_attributes_to_omit=None, verbose=False):
"""
apportion a continuous time series file into bursts (e.g. reshape)
:usage: issue_flags = reshapeEPIC(cont_file, burst_file, burst_length,
dim=None, edges=None, drop=None)
:param str cont_file: name of netCDF file with continuous data
:param str burst_file: name of file to store the reshaped data, attributes will be copied
:param int burst_length: maximum number of samples in each burst
:param str dim: name of dimension along which we will split the data, usually 'time' or 'Rec'
    :param list[tuple] edges: [(start0, end0), (start1, end1), ...] index pairs defining the start and end of each burst
    :param set drop: variable names to omit from the output file
:param dict variable_attributes_to_omit: variable attributes to omit from output file
:param bool verbose: get lots of feedback to STDOUT
:return: dictionary of problem types and status
"""
print('%s running on python %s' % (sys.argv[0], sys.version))
print('Start file conversion at ', dt.datetime.now())
# check for the output file's existence before we try to delete it.
try:
os.remove(burst_file)
print('{} removed'.format(burst_file))
except FileNotFoundError:
pass
continuous_cdf = nc.Dataset(cont_file, format="NETCDF4")
if dim in continuous_cdf.dimensions:
print('the dimension we are operating on is {}'.format(dim))
    else:
        print('{} not found in input file, aborting'.format(dim))
        continuous_cdf.close()
        return None
# create the new file
burst_cdf = nc.Dataset(burst_file, mode="w", clobber=True, format='NETCDF4')
# incoming data may be uneven, we need proper fill to be added
burst_cdf.set_fill_on()
# copy the global attributes
# first get a dict of them so that we can iterate
gatts = {}
for attr in continuous_cdf.ncattrs():
# print('{} = {}'.format(attr,getattr(continuous_cdf, attr)))
gatts[attr] = getattr(continuous_cdf, attr)
# add a few more important ones we will fill in later
gatts['start_time'] = ""
gatts['stop_time'] = ""
gatts['DELTA_T'] = ""
gatts['history'] = getattr(continuous_cdf, 'history') + '; converted to bursts by reshapeEPIC.py'
burst_cdf.setncatts(gatts)
print('Finished copying global attributes\n')
for item in continuous_cdf.dimensions.items():
print('Defining dimension {} which is {} long in continuous file'.format(item[0], len(item[1])))
if item[0] == dim:
# this is the dimension along which we will reshape
if len(edges) > 1:
nbursts = len(edges)
else:
nbursts = math.floor(len(item[1]) / burst_length)
burst_cdf.createDimension(dim, nbursts)
print('Reshaped dimension {} created for {} bursts'.format(item[0], nbursts))
else:
burst_cdf.createDimension(item[0], len(item[1]))
burst_cdf.createDimension('sample', burst_length)
# ---------------- set up the variables
# order of dimensions matters.
# per https://cmgsoft.repositoryhosting.com/trac/cmgsoft_m-cmg/wiki/EPIC_compliant
# for a burst file dimension order needs to be time, sample, depth, [lat], [lon]
for cvar in continuous_cdf.variables.items():
cvarobj = cvar[1]
print('{} is data type {}'.format(cvarobj.name, cvarobj.dtype))
try:
fill_value = cvarobj.getncattr('_FillValue')
if verbose:
print('\tthe fill value is {}'.format(fill_value))
except AttributeError:
print('\tfailed to read the fill value')
fill_value = False # do not use None here!!!
if verbose:
print('\tfillValue in burst file will be set to {} (if None, then False will be used)'.format(fill_value))
if cvarobj.name not in drop: # are we copying this variable?
dtype = cvarobj.dtype
if dim in cvarobj.dimensions: # are we reshaping this variable?
vdims_cont = cvarobj.dimensions
vdims_burst = []
for t in enumerate(vdims_cont):
vdims_burst.append(t[1])
if t[1] == dim:
vdims_burst.append('sample')
print('\tappending sample in {}'.format(cvarobj.name))
varobj = burst_cdf.createVariable(cvarobj.name, dtype, tuple(vdims_burst), fill_value=fill_value)
else:
# for a normal copy, no reshape
varobj = burst_cdf.createVariable(cvarobj.name, dtype, cvarobj.dimensions, fill_value=fill_value)
# copy the variable attributes
# first get a dict of them so that we can iterate
vatts = {}
for attr in cvarobj.ncattrs():
# print('{} = {}'.format(attr,getattr(continuous_cdf,attr)))
if attr not in variable_attributes_to_omit:
vatts[attr] = getattr(cvarobj, attr)
try:
varobj.setncatts(vatts)
except AttributeError:
print('AttributeError for {}'.format(cvarobj.name))
# not a coordinate but a fill value of None might cause problems
burst_cdf.createVariable('burst', 'uint16', ('time',), fill_value=False)
# these are coordinates and thus cannot have fill as their values
varobj = burst_cdf.createVariable('sample', 'uint16', ('sample',), fill_value=False)
varobj.units = "count"
try:
burst_cdf.createVariable('depth', 'float32', ('depth',), fill_value=False)
except:
pass # likely depth was already set up if this happens
# TODO - if edges is length 1, then we need to create the burst edges here
# --------- populate the file
# note that we don't have to change from a define to a read mode here
# coordinate variables are small(er) and can be done at once, be sure to use generative methods
print(f'\nNow populating data for {nbursts} bursts')
burst_cdf['burst'][:] = list(range(nbursts))
burst_cdf['sample'][:] = list(range(burst_length))
nbins = len(burst_cdf['depth'])
try:
binsize = continuous_cdf['depth'].bin_size_m
except AttributeError:
try:
binsize = continuous_cdf['depth'].bin_size
except AttributeError:
print('Warning: no depth size information found, assuming 1 m')
binsize = 1
try:
bin1distance = continuous_cdf['depth'].center_first_bin_m
except AttributeError:
try:
bin1distance = continuous_cdf['depth'].center_first_bin
except AttributeError:
print('Warning: no depth center of first bin information found, assuming 0.5 bins ')
bin1distance = binsize / 2
ranges_m = list(map(lambda ibin: bin1distance / 100 + ibin * binsize / 100, range(nbins)))
burst_cdf['depth'][:] = ranges_m
issue_flags = {}
diagnosticvars = {} # vars to generate diagnostic output
for cvar in continuous_cdf.variables.items():
varname = cvar[1].name
issue_flags[varname] = []
if varname not in drop:
# the variable objects in Continuous and Burst files
cvarobj = continuous_cdf[varname]
bvarobj = burst_cdf[varname]
vdims_cont = cvarobj.dimensions
vshapes_cont = cvarobj.shape
vndims_cont = len(cvarobj.dimensions)
vdims_burst = burst_cdf[varname].dimensions
vshapes_burst = burst_cdf[varname].shape
vndims_burst = len(burst_cdf[varname].dimensions)
if verbose:
print('{}\tin Continuous file is data type {} shape {}'.format(
varname, cvarobj.dtype, cvarobj.shape))
print('\tin Burst file it is data type {} shape {}'.format(
bvarobj.dtype, bvarobj.shape))
            try:
                fill_value = cvarobj.getncattr('_FillValue')
                fillval_burst = burst_cdf[varname].getncattr('_FillValue')
            except AttributeError:
                if 'EPIC' in varname:
                    # EPIC was ending up with odd fill values in the raw file
                    # this will avoid the TypeError when EPIC_time is written
                    # not sure it is the best solution, for now it works
                    fillval_burst = 0
                    if verbose:
                        print('\tfillval_burst {}'.format(fillval_burst))
                    # this will prevent EPIC_time from being written
                    # fillval_burst = None
                else:
                    fillval_burst = None
if 'sample' not in vdims_burst:
bvarobj[:] = continuous_cdf[varname][:]
else:
for iburst in range(nbursts):
continuous_cdf_corner = np.zeros(vndims_cont)
continuous_cdf_edges = np.ones(vndims_cont)
# look up data in the continuous file according to the user's indeces
continuous_cdf_corner[vdims_cont.index('time')] = edges[iburst][0]
ndatasamples = edges[iburst][1] - edges[iburst][0]
continuous_cdf_edges[vdims_cont.index('time')] = ndatasamples
if 'depth' in vdims_cont:
continuous_cdf_edges[vdims_cont.index('depth')] = vshapes_cont[vdims_cont.index('depth')]
if (iburst == 0) and verbose:
print('\tcontinuous_cdf_corner = {}, continuous_cdf_edges = {}'.format(
continuous_cdf_corner, continuous_cdf_edges))
# get the data, and this will be contingent on the number of dims
if vndims_cont == 1:
data = continuous_cdf[varname][int(continuous_cdf_corner[0]):int(continuous_cdf_corner[0]) +
int(continuous_cdf_edges[0])]
elif vndims_cont == 2:
if varname in diagnosticvars:
data = continuous_cdf[varname]
data = continuous_cdf[varname][int(continuous_cdf_corner[0]):int(continuous_cdf_corner[0]) +
int(continuous_cdf_edges[0]),
int(continuous_cdf_corner[1]):int(continuous_cdf_corner[1]) +
int(continuous_cdf_edges[1])]
elif vndims_cont == 3:
data = continuous_cdf[varname][int(continuous_cdf_corner[0]):int(continuous_cdf_corner[0]) +
int(continuous_cdf_edges[0]),
int(continuous_cdf_corner[1]):int(continuous_cdf_corner[1]) +
int(continuous_cdf_edges[1]),
int(continuous_cdf_corner[2]):int(continuous_cdf_corner[2]) +
int(continuous_cdf_edges[2])]
elif vndims_cont == 4:
data = continuous_cdf[varname][int(continuous_cdf_corner[0]):int(continuous_cdf_corner[0]) +
int(continuous_cdf_edges[0]),
int(continuous_cdf_corner[1]):int(continuous_cdf_corner[1]) +
int(continuous_cdf_edges[1]),
int(continuous_cdf_corner[2]):int(continuous_cdf_corner[2]) +
int(continuous_cdf_edges[2]),
int(continuous_cdf_corner[3]):int(continuous_cdf_corner[3]) +
int(continuous_cdf_edges[3])]
else:
if iburst == 0:
print('did not read data')
burstcorner = np.zeros(vndims_burst)
burstedges = np.ones(vndims_burst)
burstcorner[vdims_burst.index('time')] = iburst
burstedges[vdims_burst.index('time')] = burst_length
# since we don't have regular and recurring indeces, we need to handle
# situations where the data read is not the maximum number of samples
# samples MUST be the second dimension!
if ndatasamples < burst_length:
issue_flags[varname].append(ndatasamples)
if len(data.shape) == 1:
# start with a filled array
burstdata = np.full((1, vshapes_burst[1]), fillval_burst)
burstdata[:, 0:ndatasamples] = data[:]
elif len(data.shape) == 2:
# start with a filled array
burstdata = np.full((1, vshapes_burst[1], vshapes_burst[2]), fillval_burst)
burstdata[:, 0:ndatasamples] = data[:, :]
elif len(data.shape) == 3:
# start with a filled array
burstdata = np.full((1, vshapes_burst[1], vshapes_burst[2], vshapes_burst[3]),
fillval_burst)
burstdata[:, 0:ndatasamples, :] = data[:, :, :]
elif len(data.shape) == 4:
# start with a filled array
burstdata = np.full((1, vshapes_burst[1], vshapes_burst[2],
vshapes_burst[3], vshapes_burst[4]), fillval_burst)
burstdata[:, 0:ndatasamples, :, :] = data[:, :, :, :]
elif len(data.shape) == 5:
# start with a filled array
burstdata = np.full((1, vshapes_burst[1], vshapes_burst[2],
vshapes_burst[3], vshapes_burst[4],
vshapes_burst[5]), fillval_burst)
burstdata[:, 0:ndatasamples, :, :, :] = data[:, :, :, :, :]
else:
burstdata = data
if ('EPIC' in varname) and (iburst == 0) and verbose:
print('\tdata {}'.format(data[1:10]))
print('\tburstdata {}'.format(burstdata[1:10]))
if (iburst == 0) and verbose:
print('\tburstdata.shape = {} burst file dims {}'.format(
burstdata.shape, vdims_burst))
print('\tvndims_burst = {}'.format(vndims_burst))
print('\tdata.shape = {}'.format(data.shape))
# TODO -- can we simplify this code my making a function object?
if len(burstdata.shape) == 1:
try:
burst_cdf[varname][iburst] = burstdata[:]
except TypeError:
# TypeError: int() argument must be a string,
# a bytes-like object or a number, not 'NoneType'
# EPIC_time was given a fill value in the raw file.
# this was solved by making sure coordinate variables had fill value set to False
if iburst == 0:
print('\t{} in Burst file is data type {}, burstdata is type {}, '.format(
varname, bvarobj.dtype, type(burstdata)))
print(' and got a TypeError when writing')
except IndexError: # too many indices for array
if iburst == 0:
print('too many indices for array')
print('iburst = {}'.format(iburst))
print('burstdata = {}'.format(burstdata))
except ValueError:
if iburst == 0:
print('ValueError ')
elif len(burstdata.shape) == 2:
try:
burst_cdf[varname][iburst, :] = burstdata[:, :]
except TypeError:
# TypeError: int() argument must be a string,
# a bytes-like object or a number, not 'NoneType'
# EPIC_time was given a fill value in the raw file.
# this was solved by making sure coordinate variables had fill value set to False
if iburst == 0:
print('\t{} in Burst file is data type {}, burstdata is type {} '.format(
varname, bvarobj.dtype, type(burstdata)))
print('and got a TypeError when writing')
except IndexError: # too many indices for array
if iburst == 0:
print('too many indices for array')
print('iburst = {}'.format(iburst))
print('burstdata = {}'.format(burstdata))
except ValueError:
if iburst == 0:
print('ValueError ')
elif len(burstdata.shape) == 3:
try:
burst_cdf[varname][iburst, :, :] = burstdata[:, :, :]
except TypeError:
if iburst == 0:
print('\t{} is data type {} and got a TypeError when writing'.format(
varname, cvarobj.dtype))
except IndexError: # too many indices for array
if iburst == 0:
print('too many indices for array')
print('iburst = {}'.format(iburst))
print('burstdata = {}'.format(burstdata))
except ValueError:
if iburst == 0:
print('ValueError cannot reshape array of size 1 into shape (1,150,1,1)')
# here we have shapes [time lat lon]
elif len(burstdata.shape) == 4:
try:
burst_cdf[varname][iburst, :, :, :] = burstdata[:, :, :, :]
except TypeError:
if iburst == 0:
print('\t{} is data type {} and got a TypeError when writing'.format(
varname, cvarobj.dtype))
except IndexError: # too many indices for array
if iburst == 0:
print('too many indices for array')
print('iburst = {}'.format(iburst))
print('burstdata = {}'.format(burstdata))
except ValueError:
if iburst == 0:
print('ValueError cannot reshape array of size 1 into shape (1,150,1,1)')
# here we have shapes [time lat lon]
elif len(burstdata.shape) == 5:
try:
burst_cdf[varname][iburst, :, :, :] = burstdata[:, :, :, :, :]
except TypeError:
if iburst == 0:
print('\t{} is data type {} and got a TypeError when writing'.format(
varname, cvarobj.dtype))
except IndexError: # too many indices for array
if iburst == 0:
print('too many indices for array')
print('iburst = {}'.format(iburst))
print('burstdata = {}'.format(burstdata))
except ValueError:
if iburst == 0:
print('got a value error')
else:
if iburst == 0:
print('\tnot set up to write {} dimensions to burst file'.format(
len(burstdata.shape)))
# end of for iburst in range(nbursts):
# end of if 'sample' not in vdims_burst:
# end of if varname not in drop:
# for cvar in continuous_cdf.variables.items():
burst_cdf.start_time = str(num2date(burst_cdf['time'][0, 0], burst_cdf['time'].units))
burst_cdf.stop_time = str(num2date(burst_cdf['time'][-1, 0], burst_cdf['time'].units))
burst_cdf.close()
continuous_cdf.close()
print('Finished file conversion at ', dt.datetime.now())
return issue_flags
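# A hedged usage sketch (file names and numbers are hypothetical), mirroring doreshape.py:
# reshape a continuous file into 2048-sample hourly bursts using edges found with the helper
# functions below, while dropping the EPIC time variables:
#
#   slices = generate_expected_start_times('cont.nc', 'time', 0, 3600, 2048, 2)
#   cdf = nc.Dataset('cont.nc')
#   edges = find_boundaries(cdf['time'][:], slices)
#   cdf.close()
#   flags = reshapeEPIC('cont.nc', 'bursts.nc', 2048, dim='time', edges=edges,
#                       drop={'EPIC_time', 'EPIC_time2'},
#                       variable_attributes_to_omit={'valid_range'})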
# utility functions for creating and managing indexes
# make a function to identify the indices
# and this is where tuples are nice
def find_boundaries(data, edges):
"""
using a list of start and end timestamps (edges) that delineate the beginning times and ending times
    of bursts of measurements, find the indices into the data that correspond to these edges.
    The time base may be irregular; it does not matter.
:param list data: time stamps from the data
:param list[tuple] edges: start and end times
:return: list of indices
"""
nparray = np.array(data) # make the data an numpy array for access to numpy's methods
idx = []
for edge in edges:
s = np.where(nparray >= edge[0])
e = np.where(nparray >= edge[1])
idx.append((int(s[0][0]), int(e[0][0])))
if not (len(idx) % 100):
print('.', end='')
print('\n')
return idx
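# Worked example (hypothetical numbers): with data = [0, 1, 2, ..., 9] (seconds) and
# edges = [(0, 4), (5, 9)], find_boundaries returns [(0, 4), (5, 9)] -- the index of the first
# sample at or after each start and end time. An irregular time base is handled the same way.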
def find_first_masked_value(x):
"""
helper function to find the first occurrence of a masked value in a numpy masked array
returns None if no masked values are found
:param numpy array x:
:return: index
"""
for tpl in enumerate(x):
# print(type(tpl[1]))
if type(tpl[1]) == np.ma.core.MaskedConstant:
# print(tpl[0])
return tpl[0]
return None
def generate_expected_start_times(cdffile, dim, burst_start_offset,
burst_interval, burst_length, sample_rate):
"""
generate a regular and recurring set of start and end timestamps that
    delineate the beginning times and ending times of bursts of measurements
:param str cdffile: name of a continuous time series data file
:param str dim: the unlimited or time dimension which we will find the indices to reshape
:param int burst_start_offset: when to start to make bursts in the continuous data, seconds
:param int burst_interval: time between start of bursts, seconds
:param int burst_length: number of samples in a burst
:param int sample_rate: Hertz
:return: list of tuples of start and end times for each burst
"""
# TODO - do this from a first burst first sample time stamp
print('the file we are looking up times in is {}'.format(cdffile))
cdf = nc.Dataset(cdffile, format="NETCDF4")
if dim in cdf.dimensions:
print('the dimension we are operating on is {}'.format(dim))
    else:
        print('{} not found in {} file, aborting'.format(dim, cdffile))
        cdf.close()
        return None
print('loading the time variable data')
t = cdf['time'][:]
# check for masked/bad values
good_times = np.ma.masked_invalid(t)
print('there are {} times of which {} are good, searching for the first masked time'.format(
len(t), good_times.count()))
start_of_bad = find_first_masked_value(t)
if start_of_bad is None:
print('all times are good!')
else:
print('masked times start after {}'.format(num2date(t[start_of_bad - 1], cdf['time'].units)))
# get the number of bursts based on the elapsed time
print('len t = {} {} {} to {}'.format(len(t), type(t), t[0], t[-1]))
tfirst = num2date(t[0], cdf['time'].units)
if start_of_bad is None:
tlast = num2date(t[-1], cdf['time'].units)
else:
tlast = num2date(t[start_of_bad - 1], cdf['time'].units)
nbursts = int((tlast - tfirst).total_seconds() / burst_interval)
burst_start_times = []
for x in range(nbursts):
burst_start_times.append(burst_start_offset + x * burst_interval)
burst_duration = (1 / sample_rate) * burst_length # seconds
burst_end_times = list(map(lambda x: x + burst_duration, burst_start_times))
print('start times {} such as {}...'.format(len(burst_start_times), burst_start_times[0:5]))
print('end times {} such as {}...'.format(len(burst_end_times), burst_end_times[0:5]))
print('the last time is {} seconds from the start of the experiment'.format(cdf['time'][-1]))
# it turns out later to be convenient to have this as a list of tuples of start and end
slices = list(map(lambda s, e: (s, e), burst_start_times, burst_end_times))
print('edge tuples {} such as {}...'.format(len(slices), slices[0:5]))
cdf.close()
return slices
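# Usage sketch (hypothetical file and sampling scheme): hourly 2048-sample bursts at 2 Hz,
# starting at the beginning of the record, would be requested as
#   slices = generate_expected_start_times('cont.nc', 'time', 0, 3600, 2048, 2)
# giving edge tuples like (0, 1024.0), (3600, 4624.0), ... in seconds from the first time stamp.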
def save_indexes_to_file(cdffile, edge_tuples, index_file=None):
"""
write indexes to a file with the time stamps for QA/QC
:param str cdffile: the continuous time series netCDF file being operated upon
:param list[tuple] edge_tuples: the bursts to output
:param str index_file: a file to output a string listing of time stamps
"""
cdf = nc.Dataset(cdffile, format="NETCDF4")
tunits = cdf['time'].units
    if index_file is None:
index_file = cdffile.split('.')[0] + 'indices.txt'
with open(index_file, 'w') as outfile:
outfile.write('Burst Indexes for {}\n'.format(cdffile))
outfile.write('Burst, start index, end index, number of samples, start time, end time\n')
for x in enumerate(edge_tuples):
t0 = num2date(cdf['time'][x[1][0]], tunits)
t1 = num2date(cdf['time'][x[1][1]]-1, tunits)
try:
s = '{}, {}, {}, {}, {}, {}\n'.format(x[0], x[1][0], x[1][1],
x[1][1] - x[1][0], t0, t1)
except:
s = '{}, {}, {}, , , \n'.format(x[0], x[1][0], x[1][1])
outfile.write(s)
cdf.close()
print('Indexes written to {}'.format(index_file))
if __name__ == "__main__":
# then we have been run from the command line
    if len(sys.argv) < 4:
        print("%s \n usage:" % sys.argv[0])
        print("reshapeEPIC(ContFile, burst_file, burst_length, [dim2changename], [edges], [vars2omit])")
        sys.exit(1)
    if len(sys.argv) > 4:
        # TODO the keyword pairs do not get into reshape EPIC correctly,
        reshapeEPIC(sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4:])
    else:
        reshapeEPIC(sys.argv[1], sys.argv[2], int(sys.argv[3]))
else:
# we have been imported
# the argument passing here works fine
pass
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/EPICstuff/reshapeEPIC.py | reshapeEPIC.py |
"""
Helper functions, mostly EPIC specific
"""
# TODO bring up to PEP8
# TODO split these in to EPIC and non-EPIC functionality
from netCDF4 import Dataset
from netCDF4 import num2date
import datetime as dt
from pandas import Timestamp
import numpy as np
import math
import os
import sys
def s2hms(secs):
"""
convert seconds to hours, minutes and seconds
:param int secs:
:return: hours, minutes and seconds
"""
hour = math.floor(secs/3600)
mn = math.floor((secs % 3600)/60)
sec = secs % 60
return hour, mn, sec
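# Worked example: s2hms(3661) returns (1, 1, 1), i.e. 1 hour, 1 minute and 1 second.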
def jdn(dto):
"""
convert datetime object to Julian Day Number
:param object dto: datetime
:return: int Julian Day Number
"""
year = dto.year
month = dto.month
day = dto.day
not_march = month < 3
if not_march:
year -= 1
month += 12
fr_y = math.floor(year / 100)
reform = 2 - fr_y + math.floor(fr_y / 4)
jjs = day + (
math.floor(365.25 * (year + 4716)) + math.floor(30.6001 * (month + 1)) + reform - 1524)
return jjs
def ajd(dto):
"""
Given datetime object returns Astronomical Julian Day.
Day is from midnight 00:00:00+00:00 with day fractional
value added.
:param object dto: datetime
:return: int Astronomical Julian Day
"""
jdd = jdn(dto)
day_fraction = dto.hour / 24.0 + dto.minute / 1440.0 + dto.second / 86400.0
return jdd + day_fraction - 0.5
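# Worked example tying these to the EPIC datum used elsewhere in this package:
# jdn(dt.datetime(1968, 5, 23)) gives 2440000 and ajd(dt.datetime(1968, 5, 23)) gives 2439999.5,
# consistent with "2440000 = 0000 h on May 23, 1968" (the astronomical day begins at noon,
# hence the extra 0.5).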
# noinspection SpellCheckingInspection
def cftime2EPICtime(timecount, timeunits):
    """
    take a CF time variable and convert to EPIC time and time2
    :param timecount: count of time units (e.g. minutes) elapsed since the time stamp given in timeunits
    :param str timeunits: CF units string, e.g. 'minutes since 2017-09-25 15:00:00.0'
    :return: EPIC time (True Julian Day) and time2 (milliseconds since midnight)
    """
buf = timeunits.split()
t0 = dt.datetime.strptime(buf[2]+' '+buf[3], '%Y-%m-%d %H:%M:%S.%f')
t0j = ajd(t0)
# julian day for EPIC is the beginning of the day e.g. midnight
t0j = t0j+0.5 # add 0.5 because ajd() subtracts 0.5
if buf[0] == 'hours':
tj = timecount/24
elif buf[0] == 'minutes':
tj = timecount/(24*60)
elif buf[0] == 'seconds':
tj = timecount/(24*60*60)
elif buf[0] == 'milliseconds':
tj = timecount/(24*60*60*1000)
elif buf[0] == 'microseconds':
tj = timecount/(24*60*60*1000*1000)
else:
# TODO add a warning here, we're here because no units were recognized
tj = timecount
tj = t0j+tj
time = math.floor(tj)
time2 = math.floor((tj-time)*(24*3600*1000))
return time, time2
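# Worked example: cftime2EPICtime(0.0, 'seconds since 1968-05-23 00:00:00.0') returns (2440000, 0),
# the EPIC datum. Note that the units string must include fractional seconds because of the
# '%Y-%m-%d %H:%M:%S.%f' format used above.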
def EPICtime2datetime(time, time2):
"""
convert EPIC time and time2 to python datetime object
:param numpy array time:
:param numpy array time2:
:return: gregorian time as a list of int, datetime object
"""
# TODO - is there a rollover problem with this algorithm?
dtos = []
gtime = []
for idx in range(len(time)):
# time and time2 are the julian day and milliseconds
# in the day as per PMEL EPIC convention for netCDF
jd = time[idx]+(time2[idx]/(24*3600*1000))
secs = (jd % 1)*(24*3600)
j = math.floor(jd) - 1721119
in1 = 4*j-1
y = math.floor(in1/146097)
j = in1 - 146097*y
in1 = math.floor(j/4)
in1 = 4*in1 + 3
j = math.floor(in1/1461)
d = math.floor(((in1 - 1461*j) + 4)/4)
in1 = 5*d - 3
m = math.floor(in1/153)
d = math.floor(((in1 - 153*m) + 5)/5)
y = y*100 + j
mo = m-9
yr = y+1
if m < 10:
mo = m+3
yr = y
hour, mn, sec = s2hms(secs)
ss = math.floor(sec)
hundredths = math.floor((sec-ss)*100)
gtime.append([yr, mo, d, hour, mn, ss, hundredths])
# centiseconds * 10000 = microseconds
dto = dt.datetime(yr, mo, d, hour, mn, ss, int(hundredths*10000))
dtos.append(dto)
return gtime, dtos
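# Worked example (round trip with cftime2EPICtime above): EPICtime2datetime([2440000], [0])
# should give ([[1968, 5, 23, 0, 0, 0, 0]], [datetime.datetime(1968, 5, 23, 0, 0)]).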
def resample_cleanup(datafiles):
for file_name in datafiles:
print(f'Working on file {file_name}')
# re-open the dataset for numerical operations such as min and max
# we have to make attribute changes, etc. so need to open with the netCDF package
pyd = Dataset(file_name, mode="r+", format='NETCDF4')
dim_keys = pyd.dimensions.keys()
# add minimum and maximum attributes and replace NaNs with _FillValue
for var_key in pyd.variables.keys():
if (var_key not in dim_keys) & (var_key not in {'time', 'EPIC_time', 'EPIC_time2'}):
data = pyd[var_key][:]
nan_idx = np.isnan(pyd[var_key][:])
mn = np.min(data[~nan_idx])
mx = np.max(data[~nan_idx])
print('%s min = %f, max = %f' % (var_key, mn, mx))
pyd[var_key].minimum = mn
pyd[var_key].maximum = mx
fill = pyd.variables[var_key].getncattr('_FillValue')
data[nan_idx] = fill
pyd[var_key][:] = data
# Add back EPIC time
timecount = pyd['time']
timeunits = pyd['time'].units
time, time2 = cftime2EPICtime(timecount, timeunits)
print('first time = %7d and %8d' % (time[0], time2[0]))
# noinspection PyBroadException
try:
varobj = pyd.createVariable('EPIC_time', 'u4', ('time'))
except:
print('EPIC_time exists, updating.')
varobj = pyd['EPIC_time']
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
varobj[:] = time[:]
try:
varobj = pyd.createVariable('EPIC_time2','u4',('time'))
except:
print('EPIC_time2 exists, updating.')
varobj = pyd['EPIC_time2']
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
varobj[:] = time2[:]
# re-compute DELTA_T in seconds
dtime = np.diff(pyd['time'][:])
dtm = dtime.mean().astype('float').round()
u = timeunits.split()
if u[0] == 'minutes':
dtm = dtm*60
elif u[0] == 'hours':
dtm = dtm*60*60
elif u[0] == 'milliseconds':
dtm = dtm/1000
elif u[0] == 'microseconds':
dtm = dtm/1000000
DELTA_T = '%d' % int(dtm)
pyd.DELTA_T = DELTA_T
print(DELTA_T)
# check start and stop time
pyd.start_time = '%s' % num2date(pyd['time'][0],pyd['time'].units)
pyd.stop_time = '%s' % num2date(pyd['time'][-1],pyd['time'].units)
print('cf start time %s' % pyd.start_time)
print('cf stop time %s' % pyd.stop_time)
# add the variable descriptions
var_desc = ''
for var_key in pyd.variables.keys():
if var_key not in dim_keys:
var_desc = var_desc+':'+var_key
var_desc = var_desc[1:]
print(var_desc)
pyd.VAR_DESC = var_desc
pyd.close()
def catEPIC(datafiles, outfile):
nfiles = len(datafiles)
# use the first file to establish some information
nc0 = Dataset(datafiles[0], mode = 'r', format = 'NETCDF4')
varnames = nc0.variables.keys()
if 'time2' not in varnames:
CFtime = True
if 'calendar' not in nc0['time'].__dict__:
print('No calendar specified, using gregorian')
nccalendar = 'gregorian'
else:
nccalendar = nc0['time'].calendar
else:
CFtime = False
nc0.close()
# now glean time information from all the files
alldt = np.array([])
timelens = []
for ifile in range(nfiles):
print(datafiles[ifile])
nc = Dataset(datafiles[ifile], mode = 'r', format = 'NETCDF4')
timelens.append(nc.dimensions['time'].size)
if CFtime:
tobj = num2date(nc['time'][:],nc['time'].units,calendar=nccalendar)
alldt = np.concatenate((alldt,tobj))
else:
            gtime, tobj = EPICtime2datetime(nc['time'][:], nc['time2'][:])
            alldt = np.concatenate((alldt, tobj))
print('file %d is %s to %s' % (ifile, tobj[0], tobj[-1]))
print(' first time object nc[''time''][0] is %f' % nc['time'][0])
print(' time units are %s' % nc['time'].units)
#if 'corvert' in nc.variables.keys():
# print(' there is a corvert')
nc.close()
# it was the case in the MATLAB version that the schema technique
# would reorder the variables - not sure python does this
# reordering the variables might not be a problem here since they are
# iterated by name
# dimensions
ncid_out = Dataset(outfile, "w", clobber=True, format="NETCDF4")
ncid_in = Dataset(datafiles[0], mode = 'r', format = 'NETCDF4')
for dimname in ncid_in.dimensions.keys():
if 'time' in dimname:
ncid_out.createDimension('time',len(alldt))
else:
ncid_out.createDimension(dimname,ncid_in.dimensions[dimname].size)
# global attributes
for attname in ncid_in.__dict__:
ncid_out.setncattr(attname,ncid_in.getncattr(attname))
# variables with their attributes
for varname in ncid_in.variables.keys():
print('Creating %s as %s' % (varname, ncid_in[varname].datatype))
ncid_out.createVariable(varname, ncid_in[varname].datatype,
dimensions = ncid_in[varname].dimensions)
for attname in ncid_in[varname].__dict__:
ncid_out[varname].setncattr(attname, ncid_in[varname].getncattr(attname))
ncid_out.close()
ncid_in.close()
# load the data
ncid_out = Dataset(outfile, mode='r+', format="NETCDF4")
print(timelens)
for ifile in range(nfiles):
if ifile == 0:
outidx_start = 0
outidx_end = outidx_start+timelens[ifile]
else:
outidx_start = outidx_end
outidx_end = outidx_start+timelens[ifile]
        print('getting data from file %s and writing %d to %d (pythonic indices)' % (datafiles[ifile], outidx_start, outidx_end))
ncid_in = Dataset(datafiles[ifile], mode="r", format="NETCDF4")
        # TODO - check for the variable in the outfile
for varname in ncid_in.variables.keys():
dimnames = ncid_in[varname].dimensions
if 'time' in dimnames:
s = outidx_start
e = outidx_end
else:
s = 0
e = len(ncid_in[varname])
ndims = len(ncid_in[varname].dimensions)
#print('%s, %d' % (varname, ndims))
#print(len(ncid_in[varname]))
if ndims == 1:
ncid_out[varname][s:e] = ncid_in[varname][:]
elif ndims == 2:
ncid_out[varname][s:e,:] = ncid_in[varname][:,:]
elif ndims == 3:
ncid_out[varname][s:e,:,:] = ncid_in[varname][:,:,:]
elif ndims == 4:
ncid_out[varname][s:e,:,:,:] = ncid_in[varname][:,:,:,:]
ncid_in.close()
# finally, put the correct time span in th output file
units = "seconds since %d-%d-%d %d:%d:%f" % (alldt[0].year,
alldt[0].month,alldt[0].day,alldt[0].hour,alldt[0].minute,
alldt[0].second+alldt[0].microsecond/1000000)
# the 0:00 here was causing problems for xarray
units = "seconds since %d-%d-%d %d:%d:%f +0:00" % (alldt[0].year,
alldt[0].month,alldt[0].day,alldt[0].hour,alldt[0].minute,
alldt[0].second+alldt[0].microsecond/1000000)
elapsed = alldt-alldt[0] # output is a numpy array of timedeltas
# have to iterate to get at the timedelta objects in the numpy container
# seems crazy, someone please show me a better trick!
elapsed_sec = []
for e in elapsed: elapsed_sec.append(e.total_seconds())
t = np.zeros((len(alldt),1))
t2 = np.zeros((len(alldt),1))
for i in range(len(alldt)):
jd = ajd(alldt[i])
t[i] = int(math.floor(jd))
t2[i] = int((jd - math.floor(jd))*(24*3600*1000))
if CFtime:
ncid_out['time'][:] = elapsed_sec[:]
ncid_out['time'].units = units
ncid_out['EPIC_time'][:] = t[:]
ncid_out['EPIC_time2'][:] = t2[:]
else:
ncid_out['CFtime'][:] = elapsed_sec[:]
ncid_out['CFtime'].units = units
        ncid_out['time'][:] = t[:]
        ncid_out['time2'][:] = t2[:]
# recompute start_time and end_time
ncid_out.start_time = '%s' % num2date(ncid_out['time'][0],ncid_out['time'].units)
print(ncid_out.start_time)
ncid_out.stop_time = '%s' % num2date(ncid_out['time'][-1],ncid_out['time'].units)
print(ncid_out.stop_time)
# TODO update history
ncid_out.close()
def check_fill_value_encoding(ds):
"""
restore encoding to what it needs to be for EPIC and CF compliance
variables' encoding will be examined for the correct _FillValue
:param ds: xarray Dataset
:return: xarray Dataset with corrected encoding, dict with encoding that can be used with xarray.to_netcdf
"""
encoding_dict = {}
for var in ds.variables.items():
encoding_dict[var[0]] = ds[var[0]].encoding
# is it a coordinate?
if var[0] in ds.coords:
# coordinates do not have a _FillValue
if '_FillValue' in var[1].encoding:
encoding_dict[var[0]]['_FillValue'] = False
else:
# _FillValue cannot be NaN and must match the data type so just make sure it matches the data type.
# xarray changes ints to floats, not sure why yet
if '_FillValue' in var[1].encoding:
if np.isnan(var[1].encoding['_FillValue']):
print('NaN found in _FillValue, correcting')
if var[1].encoding['dtype'] in {'float32', 'float64'}:
var[1].encoding['_FillValue'] = 1E35
encoding_dict[var[0]]['_FillValue'] = 1E35
elif var[1].encoding['dtype'] in {'int32', 'int64'}:
var[1].encoding['_FillValue'] = 32768
encoding_dict[var[0]]['_FillValue'] = 32768
return ds, encoding_dict
def fix_missing_time(ds, delta_t):
"""
fix missing time values
change any NaT values in 'time' to a time value based on the last known good time, iterating to cover
larger gaps by constructing time as we go along.
xarray.DataArray.dropna is one way to do this, automated and convenient, and will leave an uneven time series,
so if you don't mind time gaps, that is a better tool.
:param ds: xarray Dataset, time units are in seconds
    :param int delta_t: inter-burst time, sec, for the experiment's sampling scheme
    :return: xarray Dataset with NaT time values filled in, and the count of values repaired
"""
# TODO This could be improved by using an index mapping method - when I know python better.
dsnew = ds
count = 0
tbad = dsnew['time'][:].data # returns a numpy array of numpy.datetime64
tgood = tbad
# TODO - what if the first time is bad? need to look forward, then work backward
for t in enumerate(tbad):
if np.isnat(t[1]):
count += 1
prev_time = tbad[t[0] - 1]
new_time = prev_time + np.timedelta64(delta_t, 's')
tgood[t[0]] = new_time
print('bad time at {} will be given {}'.format(t[0], tgood[t[0]]))
return dsnew, count
def apply_timezone(cf_units):
"""
In xarray, the presence of time zone information in the units was causing decode_cf to ignore the hour,
    minute and second information. This function applies the time zone information and removes it from the units
:param str cf_units:
:return: str
"""
if len(cf_units.split()) > 4:
# there is a time zone included
print(f'time zone information found in {cf_units}')
split_units = cf_units.split()
hrs, mins = split_units[4].split(':')
if '-' in hrs:
hrs = hrs[1:]
sign = -1
else:
sign = 1
dtz = dt.timedelta(0, 0, 0, 0, int(mins), int(hrs)) # this will return seconds
ts = Timestamp(split_units[2] + ' ' + split_units[3], tzinfo=None)
if sign < 0:
new_ts = ts - dtz
else:
new_ts = ts + dtz
if 'seconds' in cf_units:
new_units = '{} since {}'.format(split_units[0], new_ts)
else:
new_units = cf_units
print('unrecognized time units, units not changed')
print(f'new_units = {new_units}')
return new_units
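# Sketch with a hypothetical units string: apply_timezone('seconds since 2017-09-25 15:00:00.000 -5:00')
# folds the -5:00 offset into the time stamp and removes it from the string, giving
# 'seconds since 2017-09-25 10:00:00'.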
def make_encoding_dict(ds):
"""
prepare encoding dictionary for writing a netCDF file later using xarray.to_netcdf
:param ds: xarray Dataset
:return: dict with encoding prepared for xarray.to_netcdf to EPIC/CF conventions
"""
encoding_dict = {}
for item in ds.variables.items():
# print(item)
var_name = item[0]
var_encoding = ds[var_name].encoding
encoding_dict[var_name] = var_encoding
# print('encoding for {} is {}'.format(var_name, encoding_dict[var_name]))
# is it a coordinate?
if var_name in ds.coords:
# coordinates do not have a _FillValue
if '_FillValue' in encoding_dict[var_name]:
print(f'encoding {var_name} fill value to False')
else:
print(f'encoding {var_name} is missing fill value, now added and set to False')
encoding_dict[var_name]['_FillValue'] = False
else:
# _FillValue cannot be NaN and must match the data type
# so just make sure it matches the data type.
if '_FillValue' in encoding_dict[var_name]:
print('{} fill value is {}'.format(var_name, encoding_dict[var_name]['_FillValue']))
if np.isnan(encoding_dict[var_name]['_FillValue']):
if 'float' in str(encoding_dict[var_name]['dtype']):
encoding_dict[var_name]['_FillValue'] = 1E35
elif 'int' in str(encoding_dict[var_name]['dtype']):
encoding_dict[var_name]['_FillValue'] = 32768
print('NaN found in _FillValue of {}, corrected to {}'.format(
var_name, encoding_dict[var_name]['_FillValue']))
elif encoding_dict[var_name]['_FillValue'] is None:
if 'float' in str(encoding_dict[var_name]['dtype']):
encoding_dict[var_name]['_FillValue'] = 1E35
elif 'int' in str(encoding_dict[var_name]['dtype']):
encoding_dict[var_name]['_FillValue'] = 32768
print('None found in _FillValue of {}, corrected to {}'.format(
var_name, encoding_dict[var_name]['_FillValue']))
else:
print('encoding found in _FillValue of {} remains {}'.format(var_name,
encoding_dict[var_name]['_FillValue']))
return encoding_dict
# TODO this is coded only for catEPIC, expand for other methods in this file
def __main():
print('%s running on python %s' % (sys.argv[0], sys.version))
if len(sys.argv) < 2:
print("%s usage:" % sys.argv[0])
print("catEPIC file_list outfile\n")
sys.exit(1)
try:
datafiles = sys.argv[1]
except:
print('error - input file list missing')
sys.exit(1)
try:
outfile = sys.argv[2]
except:
print('error - output file name missing')
sys.exit(1)
# some input testing
    if not os.path.isfile(datafiles[0]):
        print('error - input file not found')
        sys.exit(1)
    if os.path.isfile(outfile):
        print('%s will be overwritten' % outfile)
    print('Start concatenation at ', dt.datetime.now())
    catEPIC(datafiles, outfile)
    print('Finished file concatenation at ', dt.datetime.now())
if __name__ == "__main__":
__main()
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/EPICstuff/EPICmisc.py | EPICmisc.py |
"""
Example script of how to implement reshaping of a continuous ADCP data file into one with bursts,
e.g. time ---> time, sample
Because the continuous file can be very large, this splits the output into more than one file, if desired
"""
import netCDF4 as nc
from netCDF4 import num2date
import math
import sys
import datetime as dt
import adcpy.EPICstuff.reshapeEPIC as reshape
# making the indices
input_path = r'.\\'
output_path = r'.\\'
# continuous file
continuous_file = 'WAV17M2T02whV.nc' # rotated file with a 1D, continuous time series
number_of_output_files = 4
burst_file_name = 'WAV17M2T02whVwaves.nc'
index_file_name = 'WAV17M2T02whVindecesnc.txt'
"""
From the wavesmon config file
'Continuous waves 20171124T225553.pd0Wvs.cfg'
we have
[ADCP Setup]
EPB=2048
TBE=50
TBB=3600
"""
sample_rate = 2
# burst_length = 4096
burst_length = 2048
burst_interval = 3600 # 60 min interval
burst_start_offset = 0
# note we are dropping EPIC time as it is causing problems
variables_to_omit = {'EPIC_time', 'EPIC_time2'}
attributes_to_omit = {'valid_range'} # this is in older converted files and needs to be removed
dry_run = True
# ------------------- the rest of this should be automatic, no user settings below
operation_start = dt.datetime.now()
print('Start script run at ', operation_start)
dim = 'time'
# ----------- execute
all_slices = reshape.generate_expected_start_times(input_path + continuous_file, dim,
burst_start_offset, burst_interval, burst_length, sample_rate)
# here we limit the slices for testing
# print('** reducing the number of slices')
slices = all_slices # [200:300]
continuous_netcdf_object = nc.Dataset(input_path + continuous_file, format="NETCDF4")
print('the last time is {} seconds from the start of the experiment'.format(continuous_netcdf_object['time'][-1]))
print('looking up the boundaries... this takes about 10 minutes on a 12 GB file')
edges = reshape.find_boundaries(continuous_netcdf_object['time'][:], slices)
for x in edges[0:5]:
print('at indices {} to {} we found times {} to {}'.format(x[0], x[1], continuous_netcdf_object['time'][x[0]],
continuous_netcdf_object['time'][x[1]]))
burst_lengths = list(map(lambda t: t[1] - t[0], edges))
for x in burst_lengths[0:5]:
print('bursts are {} long'.format(x))
continuous_netcdf_object.close()
print('elapsed time is {} min'.format((dt.datetime.now() - operation_start).total_seconds() / 60))
reshape.save_indexes_to_file(input_path + continuous_file, edges, output_path + index_file_name)
number_of_bursts_per_file = int(math.floor(len(edges) / number_of_output_files))
# now iterate through the number of output files
# for ifile in range(1):
for file_index in range(number_of_output_files):
s = burst_file_name.split('.')
burstFile = s[0] + (f'%02d.' % file_index) + s[1]
print('making burst file {}'.format(burstFile))
burst_start_index = file_index * number_of_bursts_per_file
burst_end_index = burst_start_index + number_of_bursts_per_file
if burst_end_index > len(edges):
        burst_end_index = len(edges)
edges_this_file = edges[burst_start_index:burst_end_index]
samples_in_each_burst = list(map(lambda t: t[1] - t[0], edges_this_file))
# if there are no samples in a burst, we will skip the burst
# skip them by removing them from this index list
# this cleans up the tail end of the last file
# TODO - use None to signal
    idx_empty_bursts = list(map(lambda y: False if y == 0 else True, samples_in_each_burst))
    print('Zero samples in {} bursts, these will be omitted'.format(idx_empty_bursts.count(False)))
continuous_netcdf_object = nc.Dataset(input_path + continuous_file, format="NETCDF4")
time_units = continuous_netcdf_object['time'].units
number_to_display = 5
if number_of_bursts_per_file < number_to_display or number_of_bursts_per_file < number_to_display * 2:
number_to_display = number_of_bursts_per_file
x = list(range(number_to_display))
else:
x = list(range(number_to_display)) + list(
range(len(edges_this_file) - number_to_display - 1, len(edges_this_file) - 1))
for i in x:
print('burst {} will be {} samples from {} to {}'.format(
i, samples_in_each_burst[i],
num2date(continuous_netcdf_object['time'][edges_this_file[i][0]], time_units),
num2date(continuous_netcdf_object['time'][edges_this_file[i][1]], time_units)))
continuous_netcdf_object.close()
if not dry_run:
reshape.reshapeEPIC(input_path + continuous_file, output_path + burstFile, burst_length,
                            dim='time', edges=edges_this_file, drop=variables_to_omit,
                            variable_attributes_to_omit=attributes_to_omit)
print('End script run at ', dt.datetime.now())
print('elapsed time is {} min'.format((dt.datetime.now() - operation_start).total_seconds() / 60))
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/EPICstuff/doreshape.py | doreshape.py |
"""
This code takes a raw netcdf file containing data from any 4 beam Janus
acoustic doppler profiler, with or without a center beam, and transforms the
data into Earth coordinates. Data are output to netCDF using controlled
vocabulary for the variable names, following the EPIC convention wherever
possible.
ADCPcdf2ncEPIC.doEPIC_ADCPfile(cdfFile, ncFile, attFile, settings)
cdfFile = path to a USGS raw netCDF ADCP data file
ncFile = a netcdf file structured according to PMEL EPIC conventions
attFile = a file containing global attributes (metadata) for the data. See below
settings = a dictionary of preferences for the processing::
'good_ensembles': [0, np.inf] # starting and ending indices of the input file. For all data use [0,np.inf]
'orientation': 'UP' # uplooking ADCP, for downlooking, use DOWN
'transducer_offset_from_bottom': 1.0 # a float in meters
'transformation': 'EARTH' # | BEAM | INST
'adjust_to_UTC': 5 # for EST to UTC, if no adjustment, set to 0 or omit
Depth dependent attributes are computed from the mean Pressure found in the raw
data file. So it is best to have the time series trimmed to the in water
time or to provide the good ensemble indices for in water time
Note that file names and paths may not include spaces
Example contents of a Global Attribute file::
SciPi; J.Q. Scientist
PROJECT; USGS Coastal Marine Geology Program
EXPERIMENT; MVCO 2015 Stress Comparison
DESCRIPTION; Quadpod 13.9m
DATA_SUBTYPE; MOORED
COORD_SYSTEM; GEOGRAPHIC + SAMPLE
Conventions; PMEL/EPIC
MOORING; 1057
WATER_DEPTH; 13.9
WATER_DEPTH_NOTE; (meters), nominal
WATER_DEPTH_source; ship fathometer
latitude; 41.3336633
longitude; -70.565877
magnetic_variation; -14.7
Deployment_date; 17-Nov-2015
Recovery_date; 14-Dec-2015
DATA_CMNT;
platform_type; USGS aluminum T14 quadpod
DRIFTER; 0
POS_CONST; 0
DEPTH_CONST; 0
Conventions; PMEL/EPIC
institution; United States Geological Survey, Woods Hole Coastal and Marine Science Center
institution_url; http://woodshole.er.usgs.gov
Created on Tue May 16 13:33:31 2017
@author: mmartini
"""
import os
import sys
import numpy as np
from netCDF4 import Dataset
import netCDF4 as netcdf
import datetime as dt
from datetime import datetime
# noinspection PyPep8Naming
def doEPIC_ADCPfile(cdfFile, ncFile, attFile, settings):
"""
Convert a raw netcdf file containing data from any 4 beam Janus acoustic doppler profiler,
with or without a center beam, transforming the data into Earth coordinates. Data are output to netCDF
using controlled vocabulary for the variable names, following the EPIC convention wherever possible.
:param str cdfFile: raw netCDF input data file name
:param str ncFile: output file name
:param str attFile: text file containing metadata
:param dict settings: a dict of settings as follows::
'good_ensembles': [] # starting and ending indices of the input file. For all data use [0,np.inf]
'orientation': 'UP' # uplooking ADCP, for downlooking, use DOWN
'transducer_offset_from_bottom': 2.02 # in meters
'transformation': 'EARTH' # | BEAM | INST
'adjust_to_UTC': 5 # for EST to UTC, if no adjustment, set to 0 or omit
"""
# check some of the settings we can't live without
# set flags, then remove from the settings list if we don't want them in metadata
if 'good_ensembles' not in settings.keys():
settings['good_ensembles'] = [0, np.inf] # nothing from user, do them all
print('No starting and ending ensembles specified, processing entire file')
if 'orientation' not in settings.keys():
settings['orientation'] = "UP"
settings['orientation_note'] = "assumed by program"
print('No orientation specified, assuming up-looking')
else:
settings['orientation_note'] = "user provided orientation"
if 'transducer_offset_from_bottom' not in settings.keys():
settings['transducer_offset_from_bottom'] = 0
print('No transducer_offset_from_bottom, assuming 0')
if 'transformation' not in settings.keys():
settings['transformation'] = "EARTH"
if 'adjust_to_UTC' not in settings.keys():
settings['adjust_to_UTC'] = 0
# TODO implement this time_type_out selection, right now does what is in the raw netCDF
# if 'time_type_out' not in settings.keys():
# settings['time_type_out'] = "CF"
# if 'time_type_out' in settings.keys():
# time_type_out = settings['time_type_out']
# else:
# time_type_out = "CF"
if 'use_pressure_for_WATER_DEPTH' in settings.keys():
if settings['use_pressure_for_WATER_DEPTH']:
usep4waterdepth = True
settings.pop('use_pressure_for_WATER_DEPTH')
else:
usep4waterdepth = False
else:
usep4waterdepth = True
rawcdf = Dataset(cdfFile, mode='r', format='NETCDF4')
rawvars = []
for key in rawcdf.variables.keys():
rawvars.append(key)
# this function will operate on the files using the netCDF package
nc = setupEPICnc(ncFile, rawcdf, attFile, settings)
nbeams = nc.number_of_slant_beams # what if this isn't 4?
nbins = len(rawcdf.dimensions['depth'])
nens = len(rawcdf.dimensions['time'])
ncvars = []
for key in nc.variables.keys():
ncvars.append(key)
declination = nc.magnetic_variation_at_site
# start and end indices
s = settings['good_ensembles'][0]
if settings['good_ensembles'][1] < 0:
e = nens
else:
e = settings['good_ensembles'][1]
print('Converting from index %d to %d of %s' % (s, e, cdfFile))
# many variables do not need processing and can just be copied to the
# new EPIC convention
varlist = {'sv': 'SV_80', 'Rec': 'Rec'}
for key in varlist:
varobj = nc.variables[varlist[key]]
varobj[:] = rawcdf.variables[key][s:e]
# check the time zone, Nortek data are usually set to UTC, no matter what
# the actual time zone of deployment might have been
toffset = 0  # default to no adjustment so toffset is always defined when applied below
if abs(settings['adjust_to_UTC']) > 0:
nc.time_zone_change_applied = settings['adjust_to_UTC']
nc.time_zone_change_applied_note = "adjust time to UTC requested by user"
toffset = settings['adjust_to_UTC']*3600
# determine what kind of time setup we have in the raw file
timevars = ['time', 'time2', 'EPIC_time', 'EPIC_time2', 'cf_time']
timevars_in_file = [item for item in timevars if item in rawvars]
if timevars_in_file == ['time', 'time2']:
time_type = "EPIC"
elif timevars_in_file == ['time', 'time2', 'cf_time']:
time_type = "EPIC_with_CF"
elif timevars_in_file == ['time', 'EPIC_time', 'EPIC_time2']:
time_type = "CF_with_EPIC"
elif timevars_in_file == ['time']:
time_type = "CF"
else:
time_type = None
print("Unrecognized time arrangement, known variables found: {}".format(timevars_in_file))
print("The raw netCDF file has time_type {}".format(time_type))
# raw variable name : EPIC variable name
if 'time_type_out' not in settings.keys():
# default to the time arrangement found in the raw netCDF file (see TODO below)
settings['time_type_out'] = time_type if time_type is not None else 'CF'
if settings['time_type_out'] == 'EPIC':
varlist = {'time': 'time', 'time2': 'time2'}
elif settings['time_type_out'] == 'CF_with_EPIC':
varlist = {'time': 'time', 'EPIC_time': 'EPIC_time', 'EPIC_time2': 'EPIC_time2'}
elif settings['time_type_out'] == 'EPIC_with_CF':
varlist = {'time': 'time', 'time2': 'time2', 'cf_time': 'cf_time'}
else: # only CF time, the default
varlist = {'time': 'time'}
# TODO let user select type of time output, right now it uses what is in the netCDF file
for key in varlist:
print(key)
varobj = nc.variables[varlist[key]]
varobj[:] = rawcdf.variables[key][s:e]+toffset
# TRDI instruments have heading, pitch, roll and temperature in hundredths of degrees
if rawcdf.sensor_type == "TRDI":
degree_factor = 100
else:
degree_factor = 1
varlist = {'Ptch': 'Ptch_1216', 'Roll': 'Roll_1217', 'Tx': 'Tx_1211'}
for key in varlist:
varobj = nc.variables[varlist[key]]
varobj[:] = rawcdf.variables[key][s:e]/degree_factor
# TODO will need an instrument dependent methodology to check for any previous adjustments to heading
# prior to this correction. for instance, with TRDI instruments, Velocity or the EB command might have applied
# a correction. If EB is set, then that value was applied to the raw data seen by TRDIpd0tonetcdf.py
nc.magnetic_variation_applied = declination
nc.magnetic_variation_applied_note = "as provided by user"
heading = rawcdf.variables['Hdg'][s:e]/degree_factor + declination
heading[heading >= 360] = heading[heading >= 360] - 360
heading[heading < 0] = heading[heading < 0] + 360
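# worked example of the declination correction and wrap above (hypothetical values):
# a raw heading of 358.0 deg with declination +5.0 becomes 363.0, wrapped to 3.0
# a raw heading of 2.0 deg with declination -5.0 becomes -3.0, wrapped to 357.0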
nc['Hdg_1215'][:] = heading
# pressure needs to be in db or m
if 'Pressure' in rawvars:
pconvconst = 1 # when in doubt, do nothing
punits = rawcdf['Pressure'].units
if 'deca-pascals' in punits:
pconvconst = 1000 # decapascals to dbar = /1000
print('Pressure in deca-pascals will be converted to db')
nc['P_1'][:] = rawcdf.variables['Pressure'][s:e]/pconvconst
# check units of current velocity and convert to cm/s
vunits = rawcdf['vel1'].units
vconvconst = 1 # when in doubt, do nothing
if (vunits == 'mm s-1') or (vunits == 'mm/s'):
vconvconst = 0.1  # mm/s to cm/s
elif (vunits == 'm s-1') or (vunits == 'm/s'):
vconvconst = 100  # m/s to cm/s
print('Velocity in {} will be converted using a multiplier of {}'.format(vunits, vconvconst))
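# worked example of the conversion above (hypothetical value): a raw velocity of 1234
# with units 'mm s-1' gets vconvconst = 0.1 and is written out as 123.4 cm s-1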
if 'vel5' in rawvars:
nc['Wvert'][:] = rawcdf.variables['vel5'][s:e, :] * vconvconst
if 'cor5' in rawvars:
nc['corvert'][:] = rawcdf.variables['cor5'][s:e, :]
if 'att5' in rawvars:
nc['AGCvert'][:] = rawcdf.variables['att5'][s:e, :]
if 'PGd4' in rawvars:
nc['PGd_1203'][:, :, 0, 0] = rawcdf.variables['PGd4'][s:e, :]
if 'PressVar' in rawvars:
nc['SDP_850'][:, 0, 0] = rawcdf.variables['PressVar'][s:e]
bindist = np.arange(len(nc['bindist']))
bindist = bindist*nc.bin_size+nc.center_first_bin
nc['bindist'][:] = bindist
# figure out DELTA_T - we need to use the cf_time, more convenient
if settings['time_type_out'] == 'CF':
# we will set the main 'time' variable to CF convention
timekey = 'time'
else:
timekey = 'cf_time'
# this calculation uses for CF time
dtime = np.diff(nc[timekey][:])
delta_t = '%s' % int((dtime.mean().astype('float')).round()) # needs to be a string
nc.DELTA_T = delta_t
# depths and heights
nc.initial_instrument_height = settings['transducer_offset_from_bottom']
nc.initial_instrument_height_note = "height in meters above bottom: accurate for tripod mounted instruments"
# compute depth, make a guess we want to average all depths recorded
# deeper than user supplied water depth
# idx is returned as a tuple, the first of which is the actual index values
# set the water depth here, this will be used throughout
# the user may have put units next to the depth
if type(nc.WATER_DEPTH) is str:
water_depth = nc.WATER_DEPTH.split()
water_depth = float(water_depth[0])
else:
water_depth = nc.WATER_DEPTH
if ('Pressure' in rawvars) and usep4waterdepth:
idx = np.where(nc['P_1'][:] > water_depth/2)
# now for the mean of only on bottom pressure measurements
if len(idx[0]) > 0:
pmean = nc['P_1'][idx[0]].mean()
else:
pmean = 0 # this could be if the ADCP is in air the whole time
print('Site WATER_DEPTH given is %f' % water_depth)
print('Calculated mean water level from P_1 is %f m' % pmean)
print('Updating site WATER_DEPTH to %f m' % pmean)
nc.WATER_DEPTH = pmean+nc.transducer_offset_from_bottom
nc.WATER_DEPTH_source = "water depth = MSL from pressure sensor, (meters), nominal"
nc.WATER_DEPTH_NOTE = nc.WATER_DEPTH_source
nc.nominal_sensor_depth = nc.WATER_DEPTH-settings['transducer_offset_from_bottom']
nc.nominal_sensor_depth_note = "inst_depth = (water_depth - inst_height); nominal depth below surface, meters"
varnames = ['bindist', 'depth']
# WATER_DEPTH_datum is not used in this circumstance.
else:
print('Site WATER_DEPTH given is %f' % water_depth)
print('No pressure data available, so no adjustment to water depth made')
nc.WATER_DEPTH_source = "water depth as given by user, (meters), nominal"
nc.WATER_DEPTH_NOTE = nc.WATER_DEPTH_source
nc.nominal_sensor_depth = water_depth-settings['transducer_offset_from_bottom']
nc.nominal_sensor_depth_note = "inst_depth = (water_depth - inst_height); nominal depth below surface, meters"
varnames = ['bindist', 'depth']
# WATER_DEPTH_datum is not used in this circumstance.
for varname in varnames:
nc[varname].WATER_DEPTH = water_depth
nc[varname].WATER_DEPTH_source = nc.WATER_DEPTH_source
nc[varname].transducer_offset_from_bottom = nc.transducer_offset_from_bottom
# update depth variable for location of bins based on WATER_DEPTH information
if "UP" in nc.orientation:
depths = water_depth-nc.transducer_offset_from_bottom-nc['bindist']
else:
depths = -1 * (water_depth-nc.transducer_offset_from_bottom+nc['bindist'])
nc['depth'][:] = depths
nc.start_time = '%s' % netcdf.num2date(nc[timekey][0], nc[timekey].units)
nc.stop_time = '%s' % netcdf.num2date(nc[timekey][-1], nc[timekey].units)
# some of these repeating attributes depended on depth calculations
# these are the same for all variables because all sensors are in the same
# package, as of now, no remote sensors being logged by this ADCP
ncvarnames = []
for key in nc.variables.keys():
ncvarnames.append(key)
omitnames = []
for key in nc.dimensions.keys():
omitnames.append(key)
omitnames.append("Rec")
omitnames.append("depth")
for varname in ncvarnames:
if varname not in omitnames:
varobj = nc.variables[varname]
varobj.sensor_type = nc.INST_TYPE
varobj.sensor_depth = nc.nominal_sensor_depth
varobj.initial_sensor_height = nc.initial_instrument_height
varobj.initial_sensor_height_note = "height in meters above bottom: " +\
"accurate for tripod mounted instruments"
varobj.height_depth_units = "m"
print('finished copying data, starting computations at %s' % (dt.datetime.now()))
print('averaging cor at %s' % (dt.datetime.now()))
# this will be a problem - it loads all into memory
cor = (rawcdf.variables['cor1'][s:e, :] + rawcdf.variables['cor2'][s:e, :] +
rawcdf.variables['cor3'][s:e, :] + rawcdf.variables['cor4'][s:e, :]) / 4
nc['cor'][:, :, 0, 0] = cor[:, :]
print('averaging AGC at %s' % (dt.datetime.now()))
# this will be a problem - it loads all into memory
agc = (rawcdf.variables['att1'][s:e, :] + rawcdf.variables['att2'][s:e, :] +
rawcdf.variables['att3'][s:e, :]+rawcdf.variables['att4'][s:e, :]) / 4
nc['AGC_1202'][:, :, 0, 0] = agc[:, :]
print('converting %d ensembles from beam to earth %s' % (len(nc[timekey]), dt.datetime.now()))
# check our indexing
print('magnetic variation at site = %f' % nc.magnetic_variation_at_site)
print('magnetic variation applied = %f' % nc.magnetic_variation_applied)
print('magnetic variation applied note = %s' % nc.magnetic_variation_applied_note)
n = int(len(heading)/2)
print('From the middle of the time series at ensemble #%d, we have:' % n)
print('heading variable in this python process = %f' % heading[n])
print('rawcdf Hdg[n] = %f' % rawcdf['Hdg'][n])
print('nc Hdg_1215[n] = %f' % nc['Hdg_1215'][n, 0, 0])
# TODO add depth bin mapping
# this beam arrangement is for TRDI Workhorse and V, other instruments
# should be re-ordered to match
rawvarnames = ["vel1", "vel2", "vel3", "vel4"]
ncidx = 0
if settings['transformation'].upper() == "BEAM":
ncvarnames = ["Beam1", "Beam2", "Beam3", "Beam4"]
for idx in range(s, e):
for beam in range(nbeams):
nc[ncvarnames[beam]][ncidx, :, 0, 0] = \
rawcdf.variables[rawvarnames[beam]][idx, :] * vconvconst
ncidx = ncidx + 1
elif (settings['transformation'].upper() == "INST") or (settings['transformation'].upper() == "EARTH"):
ncvarnames = ["X", "Y", "Z", "Error"]
# the dolfyn way (https://github.com/lkilcher/dolfyn)
# load the ADCP data object - we have converted this from a class object to nested dictionaries for use here
adcpo = {
'props': {
'coord_sys': "beam",
'inst2earth:fixed': False,
},
'config': {
'beam_angle': nc.beam_angle,
'beam_pattern': nc.beam_pattern,
'orientation': nc.orientation,
},
# note declination is applied immediately when heading is read from the raw data file
'declination_in_heading': True,
# dolfyn shape for ensemble data is [bins x beams x ens]
'vel': np.ones([nbins, nbeams], dtype='float') * np.nan,
}
# vels has to be pre-defined to get the shapes to broadcast
# noinspection PyUnusedLocal
vels = np.ones([nbins, 1], dtype='float') * np.nan
# Nortek and TRDI do their along beam velocity directions opposite for
# slant beams. Vertical beam directions are the same.
if rawcdf.sensor_type == 'Nortek':
beam_vel_multiplier = -1
else:
beam_vel_multiplier = 1
for idx in range(s, e):
for beam in range(nbeams):
# load data of one ensemble to dolfyn shape, in cm/s
# adcpo['vel'][:,beam,0] = rawcdf.variables[rawvarnames[beam]][idx,:] * 0.1
vels = rawcdf.variables[rawvarnames[beam]][idx, :] * vconvconst * beam_vel_multiplier
adcpo['vel'][:, beam] = vels
# need to keep setting this with new beam data since we are iterating
adcpo['props']['coord_sys'] = "beam"
beam2inst(adcpo) # adcpo['vel'] is returned in inst coordinates
if settings['transformation'].upper() == "EARTH":
ncvarnames = ["u_1205", "v_1206", "w_1204", "Werr_1201"]
adcpo['heading_deg'] = nc.variables['Hdg_1215'][ncidx]
adcpo['pitch_deg'] = nc.variables['Ptch_1216'][ncidx]
adcpo['roll_deg'] = nc.variables['Roll_1217'][ncidx]
inst2earth(adcpo)
for beam in range(nbeams):
nc[ncvarnames[beam]][ncidx, :, 0, 0] = adcpo['vel'][:, beam]
ncidx = ncidx + 1
# immediate - then less feedback
ensf, ensi = np.modf(ncidx/1000)
if (ensf == 0) and (ncidx < 10000):
print('%d of %d ensembles read' % (ncidx, nens))
else:
ensf, ensi = np.modf(ncidx/10000)
if ensf == 0:
print('%d of %d ensembles read' % (ncidx, nens))
nc.transform = settings['transformation'].upper()
print('closing files at %s' % (dt.datetime.now()))
rawcdf.close()
nc.close()
def cal_earth_rotmatrix(heading=0, pitch=0, roll=0, declination=0):
"""
this transformation matrix is from the R.D. Instruments Coordinate Transformation booklet.
It presumes the beams are in the same position as RDI Workhorse ADCP beams, where,
when looking down on the transducers::
Beam 3 is in the direction of the compass' zero reference
Beam 1 is to the right
Beam 2 is to the left
Beam 4 is opposite beam 3
Pitch is about the beam 2-1 axis and is positive when beam 3 is raised
Roll is about the beam 3-4 axis and is positive when beam 2 is raised
Heading increases when beam 3 is rotated towards beam 1
Nortek Signature differs in these ways::
TRDI beam 3 = Nortek beam 1
TRDI beam 1 = Nortek beam 2
TRDI beam 4 = Nortek beam 3
TRDI beam 2 = Nortek beam 4
Heading, pitch and roll behave the same as TRDI
:param float heading: ADCP heading in degrees
:param float pitch: ADCP pitch in degrees
:param float roll: ADCP roll in degrees
:param float declination: heading offset from true, Westerly is negative
:return:
"""
# the docstring specifies degrees; convert to radians before the numpy trig calls
heading = np.radians(heading + declination)
pitch = np.radians(pitch)
roll = np.radians(roll)
ch = np.cos(heading)
sh = np.sin(heading)
cp = np.cos(pitch)
sp = np.sin(pitch)
cr = np.cos(roll)
sr = np.sin(roll)
return np.asmatrix(np.array([
[(ch*cr+sh*sp*sr), (sh*cp), (ch*sr-sh*sp*cr)],
[(-sh*cr+ch*sp*sr), (ch*cp), (-sh*sr-ch*sp*cr)],
[(-cp*sr), sp, (cp*cr)]
]))
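# A minimal sketch of how cal_earth_rotmatrix might be applied to one ensemble of
# instrument-coordinate velocities (hypothetical values; note that doEPIC_ADCPfile
# uses the dolfyn-derived inst2earth below rather than this function):
#
# rotmat = cal_earth_rotmatrix(heading=90.0, pitch=1.5, roll=-0.5, declination=-14.7)
# vel_earth = rotmat * np.asmatrix([u, v, w]).transpose()  # u, v, w in instrument coordinates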
# start code from dolfyn
def calc_beam_rotmatrix(theta=20, convex=True, degrees=True):
"""
Calculate the rotation matrix from beam coordinates to
instrument head coordinates.
per dolfyn rotate.py code here: https://github.com/lkilcher/dolfyn
:param float theta: is the angle of the heads (usually 20 or 30 degrees)
:param int convex: is a flag for convex or concave head configuration.
:param bool degrees: is a flag which specifies whether theta is in degrees or radians (default: degrees=True)
"""
deg2rad = np.pi / 180.
if degrees:
theta = theta * deg2rad
if convex == 0 or convex == -1:
c = -1
else:
c = 1
a = 1 / (2. * np.sin(theta))
b = 1 / (4. * np.cos(theta))
d = a / (2. ** 0.5)
return np.array([[c * a, -c * a, 0, 0],
[0, 0, -c * a, c * a],
[b, b, b, b],
[d, d, -d, -d]])
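# A minimal sketch of applying the beam-to-instrument matrix to one ensemble
# (hypothetical shapes; this mirrors what beam2inst below does with np.asmatrix):
#
# rotmat = calc_beam_rotmatrix(theta=20, convex=True)  # 4 x 4
# vel_beam = np.ones((nbins, 4))  # [bins x beams] velocities
# vel_inst = (np.asmatrix(rotmat) * np.asmatrix(vel_beam).transpose()).transpose()  # back to [bins x beams]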
def _cat4rot(tpl):
# TODO do we need this function _cat4rot
"""
helper function
:param tpl:
:return: numpy array
"""
tmp = []
for vl in tpl:
tmp.append(vl[:, None, :])
return np.concatenate(tuple(tmp), axis=1)
def beam2inst(adcpo, reverse=False, force=False):
"""
Rotate velocities from beam to instrument coordinates.
:param dict adcpo: containing the beam velocity data.
:param bool reverse: If True, this function performs the inverse rotation (inst->beam).
:param bool force: When true do not check which coordinate system the data is in prior to performing this rotation.
"""
if not force:
if not reverse and adcpo['props']['coord_sys'] != 'beam':
raise ValueError('The input must be in beam coordinates.')
if reverse and adcpo['props']['coord_sys'] != 'inst':
raise ValueError('The input must be in inst coordinates.')
if 'rotmat' in adcpo['config'].keys():
rotmat = adcpo['config']['rotmat']
else:
rotmat = calc_beam_rotmatrix(adcpo['config']['beam_angle'],
adcpo['config']['beam_pattern'].lower()
== 'convex')
cs = 'inst'
if reverse:
# Can't use transpose because rotation is not between
# orthogonal coordinate systems
rotmat = np.linalg.inv(rotmat)
cs = 'beam'
# raw = adcpo['vel'].transpose()
raw = np.asmatrix(adcpo['vel'])
# here I end up with an extra dimension of 4
# vels = np.einsum('ij,jkl->ikl', rotmat, raw)
# vels = np.einsum('ij,jk->ik', rotmat, raw)
vels = np.array(np.asmatrix(rotmat)*raw.transpose())
# vels = np.einsum('ij,jkl->ikl', rotmat, adcpo['vel'])
# ValueError: operands could not be broadcast together with remapped
# shapes [original->remapped]: (4,4)->(4,newaxis,newaxis,4) (16,4,1)->(4,1,16)
# adcpo['vel'] = np.einsum('ij,jkl->ikl', rotmat, adcpo['vel'])
adcpo['vel'] = vels.transpose()
adcpo['props']['coord_sys'] = cs
def inst2earth(adcpo, reverse=False, fixed_orientation=False, force=False):
"""
Rotate velocities from the instrument to earth coordinates.
:param dict adcpo: containing the data in instrument coordinates
:param bool reverse: If True, this function performs the inverse rotation (earth->inst).
:param bool fixed_orientation: When true, take the average orientation and apply it over the whole record.
:param bool force: When true do not check which coordinate system the data is in prior to performing this rotation.
Notes
-----
The rotation matrix is taken from the Teledyne RDI ADCP Coordinate Transformation manual January 2008
When performing the forward rotation, this function sets the 'inst2earth:fixed' flag to the value of
`fixed_orientation`. When performing the reverse rotation, that value is 'popped' from the props dict and the input
value to this function, `fixed_orientation`, has no effect. If `'inst2earth:fixed'` is not in the props dict then
the input value *is* used.
"""
deg2rad = np.pi / 180.
if not force:
if not reverse and adcpo['props']['coord_sys'] != 'inst':
raise ValueError('The input must be in inst coordinates.')
if reverse and adcpo['props']['coord_sys'] != 'earth':
raise ValueError('The input must be in earth coordinates.')
if not reverse and 'declination' in adcpo['props'].keys() and not adcpo['props']['declination_in_heading']:
# Only do this if making the forward rotation.
adcpo['heading_deg'] += adcpo['props']['declination']
adcpo['props']['declination_in_heading'] = True
r = adcpo['roll_deg'] * deg2rad
p = np.arctan(np.tan(adcpo['pitch_deg'] * deg2rad) * np.cos(r))
h = adcpo['heading_deg'] * deg2rad
if adcpo['config']['orientation'].lower() == 'up':
r += np.pi
ch = np.cos(h)
sh = np.sin(h)
cr = np.cos(r)
sr = np.sin(r)
cp = np.cos(p)
sp = np.sin(p)
rotmat = np.empty((3, 3, len(r)))
rotmat[0, 0, :] = ch * cr + sh * sp * sr
rotmat[0, 1, :] = sh * cp
rotmat[0, 2, :] = ch * sr - sh * sp * cr
rotmat[1, 0, :] = -sh * cr + ch * sp * sr
rotmat[1, 1, :] = ch * cp
rotmat[1, 2, :] = -sh * sr - ch * sp * cr
rotmat[2, 0, :] = -cp * sr
rotmat[2, 1, :] = sp
rotmat[2, 2, :] = cp * cr
# Only operate on the first 3-components, b/c the 4th is err_vel
# ess = 'ijk,jlk->ilk'
cs = 'earth'
if reverse:
cs = 'inst'
fixed_orientation = adcpo['props'].pop('inst2earth:fixed', fixed_orientation)
# ess = ess.replace('ij', 'ji')
else:
adcpo['props']['inst2earth:fixed'] = fixed_orientation
if fixed_orientation:
# ess = ess.replace('k,', ',')
rotmat = rotmat.mean(-1)
# todo is the einsum method better? If so, uncomment the ess statements above
# vels = np.einsum(ess, rotmat, adcpo['vel'][:,:3])
vels = np.asmatrix(rotmat) * np.asmatrix(adcpo['vel'][:, :3].transpose())
adcpo['vel'][:, :3] = vels.transpose()
adcpo['props']['coord_sys'] = cs
# end code from dolfyn
def setupEPICnc(fname, rawcdf, attfile, settings):
"""
Construct an empty netCDF output file to EPIC conventions
:param str fname: output netCDF file name
:param Dataset rawcdf: input netCDF raw data file object
:param str attfile: metadata text file
:param dict settings: settings as follows::
'good_ensembles': [] # starting and ending indices of the input file. For all data use [0,np.inf]
'orientation': 'UP' # uplooking ADCP, for downlooking, use DOWN
'transducer_offset_from_bottom': 2.02 # in meters
'transformation': 'EARTH' # | BEAM | INST
'adjust_to_UTC': 5 # for EST to UTC, if no adjustment, set to 0 or omit
:return: netCDF file object
"""
# note that
# f4 = 4 byte, 32 bit float
# maximum value for 32 bit float = 3.402823*10**38;
intfill = -32768
floatfill = 1E35
# check the ensemble limits asked for by the user
nens = rawcdf.variables['Rec'].size
if settings['good_ensembles'][1] < 0:
settings['good_ensembles'][1] = nens
if settings['good_ensembles'][0] < 0:
settings['good_ensembles'][0] = 0
if settings['good_ensembles'][1] > nens:
settings['good_ensembles'][1] = nens-1
nens2write = settings['good_ensembles'][1]-settings['good_ensembles'][0]
print('creating netCDF file %s with %d records' % (fname, nens2write))
rawvars = []
for key in rawcdf.variables.keys():
rawvars.append(key)
nbins = len(rawcdf.dimensions['depth'])
cdf = Dataset(fname, "w", clobber=True, format="NETCDF4")
# dimensions, in EPIC order
cdf.createDimension('time', nens2write)
cdf.createDimension('depth', nbins)
cdf.createDimension('lat', 1)
cdf.createDimension('lon', 1)
# write global attributes
cdf.history = rawcdf.history + "rotations calculated and converted to EPIC format by ADCPcdf2ncEPIC.py"
# these get returned as a dictionary
gatts = read_globalatts(attfile)
if 'WATER_DEPTH' not in gatts.keys():
# noinspection PyTypeChecker
gatts['WATER_DEPTH'] = 0.0 # nothing from user
print('No WATER_DEPTH found, check depths of bins and WATER_DEPTH!')
gatts['orientation'] = settings['orientation'].upper()
if 'serial_number' not in gatts.keys():
gatts['serial_number'] = "unknown"
if 'magnetic_variation' not in gatts.keys():
# noinspection PyTypeChecker
gatts['magnetic_variation_at_site'] = 0.0
print('No magnetic_variation, assuming magnetic_variation_at_site = 0')
else:
gatts['magnetic_variation_at_site'] = gatts['magnetic_variation']
gatts.pop('magnetic_variation')
if type(gatts['MOORING']) != str:
gatts['MOORING'] = str(int(np.floor(gatts['MOORING'])))
writeDict2atts(cdf, gatts, "")
# more standard attributes
cdf.latitude_units = "degree_north"
cdf.longitude_units = "degree_east"
cdf.CREATION_DATE = "%s" % datetime.now()
cdf.DATA_TYPE = "ADCP"
cdf.FILL_FLAG = 0
cdf.COMPOSITE = 0
# attributes that the names will vary depending on the ADCP vendor
if rawcdf.sensor_type == "TRDI":
# TRDI attributes
if any('VBeam' in item for item in rawcdf.ncattrs()):
cdf.INST_TYPE = "TRDI Workhorse V"
else:
cdf.INST_TYPE = "TRDI Workhorse"
cdf.bin_size = rawcdf.TRDI_Depth_Cell_Length_cm/100
cdf.bin_count = rawcdf.TRDI_Number_of_Cells
cdf.center_first_bin = rawcdf.TRDI_Bin_1_distance_cm/100
cdf.blanking_distance = rawcdf.TRDI_Blank_after_Transmit_cm/100
cdf.transform = rawcdf.TRDI_Coord_Transform
cdf.beam_angle = rawcdf.TRDI_Beam_Angle
cdf.number_of_slant_beams = rawcdf.TRDI_Number_of_Beams
cdf.heading_bias_applied_EB = rawcdf.TRDI_Heading_Bias_Hundredths_of_Deg
cdf.beam_angle = rawcdf.TRDI_Beam_Angle
cdf.beam_pattern = rawcdf.TRDI_Beam_Pattern
elif rawcdf.sensor_type == "Nortek":
# Nortek attributes
# TODO - what to do about multiple sampling schemes?
cdf.INST_TYPE = "Nortek Signature"
cdf.bin_size = rawcdf.Nortek_burst_cellSize
cdf.bin_count = rawcdf.Nortek_burst_nCells
# Nortek Signature does not seem to have an odd offset to center of
# bin 1. This value comes from the Velocity Range provided by Nortek
cdf.center_first_bin = rawcdf['bindist'][0]
cdf.blanking_distance = rawcdf.Nortek_burst_blankingDistance
cdf.transform = rawcdf.Nortek_burst_coordSystem
# Nortek provides two angles for each beam, theta being from the vertical
cdf.beam_angle = rawcdf.Nortek_beamConfiguration1_theta
cdf.number_of_slant_beams = 4
# there's no indication from Nortek's metadata that magvar is applied or not
# TODO have to include information from the user
cdf.heading_bias_applied_EB = 0
# hard coded based on known Signature design
# Could be deduced from the theta and phi beam angles
cdf.beam_pattern = "Convex"
# attributes requiring user input
cdf.transducer_offset_from_bottom = settings['transducer_offset_from_bottom']
cdf.initial_instrument_height = settings['transducer_offset_from_bottom']
# TODO check on orientation, using user input for now
# rawcdf.TRDI_Orientation
# need translation to UP from "Up-facing beams"
cdf.orientation = settings['orientation'].upper()
cdf.orientation_note = settings['orientation_note']
if settings['orientation'].upper() == 'UP':
cdf.depth_note = "uplooking bin depths = WATER_DEPTH-transducer_offset_from_bottom-bindist"
else:
cdf.depth_note = "downlooking bin depths = WATER_DEPTH-transducer_offset_from_bottom+bindist"
cdf.serial_number = rawcdf.serial_number
# TODO consider using a float for time since less common integers are causing issues
# the problem is, CF time is a count, so integer is appropriate
timetype = 'u2' # u4 may be causing downstream problems with NCO
# u2 caused rollover problems when EPIC time was stored or read:
# file is 1108sig001.nc
# EPIC first time stamp = 08-Oct-5378 00:01:04
# seconds since 1970-01-01T00:00:00 UTC
# CF first time stamp = 25-Sep-2017 15:00:00
# the bad EPIC time is because a u4 datatype in the cdf file
# is being sent to a u2 datatype in the nc file. Changing u2 to u4, etc.
# causes other problems
# timetype = 'u4' # u4 causes problems downstream in catEPIC with fill values
# for now choosing to live with the u2 problems
varobj = cdf.createVariable('Rec', timetype, ('time',), fill_value=intfill)
varobj.units = "count"
varobj.long_name = "Ensemble Number"
if settings['timetype'] == 'CF':
# if f8, 64 bit is not used, time is clipped
# TODO test this theory, because downstream 64 bit time is a problem
# for ADCP fast sampled, single ping data, need millisecond resolution
# for CF convention
varobj = cdf.createVariable('time', 'f8', ('time',))
varobj.units = rawcdf.variables['time'].units
# for EPIC convention
varobj = cdf.createVariable('EPIC_time', timetype, ('time',))
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
varobj = cdf.createVariable('EPIC_time2', timetype, ('time',))
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
else:
# if f8, 64 bit is not used, time is clipped
# for ADCP fast sampled, single ping data, need millisecond resolution
# for CF convention
varobj = cdf.createVariable('cf_time', 'f8', ('time',))
varobj.units = rawcdf.variables['cf_time'].units
# for EPIC convention
varobj = cdf.createVariable('time', timetype, ('time',))
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
varobj = cdf.createVariable('time2', timetype, ('time',))
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
varobj = cdf.createVariable('depth', 'f4', ('depth',))
varobj.units = "m"
varobj.long_name = "DEPTH (M)"
varobj.epic_code = 3
varobj.center_first_bin = cdf.center_first_bin
varobj.blanking_distance = cdf.blanking_distance
varobj.bin_size = cdf.bin_size
varobj.bin_count = nbins
varobj.transducer_offset_from_bottom = cdf.transducer_offset_from_bottom
varobj = cdf.createVariable('lat', 'f8', ('lat',))
varobj.units = "degree_north"
varobj.epic_code = 500
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "LAT")
varobj.long_name = "LATITUDE"
varobj.datum = "NAD83"
varobj[:] = float(gatts['latitude'])
varobj = cdf.createVariable('lon', 'f8', ('lon',))
varobj.units = "degree_east"
varobj.epic_code = 502
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "LON")
varobj.long_name = "LONGITUDE"
varobj.datum = "NAD83"
varobj[:] = float(gatts['longitude'])
varobj = cdf.createVariable('bindist', 'f4', ('depth',), fill_value=floatfill)
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "bindist")
varobj.units = "m"
varobj.long_name = "bin distance from instrument"
varobj.epic_code = 0
varobj.center_first_bin = cdf.center_first_bin
varobj.blanking_distance = cdf.blanking_distance
varobj.bin_size = cdf.bin_size
varobj.bin_count = nbins
varobj.transducer_offset_from_bottom = cdf.transducer_offset_from_bottom
varobj.NOTE = "distance is along profile from instrument head to center of bin"
varobj = cdf.createVariable('SV_80', 'f4', ('time', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "m s-1"
varobj.epic_code = 80
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "SV")
varobj.long_name = "SOUND VELOCITY (M/S)"
varobj = cdf.createVariable('Hdg_1215', 'f4', ('time', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "degrees"
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "Hdg")
varobj.long_name = "INST Heading"
varobj.epic_code = 1215
# varobj.heading_alignment = rawvarobj.heading_alignment
# varobj.heading_bias = rawvarobj.heading_bias
varobj = cdf.createVariable('Ptch_1216', 'f4', ('time', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "degrees"
varobj.long_name = "INST Pitch"
varobj.epic_code = 1216
varobj = cdf.createVariable('Roll_1217', 'f4', ('time', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "degrees"
varobj.long_name = "INST Roll"
varobj.epic_code = 1217
varobj = cdf.createVariable('Tx_1211', 'f4', ('time', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "C"
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "T")
varobj.long_name = "instrument Transducer Temp."
varobj.epic_code = 1211
if 'Pressure' in rawvars:
# rawvarobj = rawcdf.variables['Pressure']
varobj = cdf.createVariable('P_1', 'f4', ('time', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "dbar"
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "P")
varobj.long_name = "PRESSURE (DB)"
varobj.epic_code = 1
if 'PressVar' in rawvars:
varobj = cdf.createVariable('SDP_850', 'f4', ('time', 'lat', 'lon'), fill_value=floatfill)
varobj.setncattr('name', 'SDP')
varobj.long_name = "STAND. DEV. (PRESS)"
varobj.units = "mbar"
varobj.epic_code = 850
varobj = cdf.createVariable('cor', 'u2', ('time', 'depth', 'lat', 'lon'), fill_value=intfill)
varobj.setncattr('name', 'cor')
varobj.long_name = "Slant Beam Average Correlation (cor)"
varobj.units = "counts"
varobj.epic_code = 1202
varobj.NOTE = "Calculated from the slant beams"
if 'PGd4' in rawvars:
varobj = cdf.createVariable('PGd_1203', 'u2', ('time', 'depth', 'lat', 'lon'), fill_value=intfill)
varobj.setncattr('name', 'Pgd')
varobj.long_name = "Percent Good Pings"
varobj.units = "percent"
varobj.epic_code = 1203
varobj.NOTE = "Percentage of good 4-bem solutions (Field #4)"
varobj = cdf.createVariable('AGC_1202', 'u2', ('time', 'depth', 'lat', 'lon'), fill_value=intfill)
varobj.setncattr('name', 'AGC')
varobj.long_name = "Average Echo Intensity (AGC)"
varobj.units = "counts"
varobj.epic_code = 1202
varobj.NOTE = "Calculated from the slant beams"
if 'cor5' in rawvars:
varobj = cdf.createVariable('corvert', 'u2', ('time', 'depth', 'lat', 'lon'), fill_value=intfill)
varobj.setncattr('name', 'cor')
varobj.long_name = "Vertical Beam Correlation (cor)"
varobj.units = "counts"
varobj.epic_code = 1202
varobj.NOTE = "From the center vertical beam"
if 'att5' in rawvars:
varobj = cdf.createVariable('AGCvert', 'u2', ('time', 'depth', 'lat', 'lon'), fill_value=intfill)
varobj.setncattr('name', 'AGC')
varobj.long_name = "Vertical Beam Echo Intensity (AGC)"
varobj.units = "counts"
varobj.epic_code = 1202
varobj.NOTE = "From the center vertical beam"
# repeating attributes that do not depend on height or depth calculations
cdfvarnames = []
for key in cdf.variables.keys():
cdfvarnames.append(key)
omitnames = []
for key in cdf.dimensions.keys():
omitnames.append(key)
omitnames.append("Rec")
for varname in cdfvarnames:
if varname not in omitnames:
varobj = cdf.variables[varname]
varobj.serial_number = cdf.serial_number
if settings['transformation'].upper() == "BEAM":
varnames = ["Beam1", "Beam2", "Beam3", "Beam4"]
codes = [0, 0, 0, 0]
elif settings['transformation'].upper() == "INST":
varnames = ["X", "Y", "Z", "Error"]
codes = [0, 0, 0, 0]
else:
varnames = ["u_1205", "v_1206", "w_1204", "Werr_1201"]
codes = [1205, 1206, 1204, 1201]
for i in range(4):
varname = varnames[i]
varobj = cdf.createVariable(varname, 'f4', ('time', 'depth', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "cm s-1"
varobj.long_name = "%s velocity (cm s-1)" % varnames[i]
varobj.epic_code = codes[i]
if 'vel5' in rawvars:
varobj = cdf.createVariable('Wvert', 'f4', ('time', 'depth', 'lat', 'lon'), fill_value=floatfill)
varobj.units = "cm s-1"
varobj.long_name = "Vertical velocity (cm s-1)"
# TODO do we do bottom track data here? Later? Or as a separate thing?
add_VAR_DESC(cdf)
return cdf
# noinspection PyUnresolvedReferences
def add_VAR_DESC(cdf):
"""
add the VAR_DESC global attribute constructed from variable names found in the file
:param object cdf: netCDF file object
"""
# cdf is an netcdf file object (e.g. pointer to open netcdf file)
varkeys = cdf.variables.keys() # get the names
dimkeys = cdf.dimensions.keys()
varnames = []
for key in varkeys:
varnames.append(key)
dimnames = []
for key in dimkeys:
dimnames.append(key)
buf = ""
for varname in varnames:
if varname not in dimnames:
buf = "%s:%s" % (buf, varname)
cdf.VAR_DESC = buf
def read_globalatts(fname):
"""
read_globalatts: read in file of metadata for a tripod or mooring
reads global attributes for an experiment from a text file (fname) called by all data processing programs
to get uniform metadata input one argument is required- the name of the file to read- it should have this form::
SciPi; J.Q. Scientist
PROJECT; USGS Coastal Marine Geology Program
EXPERIMENT; MVCO 2015 Stress Comparison
DESCRIPTION; Quadpod 13.9m
DATA_SUBTYPE; MOORED
COORD_SYSTEM; GEOGRAPHIC + SAMPLE
Conventions; PMEL/EPIC
MOORING; 1057
WATER_DEPTH; 13.9
WATER_DEPTH_NOTE; (meters), nominal
WATER_DEPTH_source; ship fathometer
latitude; 41.3336633
longitude; -70.565877
magnetic_variation; -14.7
Deployment_date; 17-Nov-2015
Recovery_date; 14-Dec-2015
DATA_CMNT;
platform_type; USGS aluminum T14 quadpod
DRIFTER; 0
POS_CONST; 0
DEPTH_CONST; 0
Conventions; PMEL/EPIC
institution; United States Geological Survey, Woods Hole Coastal and Marine Science Center
institution_url; http://woodshole.er.usgs.gov
:param str fname: input file name
:return: dict of metadata
"""
gatts = {}
f = open(fname, 'r')
for line in f:
line = line.strip()
cols = line.split(";")
gatts[cols[0]] = cols[1].strip()
f.close()
return gatts
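# a minimal sketch of what read_globalatts returns (hypothetical file contents):
# a line such as "WATER_DEPTH; 13.9" becomes gatts['WATER_DEPTH'] = '13.9';
# values are left as strings here and converted to numbers later by writeDict2atts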
# noinspection PyUnresolvedReferences
def writeDict2atts(cdfobj, d, tag):
"""
write a dictionary to netCDF attributes
:param object cdfobj: netcdf file object
:param dict d: metadata
:param str tag: tag to add before each attribute name
:return: dict of metadata as written to file
"""
i = 0
# first, convert as many of the values in d to numbers as we can
for key in iter(d):
if type(d[key]) == str:
try:
d[key] = float(d[key])
except ValueError:
i += 1
for key in iter(d):
newkey = tag + key
try:
cdfobj.setncattr(newkey, d[key])
except:
print('can\'t set %s attribute' % key)
return d
def floor(dec):
"""
convenience function to round down
provided to avoid loading the math package
and because np.floor was causing unexpected behavior w.r.t ints
:param float dec:
:return: rounded number
"""
return int(dec - (dec % 1))
def __main():
print('%s running on python %s' % (sys.argv[0], sys.version))
if len(sys.argv) < 2:
print("%s useage:" % sys.argv[0])
print("ADCPcdf2ncEPIC rawcdfname ncEPICname USGSattfile [startingensemble endingensemble]\n")
print("starting and ending ensemble are netcdf file indeces, NOT TRDI ensemble numbers")
print("USGSattfile is a file containing EPIC metadata")
sys.exit(1)
try:
infile_name = sys.argv[1]
except:
print('error - input file name missing')
sys.exit(1)
try:
outfile_name = sys.argv[2]
except:
print('error - output file name missing')
sys.exit(1)
try:
attfile_name = sys.argv[3]
except:
print('error - global attribute file name missing')
sys.exit(1)
try:
settings = sys.argv[4]
except:
print('error - settings missing - need dictionary of:')
print('settings[\'good_ensembles\'] = [0, np.inf] # use np.inf for all ensembles or omit')
print('settings[\'transducer_offset_from_bottom\'] = 1 # m')
print('settings[\'transformation\'] = "EARTH" # | BEAM | INST')
sys.exit(1)
# some input testing
if not os.path.isfile(infile_name):
print('error - input file not found')
sys.exit(1)
if not os.path.isfile(attfile_name):
print('error - attribute file not found')
sys.exit(1)
print('Converting %s to %s' % (infile_name, outfile_name))
print('Start file conversion at ', dt.datetime.now())
# noinspection PyTypeChecker
doEPIC_ADCPfile(infile_name, outfile_name, attfile_name, settings)
print(f'Finished file conversion at {dt.datetime.now()}')
if __name__ == "__main__":
__main()
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/EPICstuff/ADCPcdf2ncEPIC.py | ADCPcdf2ncEPIC.py |
# -*- coding: utf-8 -*-
"""
repopulateEPIC
==============
Distribute burst data output from reshapeEPIC along the sample dimension
A burst-shaped file output from reshapeEPIC will have two issues that need
to be addressed before the data can be used with xarray::
-- reshape time to be one dimension
-- make sure the samples within each burst are indexed according to their
time stamps. Within-burst time stamps will not be preserved
Usage:
python repopulateEPIC.py shaped_file new_file sample_rate [start='left'] [drop = None]
:param str shaped_file: output from reshapeEPIC, the expected shape of the data is one of::
[time, sample]
[time, sample, depth]
[time, sample, depth, lat, lon]
:param str new_file: a new file with the adjusted time, this file, if it exists, will be overwritten
:param int sample_rate: the sample rate the instrument was intended to use during each burst, in seconds
:param str start: what the time stamp should be for each burst::
left = beginning of the burst based on the first sample time
center = middle of the burst based on first sample and last sample times
right = end of the burst based on the last sample time
:param list drop: variable names to omit from the output file
Created on Wed Oct 3 15:21:53 2018
@author: mmartini
"""
# 10/15/2018 MM was using np.nan to pre-fill arrays and this was causing
# NaNs in final output, a problem for CF. Replace np.nan with _FillValue
import os
import sys
import datetime as dt
import netCDF4 as nc
import numpy as np
def repopulateEPIC(*args, **kwargs):
# the argument passing here works fine
print('%s running on python %s' % (sys.argv[0], sys.version))
print('Start file conversion at ', dt.datetime.now())
shaped_file = args[0]
new_file = args[1]
sample_rate = args[2]
if 'start' in kwargs.keys():
start = kwargs['start']
else:
start = 'left'
if 'drop' in kwargs.keys():
drop = kwargs['drop']
else:
drop = {}
for key, value in kwargs.items():
print('{} = {}'.format(key, value))
print('Start file conversion at ', dt.datetime.now())
# check for the output file's existence before we try to delete it.
try:
os.remove(new_file)
print('{} removed'.format(new_file))
except FileNotFoundError:
pass
shapedcdf = nc.Dataset(shaped_file, format="NETCDF4")
ndims = len(shapedcdf.dimensions)
print(ndims)
nvars = len(shapedcdf.variables)
print(nvars)
# shapedcdf.getncattr('sensor_type')
ngatts = len(shapedcdf.ncattrs())
print(ngatts)
newcdf = nc.Dataset(new_file, mode="w", clobber=True, format='NETCDF4')
newcdf.set_fill_off()
# copy the global attributes
# first get a dict of them so that we can iterate
gatts = {}
for attr in shapedcdf.ncattrs():
gatts[attr] = getattr(shapedcdf, attr)
gatts['history'] = getattr(shapedcdf, 'history')+'; distributing time using repopulateEPIC.py'
newcdf.setncatts(gatts)
for item in shapedcdf.dimensions.items():
print('Defining dimension {} which is {} long'.format(item[0], len(item[1])))
newcdf.createDimension(item[0], len(item[1]))
# this is the dimension along which we will redistribute the burst samples
# this is also the dimension we will drop from 'time'
dim = 'sample'
for var in shapedcdf.variables.items():
varobj = var[1]
try:
fill_value = varobj.getncattr('_FillValue')
except AttributeError:
fill_value = False # do not use None here!!!
print('{} is data type {} with fill {}'.format(varobj.name, varobj.dtype,
fill_value))
if varobj.name not in drop: # are we copying this variable?
if varobj.name == 'time': # is this time which we need to drop a dimension?
vdims_shaped = varobj.dimensions
vdims_new = []
for d in vdims_shaped:
if d == dim:
print('\tskipping sample in {}'.format(varobj.name))
else:
vdims_new.append(d)
newvarobj = newcdf.createVariable(varobj.name, varobj.dtype, tuple(vdims_new), fill_value=fill_value)
else:
# for a normal copy, no dimension drop
newvarobj = newcdf.createVariable(varobj.name, varobj.dtype, varobj.dimensions, fill_value=fill_value)
print('\t{} to {}'.format(varobj.dimensions, newvarobj.dimensions))
# copy the variable attributes
# first get a dict of them so that we can iterate
vatts = {}
for attr in varobj.ncattrs():
vatts[attr] = getattr(varobj, attr)
try:
newvarobj.setncatts(vatts)
except AttributeError:
print('Unable to copy atts for {}'.format(varobj.name))
# --------------- populate the data
# time is special, take care of it after time is populated
# we know because we are doing this that it is [time, sample]
if start == 'right':
print('copying time, using the last time stamp in each burst')
newcdf['time'][:] = shapedcdf['time'][:, -1]
# TODO -- bring into the loop and implement
# elif start == 'center':
# print('copying time, using the middle in each burst')
# #t =
# i = int(np.floor(len(shapedcdf['time'][0,:]/2)))
# newcdf['time'][:] = shapedcdf['time'][:,-1]
else:
print('copying time, using the first time stamp in each burst')
newcdf['time'][:] = shapedcdf['time'][:, 0]
drop = {'time'} # we have already done time
nbursts = len(newcdf['time'])
# Note we are dependent on the shape [time, sample, depth, lat, lon]
for svar in shapedcdf.variables.items():
varname = svar[1].name
print('{} is data type {}'.format(svar[0], svar[1].dtype))
if varname not in drop:
ndims = len(svar[1].dimensions)
print('\t{} dims to {} dims'.format(shapedcdf[varname].shape, newcdf[varname].shape))
if ('time' in svar[1].dimensions) and ('sample' in svar[1].dimensions):
print('\tdistributing samples, iterating through bursts')
try:
fill_value = svar[1].getncattr('_FillValue')
except AttributeError:
fill_value = None
for iburst in range(nbursts):
# get the data
if ndims == 2:
data = shapedcdf[varname][iburst, :]
elif ndims == 3:
data = shapedcdf[varname][iburst, :, :]
elif ndims == 4:
data = shapedcdf[varname][iburst, :, :, :]
elif ndims == 5:
data = shapedcdf[varname][iburst, :, :, :, :]
else:
data = None
if iburst == 0:
print('{} dims found - too many'.format(ndims))
# TODO: what do we do when this fails?
if iburst == 0 and data is not None:
print('\t data is {}'.format(data.shape))
# do not need to iterate over depth if shapes are correct!
# set up the index using the time stamps
t = shapedcdf['time'][iburst, :]
tidx = np.array(t-t[0])*sample_rate
# incoming data is represented as NaN, how we find them
tidxgood = ~np.isnan(tidx)
# reset the new container, same shape as old data
# new_data = np.full(data.shape,np.nan) # don't use NaN!
new_data = np.full(data.shape, fill_value)
# need an integer representation of the indices
# to make this assignment work: new_data[idxasint] = data[tidxgood]
idxasint = tidx[tidxgood].astype(int)
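# worked example of the redistribution above, assuming times in seconds and
# sample_rate in samples per second (hypothetical values):
# t = [100.0, 100.5, 101.5] with sample_rate = 2 gives tidx = [0.0, 1.0, 3.0],
# so samples are written to indices 0, 1 and 3 and index 2 keeps the fill value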
# different index types because these are different values
if iburst == 0:
print('\tnumber of dimensions = {}'.format(ndims))
if ndims == 2:
new_data[idxasint] = data[tidxgood]
newcdf[varname][iburst, :] = new_data
elif ndims == 3:
new_data[idxasint, :] = data[tidxgood, :]
newcdf[varname][iburst, :, :] = new_data
elif ndims == 4:
new_data[idxasint, :, :] = data[tidxgood, :, :]
newcdf[varname][iburst, :, :, :] = new_data
elif ndims == 5:
new_data[idxasint, :, :, :] = data[tidxgood, :, :, :]
newcdf[varname][iburst, :, :, :, :] = new_data
# no need to redistribute time
else: # if 'time' not in svar[1].dimensions:
print('\tno time and sample combination found, simple copy')
if ndims == 1:
newcdf[varname][:] = shapedcdf[varname][:]
elif ndims == 2:
newcdf[varname][:, :] = shapedcdf[varname][:, :]
elif ndims == 3:
newcdf[varname][:, :, :] = shapedcdf[varname][:, :, :]
elif ndims == 4:
newcdf[varname][:, :, :, :] = shapedcdf[varname][:, :, :, :]
elif ndims == 5:
newcdf[varname][:, :, :, :, :] = shapedcdf[varname][:, :, :, :, :]
else:
print('Not coded for more than 5 dimensions')
print('\n')
shapedcdf.close()
newcdf.close()
print('Finished writing new file {}'.format(new_file))
def __main():
print('%s running on python %s' % (sys.argv[0], sys.version))
if len(sys.argv) < 3:
print(__doc__)
return
try:
shaped_file = sys.argv[1]
except:
print('error - shaped netCDF input file name missing')
sys.exit(1)
try:
new_file = sys.argv[2]
except:
print('error - output file name missing')
sys.exit(1)
try:
sample_rate = float(sys.argv[3])  # command line arguments arrive as strings
except:
print('error - sample_rate missing')
sys.exit(1)
print('repopulating {} to {} with {} sample_rate'.format(shaped_file, new_file, sample_rate))
repopulateEPIC(shaped_file, new_file, sample_rate, sys.argv[4:])
if __name__ == "__main__":
__main()
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/EPICstuff/repopulateEPIC.py | repopulateEPIC.py |
ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/EPICstuff/__init__.py | __init__.py |
|
#!/usr/bin/python
"""
Functions to handle Acoustic Doppler Current Profiler data from
a Teledyne RD Instruments instrument that is in pd0 format
As a script (to split a raw PD0 file into waves and currents):
python pd0.py [path] rawFile wavesFile currentsFile
python pd0.py [-p path] -r rawFile -w wavesFile -c currentsFile
python pd0.py [--path=path] \\
--raw=rawFile \\
--waves=wavesFile \\
--currents=currentsFile
where:
path is a path to prepend to the following
rawFile is path of raw PD0 format input file
wavesFile is path of waves PD0 format output file
currentsFile is path of currents PD0 format output file
or (to run the test suite):
python pd0.py -t
python pd0.py --test
or (to see this help message):
python pd0.py -h
python pd0.py --help
As a module:
import adcp.pd0
adcp.pd0.split(rawFile,wavesFile,currentsFile)
where:
rawFile is a file object representing the raw PD0 format input
wavesFile is a file object representing the waves PD0 format output
currentsFile is a file object representing the currents PD0 format output
"""
import struct
#
# The rawfile is assumed to be in PD0 format.
#
# PD0 format assumes the file is a succession of ensembles.
#
# Each ensemble starts with a two byte header identifying the type of data
# contained in the ensemble.
#
# Following the header is a two byte length field specifying the length of
# the header, length field, and data combined
#
# Following the length field is raw data for the number of bytes indicated by
# the length field
#
# Following the raw data is a checksum field which is the two least
# significant bytes of the sum of the byte values of the header, length field,
# and raw data.
#
# updated to run in python 3x, Marinna Martini 1/12/2017
# adapted from pd0.py by Gregory P. Dusek
# http://trac.nccoos.org/dataproc/wiki/DPWP/docs
def split(raw_file, waves_file, currents_file):
"""
Split PD0 format data into separate waves and currents files
:param binaryIO raw_file:
:param binaryIO waves_file:
:param binaryIO currents_file:
:return:
"""
# header IDs
waves_id = 0x797F
currents_id = 0x7F7F
# convenience function reused for header, length, and checksum
def __nextLittleEndianUnsignedShort(file):
"""Get next little endian unsigned short from file"""
raw = file.read(2)
"""for python 3.5, struct.unpack('<H', raw)[0] needs to return a
byte, not an int
"""
return raw, struct.unpack("<H", raw)[0]
# factored for readability
def __computeChecksum(data, nbytes, ensemble):
"""Compute a checksum from header, length, and ensemble"""
cs = 0
for byte in data:
# since the for loop returns an int to byte, use as-is
# value = struct.unpack('B', byte)[0]
# cs += value
cs += byte
for byte in nbytes:
# value = struct.unpack('B', byte)[0]
# cs += value
cs += byte
for byte in ensemble:
# value = struct.unpack('B', byte)[0]
# cs += value
cs += byte
return cs & 0xFFFF
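# a small worked example of the checksum arithmetic (hypothetical bytes):
# for header b'\x7f\x7f', length field b'\x34\x02' and an ensemble whose bytes sum to 70000,
# the checksum is (0x7f + 0x7f + 0x34 + 0x02 + 70000) & 0xFFFF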
# find the first instance of a waves or currents header
raw_data = raw_file.read()
first_waves = raw_data.find(struct.pack("<H", waves_id))
first_currents = raw_data.find(struct.pack("<H", currents_id))
# bail if neither waves nor currents found
if (first_waves < 0) and (first_currents < 0):
# raise IOError, "Neither waves nor currents header found"
raise IOError("Neither waves nor currents header found")
# get the starting point by throwing out unfound headers
# and selecting the minimum
first_ensemble = min([x for x in (first_waves, first_currents) if x >= 0])
# seeks to the first occurrence of a waves or currents data
raw_file.seek(first_ensemble)
# loop through raw data
raw_header, header = __nextLittleEndianUnsignedShort(raw_file)
while (header == waves_id) or (header == currents_id):
# get ensemble length
raw_length, length = __nextLittleEndianUnsignedShort(raw_file)
# read up to the checksum
raw_ensemble = raw_file.read(length - 4)
# get checksum
raw_checksum, checksum = __nextLittleEndianUnsignedShort(raw_file)
computed_checksum = __computeChecksum(raw_header, raw_length, raw_ensemble)
if checksum != computed_checksum:
raise IOError("Checksum error")
# append to output stream
if header == waves_id:
waves_file.write(raw_header)
waves_file.write(raw_length)
waves_file.write(raw_ensemble)
waves_file.write(raw_checksum)
elif header == currents_id:
currents_file.write(raw_header)
currents_file.write(raw_length)
currents_file.write(raw_ensemble)
currents_file.write(raw_checksum)
try:
raw_header, header = __nextLittleEndianUnsignedShort(raw_file)
except struct.error:
break
def test():
"""Execute test suite"""
try:
import adcp.tests.runalltests as runalltests
except:
# possible if executed as script
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "tests"))
import runalltests
runalltests.runalltests(subset="pd0")
class __TestException(Exception):
"""Flow control for running as script"""
pass
# wrapper function
def __test():
"""Execute test suite from command line"""
test()
# raise __TestException, 'Wrapper function for command line testing only'
raise __TestException("Wrapper function for command line testing only")
def __main():
"""Process as script from command line"""
import getopt
import os
import sys
# get the command line options and arguments
path = ""
raw_name, waves_name, currents_name = 3 * [None]
try:
opts, args = getopt.gnu_getopt(
sys.argv[1:],
"htp:r:w:c:",
["help", "test", "path=", "raw=", "waves=", "currents="],
)
for opt, arg in opts:
if opt in ["-h", "--help"]:
raise getopt.GetoptError("")
if opt in ["-t", "--test"]:
__test()
elif opt in ["-p", "--path"]:
path = arg
elif opt in ["-r", "--raw"]:
raw_name = arg
elif opt in ["-w", "--waves"]:
waves_name = arg
elif opt in ["-c", "--currents"]:
currents_name = arg
else:
raise getopt.GetoptError("")
if (raw_name is None) or (waves_name is None) or (currents_name is None):
if len(args) not in [3, 4]:
raise getopt.GetoptError("")
else:
if (
(raw_name is not None)
or (waves_name is not None)
or (currents_name is not None)
):
raise getopt.GetoptError("")
else:
if len(args) == 4:
path = args[0]
del args[0]
raw_name = args[0]
waves_name = args[1]
currents_name = args[2]
elif len(args) != 0:
raise getopt.GetoptError("")
except getopt.GetoptError:
print(__doc__)
return
except __TestException:
return
# split a raw PD0 file
raw_name = os.path.join(path, raw_name)
print(("Raw file path:", raw_name))
waves_name = os.path.join(path, waves_name)
print(("Waves file path:", waves_name))
currents_name = os.path.join(path, currents_name)
print(("Currents file path:", currents_name))
raw_file = open(raw_name, "rb")
try:
waves_file = open(waves_name, "wb")
try:
currents_file = open(currents_name, "wb")
try:
split(raw_file, waves_file, currents_file)
finally:
currents_file.close()
finally:
waves_file.close()
finally:
raw_file.close()
if __name__ == "__main__":
__main()
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/TRDIstuff/pd0.py | pd0.py |
"""
pd0splitter
===========
Functions to handle Acoustic Doppler Current Profiler data Teledyne RD Instruments pd0 format
As a script (to split a raw PD0 file into waves packets and currents binary files)
Usage:
python pd0splitter.py pd0file packets_file currents_file [first_ensemble] [last_ensemble]
:param str pd0file: is path of raw PD0 format input file
:param str packets_file: is path of waves PD0 format output file
:param str currents_file: is path of currents PD0 format output file
:param int first_ensemble: ensemble number of the first ensemble to read
:param int last_ensemble: ensemble number of the last ensemble to read
The pd0file is assumed to be in PD0 format. PD0 format assumes the file is a succession of ensembles.
Each ensemble starts with a two byte header identifying the type of data contained in the ensemble.
Following the header is a two byte length field specifying the length of the header, length field, and data combined.
Following the length field is raw data for the number of bytes indicated by the length field.
Following the raw data is a checksum field which is the two least significant bytes of the sum of the byte values
of the header, length field, and raw data.
updated to run in python 3x, Marinna Martini 1/12/2017
adapted from pd0.py by Gregory P. Dusek http://trac.nccoos.org/dataproc/wiki/DPWP/docs
"""
import sys
import struct
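# Illustrative sketch (not called by this module): how a single ensemble is laid
# out on disk, per the format description in the module docstring. The function
# name is hypothetical and exists only to document the layout.
def _read_one_ensemble_example(binary_file):
    """Read one PD0 ensemble: 2-byte ID, 2-byte length, data, 2-byte checksum."""
    raw_id = binary_file.read(2)              # two byte header ID (e.g. 0x7f7f)
    raw_length = binary_file.read(2)          # length of header + length field + data
    length = struct.unpack('<H', raw_length)[0]
    raw_data = binary_file.read(length - 4)   # everything up to the checksum
    raw_checksum = binary_file.read(2)        # low 16 bits of the byte sum
    return raw_id, raw_length, raw_data, raw_checksum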
def split(pd0file, packets_file, currents_file, first_ensemble, last_ensemble):
"""
split ADCP data in pd0 format into current profiles and wave packets
:param str pd0file: path and name of raw PD0 format input file
:param str packets_file: path and name of waves PD0 format output file
:param str currents_file: path and name of currents PD0 format output file
:param int first_ensemble: ensemble number of the first ensemble to read
:param int last_ensemble: ensemble number of the last ensemble to read
"""
    try:
        pd0file = open(pd0file, 'rb')
    except IOError:
        print('Cannot open %s' % pd0file)
        raise
    try:
        packets_file = open(packets_file, 'wb')
    except IOError:
        print('Cannot open %s' % packets_file)
        raise
    try:
        currents_file = open(currents_file, 'wb')
    except IOError:
        print('Cannot open %s' % currents_file)
        raise
# header IDs
waves_id = 0x797f
currents_id = 0x7f7f
if last_ensemble < 0:
last_ensemble = 1E35
print('Reading from %d to the last ensemble found\n' % first_ensemble)
else:
print('Reading from ensemble %d to %d\n' % (first_ensemble, last_ensemble))
# find the first instance of a waves or currents header
raw_data = pd0file.read()
first_waves = raw_data.find(struct.pack('<H', waves_id))
first_currents = raw_data.find(struct.pack('<H', currents_id))
# bail if neither waves nor currents found
if (first_waves < 0) and (first_currents < 0):
# raise IOError, "Neither waves nor currents header found"
raise IOError('Neither waves nor currents header found')
# get the starting point by throwing out unknown headers
# and selecting the minimum
first_file_ensemble = min([x for x in (first_waves, first_currents) if x >= 0])
# seeks to the first occurrence of a waves or currents data
pd0file.seek(first_file_ensemble)
# loop through raw data
raw_header, header = __nextLittleEndianUnsignedShort(pd0file)
wave_count = 0
current_count = 0
while (header == waves_id) or (header == currents_id):
# get ensemble length
raw_length, length = __nextLittleEndianUnsignedShort(pd0file)
# read up to the checksum
raw_ensemble = pd0file.read(length-4)
# get checksum
raw_checksum, checksum = __nextLittleEndianUnsignedShort(pd0file)
computed_checksum = __computeChecksum(raw_header, raw_length, raw_ensemble)
if checksum != computed_checksum:
raise IOError('Checksum error')
# append to output stream
if header == waves_id:
wave_count = wave_count+1
packets_file.write(raw_header)
packets_file.write(raw_length)
packets_file.write(raw_ensemble)
packets_file.write(raw_checksum)
elif header == currents_id:
current_count = current_count+1
            if (current_count >= first_ensemble) and (current_count < last_ensemble):
currents_file.write(raw_header)
currents_file.write(raw_length)
currents_file.write(raw_ensemble)
currents_file.write(raw_checksum)
elif current_count > last_ensemble:
break
try:
raw_header, header = __nextLittleEndianUnsignedShort(pd0file)
except struct.error:
break
        if (current_count > 0) and ((current_count % 100) == 0):
            print('%d current ensembles read' % current_count)
        if (wave_count > 0) and ((wave_count % 1000) == 0):
print('%d wave ensembles read' % wave_count)
print('wave Ensemble count = %d\n' % wave_count)
print('current Ensemble count = %d\n' % current_count)
currents_file.close()
packets_file.close()
pd0file.close()
# convenience function reused for header, length, and checksum
def __nextLittleEndianUnsignedShort(file):
"""
Get next little endian unsigned short from file
:param file: file object open for reading as binary
:return: a tuple of raw bytes and unpacked data
"""
raw = file.read(2)
    # return both the raw bytes (so they can be written back out verbatim) and the unpacked integer value
return raw, struct.unpack('<H', raw)[0]
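# Example (illustrative): for the currents header ID the two raw bytes are
# b'\x7f\x7f', so the call returns (b'\x7f\x7f', 32639), i.e. 0x7f7f.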
# factored for readability
def __computeChecksum(file_header, ensemble_length, ensemble):
"""
Compute a checksum
:param file_header: file header
    :param ensemble_length: raw bytes of the ensemble length field
:param ensemble: ensemble raw data
:return: checksum for ensemble
"""
    cs = 0
    # iterating over a bytes object yields ints in Python 3, so the byte
    # values can be summed directly without struct.unpack
    for byte in file_header:
        cs += byte
    for byte in ensemble_length:
        cs += byte
    for byte in ensemble:
        cs += byte
    return cs & 0xffff
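# Example (illustrative): if the bytes of header + length field + data sum to
# 0x1A2B3C, the expected checksum is 0x2B3C, the low 16 bits of that sum.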
def __main():
print('%s running on python %s' % (sys.argv[0], sys.version))
if len(sys.argv) < 3:
print(__doc__)
return
    try:
        pd0file = sys.argv[1]
    except IndexError:
        print('error - pd0 input file name missing')
        sys.exit(1)
    try:
        packets_file = sys.argv[2]
    except IndexError:
        print('error - packets output file name missing')
        sys.exit(1)
    try:
        currents_file = sys.argv[3]
    except IndexError:
        print('error - current profile output file name missing')
        sys.exit(1)
print('Splitting %s to %s and %s' % (pd0file, packets_file, currents_file))
    try:
        first_ensemble = int(sys.argv[4])
    except (IndexError, ValueError):
        first_ensemble = 1
    try:
        last_ensemble = int(sys.argv[5])
    except (IndexError, ValueError):
        last_ensemble = -1
split(pd0file, packets_file, currents_file, first_ensemble, last_ensemble)
if __name__ == "__main__":
__main()
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/TRDIstuff/pd0splitter.py | pd0splitter.py |
ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/TRDIstuff/__init__.py | __init__.py |
|
"""
TRDIpd0tonetcdf
===============
Convert full-profile ADCP currents data ensembles to a netCDF4 file.
If the file also contains wave packets data, the splitter must be run first.
Usage:
python TRDIpd0tonetcdf.py pd0File cdfFile [good_ens] [serial_number="unknown"] [time_type="CF"] [delta_t=None]
:param str pd0File: is path of raw PD0 format input file with ensembles of full profiles
:param str cdfFile: is path of a netcdf4 EPIC compliant output file
:param tuple[int] good_ens: (start_ensemble, end_ensemble) ensemble range to convert, use -1 for all
:param str serial_number: instrument serial number
:param str time_type: specify "CF" for Climate and Forecast convention time (cfconventions.org) or "EPIC",
https://www.pmel.noaa.gov/epic/index.html
or for both use "CF_with_EPIC" or "EPIC_with_CF"
:param str delta_t: time between ensembles or ensemble groups
Reference:
RD Instruments data format documentation "Workhorse Commands and Output Data Format" June 2018
"""
# 10/4/2018 remove valid_range as it causes too many downstream problems
# 1/25/2017 MM got this running on old Workhorse ADCP data
import sys
import struct
import math
import numpy as np
# this line works in my local environment, fails in Travis
from netCDF4 import Dataset
import datetime as dt
from adcpy.EPICstuff.EPICmisc import cftime2EPICtime
from adcpy.EPICstuff.EPICmisc import ajd
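# Example usage (illustrative; file names and values below are hypothetical):
#   convert_pd0_to_netcdf('deployment.pd0', 'deployment.cdf', [0, -1],
#                         'unknown', 'CF', '900')
# converts every ensemble in deployment.pd0 to a CF-time netCDF file and records
# the 900 s interval in the DELTA_T global attribute.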
def convert_pd0_to_netcdf(pd0File, cdfFile, good_ens, serial_number, time_type, delta_t):
"""
convert from binary pd0 format to netcdf
:param str pd0File: is path of raw PD0 format input file with current ensembles
:param str cdfFile: is path of a netcdf4 EPIC compliant output file
:param list good_ens: [start, end] ensembles to export. end = -1 for all ensembles in file
:param str serial_number: serial number of the instrument
:param str time_type: "CF" for CF conventions, "EPIC" for EPIC conventions
:param str delta_t: time between ensembles, in seconds. 15 min profiles would be 900
:return: count of ensembles read, ending index of netCDF file, error type if file could not be read
"""
# TODO figure out a better way to handle this situation
# need this check in case this function is used as a stand alone function
# this is necessary so that this function does not change the value
# in the calling function
ens2process = good_ens[:]
verbose = True # diagnostic, True = turn on output, False = silent
maxens, ens_len, ens_data, data_start_posn = analyzepd0file(pd0File, verbose)
infile = open(pd0File, 'rb')
infile.seek(data_start_posn)
if (ens2process[1] < 0) or ens2process[1] == np.inf:
ens2process[1] = maxens
# we are good to go, get the output file ready
print('Setting up netCDF file %s' % cdfFile)
cdf, cf_units = setup_netcdf_file(cdfFile, ens_data, ens2process, serial_number, time_type, delta_t)
# we want to save the time stamp from this ensemble since it is the
# time from which all other times in the file will be relative to
t0 = ens_data['VLeader']['dtobj']
netcdf_index = 0
ensemble_count = 0
verbose = False # diagnostic, True = turn on output, False = silent
nslantbeams = 4
# priming read - for the while loop
# note that ensemble lengths can change in the middle of the file!
# horribly inefficient, but here we go, one step backward, two forward...
bookmark = infile.tell() # save beginning of next ensemble
# need to read the header from the file to know the ensemble size
header = read_TRDI_header(infile)
if header['sourceID'] != b'\x7f':
print('non-currents ensemble found at %d' % bookmark)
if ens_len != header['nbytesperens']+2:
ens_len = header['nbytesperens']+2 # update to what we have
# go back to where this ensemble started before we checked the header
infile.seek(bookmark)
ens = infile.read(ens_len)
ens_error = None
while len(ens) > 0:
# print('-- ensemble %d length %g, file position %g' % (ensemble_count, len(ens), infile.tell()))
# print(ens_data['header'])
ens_data, ens_error = parse_TRDI_ensemble(ens, verbose)
if (ens_error is None) and (ensemble_count >= ens2process[0]):
# write to netCDF
if netcdf_index == 0:
print('--- first ensembles read at %s and TRDI #%d' % (
ens_data['VLeader']['timestr'], ens_data['VLeader']['Ensemble_Number']))
varobj = cdf.variables['Rec']
try:
varobj[netcdf_index] = ens_data['VLeader']['Ensemble_Number']
except:
# here we have reached the end of the netCDF file
cdf.close()
infile.close()
return
# time calculations done when vleader is read
if time_type == 'EPIC_with_CF':
varobj = cdf.variables['time']
varobj[netcdf_index] = ens_data['VLeader']['EPIC_time']
varobj = cdf.variables['time2']
varobj[netcdf_index] = ens_data['VLeader']['EPIC_time2']
varobj = cdf.variables['cf_time']
elapsed = ens_data['VLeader']['dtobj']-t0 # timedelta
elapsed_sec = elapsed.total_seconds()
varobj[netcdf_index] = elapsed_sec
elif time_type == 'CF_with_EPIC':
varobj = cdf.variables['time']
elapsed = ens_data['VLeader']['dtobj'] - t0 # timedelta
elapsed_sec = elapsed.total_seconds()
if elapsed_sec == 0:
print('elapsed seconds from ensemble {} is {}'.format(ensemble_count, elapsed_sec))
varobj[netcdf_index] = elapsed_sec
t1, t2 = cftime2EPICtime(elapsed_sec, cf_units)
varobj = cdf.variables['EPIC_time']
varobj[netcdf_index] = t1
varobj = cdf.variables['EPIC_time2']
varobj[netcdf_index] = t2
elif time_type == 'EPIC':
varobj = cdf.variables['time']
varobj[netcdf_index] = ens_data['VLeader']['EPIC_time']
varobj = cdf.variables['time2']
varobj[netcdf_index] = ens_data['VLeader']['EPIC_time2']
else: # only CF time, the default
varobj = cdf.variables['time']
elapsed = ens_data['VLeader']['dtobj']-t0 # timedelta
elapsed_sec = elapsed.total_seconds()
varobj[netcdf_index] = elapsed_sec
# diagnostic
if (ens2process[1]-ens2process[0]-1) < 100:
print('%d %15.8f %s' % (ens_data['VLeader']['Ensemble_Number'],
ens_data['VLeader']['julian_day_from_julian'],
ens_data['VLeader']['timestr']))
varobj = cdf.variables['sv']
varobj[netcdf_index] = ens_data['VLeader']['Speed_of_Sound']
for i in range(nslantbeams):
varname = "vel%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index, :] = ens_data['VData'][i, :]
for i in range(nslantbeams):
varname = "cor%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index, :] = ens_data['CData'][i, :]
for i in range(nslantbeams):
varname = "att%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index, :] = ens_data['IData'][i, :]
if 'GData' in ens_data:
for i in range(nslantbeams):
varname = "PGd%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index, :] = ens_data['GData'][i, :]
varobj = cdf.variables['Rec']
varobj[netcdf_index] = ens_data['VLeader']['Ensemble_Number']
varobj = cdf.variables['Hdg']
varobj[netcdf_index] = ens_data['VLeader']['Heading']
varobj = cdf.variables['Ptch']
varobj[netcdf_index] = ens_data['VLeader']['Pitch']
varobj = cdf.variables['Roll']
varobj[netcdf_index] = ens_data['VLeader']['Roll']
varobj = cdf.variables['HdgSTD']
varobj[netcdf_index] = ens_data['VLeader']['H/Hdg_Std_Dev']
varobj = cdf.variables['PtchSTD']
varobj[netcdf_index] = ens_data['VLeader']['P/Pitch_Std_Dev']
varobj = cdf.variables['RollSTD']
varobj[netcdf_index] = ens_data['VLeader']['R/Roll_Std_Dev']
varobj = cdf.variables['Tx']
varobj[netcdf_index] = ens_data['VLeader']['Temperature']
varobj = cdf.variables['S']
varobj[netcdf_index] = ens_data['VLeader']['Salinity']
varobj = cdf.variables['xmitc']
varobj[netcdf_index] = ens_data['VLeader']['Xmit_Current']
varobj = cdf.variables['xmitv']
varobj[netcdf_index] = ens_data['VLeader']['Xmit_Voltage']
varobj = cdf.variables['Ambient_Temp']
varobj[netcdf_index] = ens_data['VLeader']['Ambient_Temp']
varobj = cdf.variables['Pressure+']
varobj[netcdf_index] = ens_data['VLeader']['Pressure_(+)']
varobj = cdf.variables['Pressure-']
varobj[netcdf_index] = ens_data['VLeader']['Pressure_(-)']
varobj = cdf.variables['Attitude_Temp']
varobj[netcdf_index] = ens_data['VLeader']['Attitude_Temp']
varobj = cdf.variables['EWD1']
varobj[netcdf_index] = int(ens_data['VLeader']['Error_Status_Word_Low_16_bits_LSB'])
varobj = cdf.variables['EWD2']
varobj[netcdf_index] = int(ens_data['VLeader']['Error_Status_Word_Low_16_bits_MSB'])
varobj = cdf.variables['EWD3']
varobj[netcdf_index] = int(ens_data['VLeader']['Error_Status_Word_High_16_bits_LSB'])
varobj = cdf.variables['EWD4']
varobj[netcdf_index] = int(ens_data['VLeader']['Error_Status_Word_High_16_bits_MSB'])
if ens_data['FLeader']['Depth_sensor_available'] == 'Yes':
varobj = cdf.variables['Pressure']
varobj[netcdf_index] = ens_data['VLeader']['Pressure_deca-pascals']
varobj = cdf.variables['PressVar']
varobj[netcdf_index] = ens_data['VLeader']['Pressure_variance_deca-pascals']
# add bottom track data write to cdf here
if 'BTData' in ens_data:
if ens_data['BTData']['Mode'] == 0:
varobj = cdf.variables['BTRmin']
varobj[netcdf_index] = ens_data['BTData']['Ref_Layer_Min']
varobj = cdf.variables['BTRnear']
varobj[netcdf_index] = ens_data['BTData']['Ref_Layer_Near']
varobj = cdf.variables['BTRfar']
varobj[netcdf_index] = ens_data['BTData']['Ref_Layer_Far']
varnames = ('BTWe', 'BTWu', 'BTWv', 'BTWd')
for i in range(nslantbeams):
varname = "BTR%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['BT_Range'][i]
if ens_data['FLeader']['Coord_Transform'] == 'EARTH':
varobj = cdf.variables[varnames[i]]
else:
varname = "BTV%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['BT_Vel'][i]
varname = "BTc%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['BT_Corr'][i]
varname = "BTe%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['BT_Amp'][i]
varname = "BTp%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['BT_PGd'][i]
varname = "BTRSSI%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['RSSI_Amp'][i]
if ens_data['BTData']['Mode'] == 0:
varobj[netcdf_index] = ens_data['BTData']['Ref_Layer_Vel'][i]
varname = "BTRc%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['Ref_Layer_Corr'][i]
varname = "BTRi%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['Ref_Layer_Amp'][i]
varname = "BTRp%d" % (i+1)
varobj = cdf.variables[varname]
varobj[netcdf_index] = ens_data['BTData']['Ref_Layer_PGd'][i]
if 'VBeamVData' in ens_data:
if ens_data['VBeamLeader']['Vertical_Depth_Cells'] == ens_data['FLeader']['Number_of_Cells']:
varobj = cdf.variables['vel5']
varobj[netcdf_index, :] = ens_data['VBeamVData']
varobj = cdf.variables['cor5']
varobj[netcdf_index, :] = ens_data['VBeamCData']
varobj = cdf.variables['att5']
varobj[netcdf_index, :] = ens_data['VBeamIData']
if 'VBeamGData' in ens_data:
varobj = cdf.variables['PGd5']
varobj[netcdf_index, :] = ens_data['VBeamGData']
if 'WaveParams' in ens_data:
# we can get away with this because the key names and var names are the same
for key in ens_data['WaveParams']:
varobj = cdf.variables[key]
varobj[netcdf_index] = ens_data['WaveParams'][key]
if 'WaveSeaSwell' in ens_data:
# we can get away with this because the key names and var names are the same
for key in ens_data['WaveSeaSwell']:
varobj = cdf.variables[key]
varobj[netcdf_index] = ens_data['WaveSeaSwell'][key]
netcdf_index += 1
elif ens_error == 'no ID':
print('Stopping because ID tracking lost')
infile.close()
cdf.close()
sys.exit(1)
ensemble_count += 1
if ensemble_count > maxens:
print('stopping at estimated end of file ensemble %d' % ens2process[1])
break
n = 10000
ensf, ensi = math.modf(ensemble_count/n)
if ensf == 0:
print('%d ensembles read at %s and TRDI #%d' % (ensemble_count, ens_data['VLeader']['dtobj'],
ens_data['VLeader']['Ensemble_Number']))
if ensemble_count >= ens2process[1]-1:
print('stopping at requested ensemble %d' % ens2process[1])
break
# note that ensemble lengths can change in the middle of the file!
# TODO - is there a faster way to do this??
bookmark = infile.tell() # save beginning of next ensemble
# TODO - since we are jumping around, we should check here to see
# how close to the end of the file we are - if it is within one
# header length - we are done
# need to read the header from the file to know the ensemble size
header = read_TRDI_header(infile)
if header is None:
# we presume this is the end of the file, since we don't have header info
print('end of file reached with incomplete header')
break
if header['sourceID'] != b'\x7f':
print('non-currents ensemble found at %d' % bookmark)
if ens_len != header['nbytesperens']+2:
ens_len = header['nbytesperens']+2 # update to what we have
# TODO - fix this so that we aren't going back and forth, it is really slow
# go back to where this ensemble started before we checked the header
infile.seek(bookmark)
ens = infile.read(ens_len)
else: # while len(ens) > 0:
print('end of file reached')
if ensemble_count < maxens:
print('end of file reached after %d ensembles, less than estimated in the file' % ensemble_count)
elif ensemble_count > maxens:
print('end of file reached after %d ensembles, more than estimated in the file' % ensemble_count)
infile.close()
cdf.close()
print('%d ensembles read, %d records written' % (ensemble_count, netcdf_index))
return ensemble_count, netcdf_index, ens_error
# TODO this is not used - consider removing
def transpose_rotation_matrix(matrix):
"""
transpose the rotation matrix
:param matrix: rotation matrix from file
:return: transposed matrix
"""
if not matrix:
return []
return [[row[i] for row in matrix] for i in range(len(matrix[0]))]
def write_dict_to_cdf_attributes(netcdf_object, d, tag):
"""
write a dictionary to netCDF attributes
:param netcdf_object: netcdf file object
:param dict d: dictionary of attribute names and values
:param str tag: an identifier to prepend to the attribute name
:return: the dictionary d with any strings that can be changed to numbers, as numbers
"""
i = 0
# first, convert as many of the values in d to numbers as we can
for key in iter(d):
if type(d[key]) == str:
try:
d[key] = float(d[key])
except ValueError:
# we really don't need to print here,
# but python insists we do something
# print(' can\'t convert %s to float' % key)
i += 1
for key in iter(d):
newkey = tag + key
try:
netcdf_object.setncattr(newkey, d[key])
except:
print('can\'t set %s attribute' % key)
return d
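# Example (illustrative): write_dict_to_cdf_attributes(cdf, {'Number_of_Cells': '20'}, 'TRDI_')
# converts '20' to 20.0 and sets a global attribute TRDI_Number_of_Cells on the open Dataset.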
def parse_TRDI_ensemble(ensbytes, verbose):
"""
convert the binary data for one ensemble to a dictionary of readable data
:param binary ensbytes: the raw binary data for the ensemble
:param verbose: print out the data as it is converted
:return: a dictionary of the data, a string describing any errors
"""
ens_data = {}
ens_error = None
ens_data['Header'] = parse_TRDI_header(ensbytes)
for i in range(ens_data['Header']['ndatatypes']):
# go to each offset and parse depending on what we find
offset = ens_data['Header']['offsets'][i]
# raw, val = __parseTRDIushort(ensbytes, offset)
val = struct.unpack('<H', ensbytes[offset:offset+2])[0]
if val == 0: # \x00\x00
if verbose:
print('Fixed Leader found at %g' % offset)
ens_data['FLeader'] = parse_TRDI_fixed_leader(ensbytes, offset)
# we need this to decode the other data records
ncells = int(ens_data['FLeader']['Number_of_Cells'])
            nbeams = 4  # the 5th beam has its own record
elif val == 128: # \x80\x00
if verbose:
print('Variable Leader found at %g' % offset)
ens_data['VLeader'] = parse_TRDI_variable_leader(ensbytes, offset)
# print(VLeader)
elif val == 256: # raw == b'\x00\x01': 256
if verbose:
print('Velocity found at %g' % offset)
ens_data['VData'] = parse_TRDI_velocity(ensbytes, offset, ncells, nbeams)
elif val == 512: # raw == b'\x00\x02':
if verbose:
print('Correlation found at %g' % offset)
ens_data['CData'] = parse_TRDI_correlation(ensbytes, offset, ncells, nbeams)
elif val == 768: # raw == b'\x00\x03':
if verbose:
print('Intensity found at %g' % offset)
ens_data['IData'] = parse_TRDI_intensity(ensbytes, offset, ncells, nbeams)
elif val == 1024: # raw == b'\x00\x04':
if verbose:
print('PGood found at %g' % offset)
ens_data['GData'] = parse_TRDI_percent_good(ensbytes, offset, ncells, nbeams)
elif val == 1280: # raw == b'\x00\x05':
if verbose:
print('Status profile found at %g' % offset)
elif val == 1536: # raw == b'\x00\x06':
if verbose:
print('BT found at %g' % offset)
ens_data['BTData'] = parse_TRDI_bottom_track(ensbytes, offset, nbeams)
elif val == 1792: # raw == b'\x00\x07':
# this not defined in TRDI docs
pass
elif val == 2048: # raw == b'\x00\x08':
if verbose:
print('MicroCAT data found at %g' % offset)
elif val == 12800: # raw == b'\x00\x32': #12800
if verbose:
print('Instrument transformation found at %g' % offset)
ens_data['XformMatrix'] = parse_TRDI_transformation_matrix(ensbytes, offset, nbeams)
elif val == 28672: # raw == b'\x00\x70':
if verbose:
print('V Series system config found at %g' % offset)
ens_data['VSysConfig'] = parse_TRDI_vertical_system_configuration(ensbytes, offset)
elif val == 28673: # raw == b'\x01\x70':
if verbose:
print('V Series ping setup found at %g' % offset)
ens_data['VPingSetup'] = parse_TRDI_vertical_ping_setup(ensbytes, offset)
elif val == 28674: # raw == b'\x02\x70':
if verbose:
print('V Series ADC Data found at %g' % offset)
# currently not defined well in TRDI docs
elif val == 28675: # raw == b'\x03\x70':
if verbose:
print('V Series System Configuration Data found at %g' % offset)
# currently not defined well in TRDI docs
elif val == 3841: # raw == b'\x01\x0f':
if verbose:
print('Vertical Beam Leader Data found at %g' % offset)
ens_data['VBeamLeader'] = parse_TRDI_vertical_beam_leader(ensbytes, offset)
elif val == 2560: # raw == b'\x00\x0a':
if verbose:
print('Vertical Beam Velocity Data found at %g' % offset)
ens_data['VBeamVData'] = parse_TRDI_vertical_velocity(ensbytes, offset,
ens_data['VBeamLeader']['Vertical_Depth_Cells'])
elif val == 2816: # raw == b'\x00\x0b':
if verbose:
print('Vertical Beam Correlation Data found at %g' % offset)
ens_data['VBeamCData'] = parse_TRDI_vertical_correlation(ensbytes, offset,
ens_data['VBeamLeader']['Vertical_Depth_Cells'])
elif val == 3072: # raw == b'\x00\x0c':
if verbose:
print('Vertical Beam Amplitude Data found at %g' % offset)
ens_data['VBeamIData'] = parse_TRDI_vertical_intensity(ensbytes, offset,
ens_data['VBeamLeader']['Vertical_Depth_Cells'])
elif val == 3328: # raw == b'\x00\x0d':
if verbose:
print('Vertical Beam Percent Good Data found at %g' % offset)
ens_data['VBeamGData'] = parse_TRDI_vertical_percent_good(ensbytes, offset,
ens_data['VBeamLeader']['Vertical_Depth_Cells'])
elif val == 28676: # raw == b'\x40\x70':
if verbose:
print('V Series Event Log Data found at %g' % offset)
elif val == 11: # raw == b'\x0b\x00':
if verbose:
print('Wavesmon 4 Wave Parameters found at %g' % offset)
ens_data['WaveParams'] = parse_TRDI_wave_parameters(ensbytes, offset)
elif val == 12: # raw == b'\x0c\x00':
if verbose:
print('Wavesmon 4 Sea and Swell found at %g' % offset)
ens_data['WaveSeaSwell'] = parse_TRDI_wave_sea_swell(ensbytes, offset)
else:
print('ID %d unrecognized at %g' % (val, offset))
ens_error = 'no ID'
csum = __computeChecksum(ensbytes)
if csum != (ensbytes[-2]+(ensbytes[-1] << 8)):
ens_error = 'checksum failure'
return ens_data, ens_error
def setup_netcdf_file(fname, ens_data, gens, serial_number, time_type, delta_t):
"""
create the netcdf output file, define dimensions and variables
:param str fname: path and name of netcdf file
:param dict ens_data: data from the first ensemble to be read
:param tuple gens: start and end ensemble indices
:param str serial_number: instrument serial number
:param str time_type: indicate if "CF", "CF_with_EPIC", "EPIC_with_CF" or "EPIC" timebase for "time"
:param str delta_t: time between ensembles
:return: netcdf file object, string describing the time units for CF time
"""
# note that
# f4 = 4 byte, 32 bit float
# maxfloat = 3.402823*10**38;
    # where the variable is based on a single dimension, usually time, it is still expressed as a tuple ("time") and
# needs to be kept that way, even though pylint complains
intfill = -32768
floatfill = 1E35
    # delta_t may arrive as None or as an int; handle both cases here
if delta_t is None:
delta_t = "none"
if isinstance(delta_t, int):
delta_t = str(delta_t)
nens = gens[1]-gens[0]-1
print('creating netCDF file %s with %d records' % (fname, nens))
cdf = Dataset(fname, "w", clobber=True, format="NETCDF4")
# dimensions, in EPIC order
cdf.createDimension('time', nens)
cdf.createDimension('depth', ens_data['FLeader']['Number_of_Cells'])
cdf.createDimension('lat', 1)
cdf.createDimension('lon', 1)
# write global attributes
cdf.history = "translated to netCDF by TRDIpd0tonetcdf.py"
cdf.sensor_type = "TRDI"
cdf.serial_number = serial_number
cdf.DELTA_T = delta_t
cdf.sample_rate = ens_data['FLeader']['Time_Between_Ping Groups']
write_dict_to_cdf_attributes(cdf, ens_data['FLeader'], "TRDI_")
varobj = cdf.createVariable('Rec', 'u4', 'time', fill_value=intfill)
varobj.units = "count"
varobj.long_name = "Ensemble Number"
# the ensemble number is a two byte LSB and a one byte MSB (for the rollover)
# varobj.valid_range = [0, 2**23]
# it's not yet clear which way to go with this. python tools like xarray
# and panoply demand that time be a CF defined time.
# USGS CMG MATLAB tools need time and time2
# TODO - CF_time can come out as YYYY-M-D for dates with single digit months and days, check to see if this is ISO
# and fix if it is not. This is a better way:
# d = datetime.datetime(2010, 7, 4, 12, 15, 58)
# '{:%Y-%m-%d %H:%M:%S}'.format(d)
if time_type == 'EPIC_with_CF':
# we include time and time2 for EPIC compliance
varobj = cdf.createVariable('time', 'u4', ('time',))
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
varobj = cdf.createVariable('time2', 'u4', ('time',))
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
cf_units = ""
# we include cf_time for cf compliance and use by python packages like xarray
        # if f8 (64 bit) is not used, time is clipped
# for ADCP fast sampled, single ping data, need millisecond resolution
varobj = cdf.createVariable('cf_time', 'f8', 'time')
# for cf convention, always assume UTC for now, and use the UNIX Epoch as the reference
varobj.units = "seconds since %d-%d-%d %d:%d:%f 0:00" % (ens_data['VLeader']['Year'],
ens_data['VLeader']['Month'],
ens_data['VLeader']['Day'],
ens_data['VLeader']['Hour'],
ens_data['VLeader']['Minute'],
ens_data['VLeader']['Second'] +
ens_data['VLeader']['Hundredths'] / 100)
varobj.standard_name = "time"
varobj.axis = "T"
elif time_type == "CF_with_EPIC":
# cf_time for cf compliance and use by python packages like xarray
        # if f8 (64 bit) is not used, time is clipped
# for ADCP fast sampled, single ping data, need millisecond resolution
varobj = cdf.createVariable('time', 'f8', ('time',))
# for cf convention, always assume UTC for now, and use the UNIX Epoch as the reference
varobj.units = "seconds since %d-%d-%d %d:%d:%f 0:00" % (ens_data['VLeader']['Year'],
ens_data['VLeader']['Month'],
ens_data['VLeader']['Day'],
ens_data['VLeader']['Hour'],
ens_data['VLeader']['Minute'],
ens_data['VLeader']['Second'] +
ens_data['VLeader']['Hundredths'] / 100)
cf_units = "seconds since %d-%d-%d %d:%d:%f 0:00" % (ens_data['VLeader']['Year'], ens_data['VLeader']['Month'],
ens_data['VLeader']['Day'], ens_data['VLeader']['Hour'],
ens_data['VLeader']['Minute'],
ens_data['VLeader']['Second']
+ ens_data['VLeader']['Hundredths'] / 100)
varobj.standard_name = "time"
varobj.axis = "T"
varobj.type = "UNEVEN"
# we include time and time2 for EPIC compliance
# this statement resulted in a fill value of -1??
# varobj = cdf.createVariable('EPIC_time','u4',('time',))
varobj = cdf.createVariable('EPIC_time', 'u4', ('time',), fill_value=False)
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
# this statement resulted in a fill value of -1??
# varobj = cdf.createVariable('EPIC_time2','u4',('time',))
varobj = cdf.createVariable('EPIC_time2', 'u4', ('time',), fill_value=False)
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
elif time_type == "EPIC":
varobj = cdf.createVariable('time', 'u4', ('time',))
varobj.units = "True Julian Day"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
varobj = cdf.createVariable('time2', 'u4', ('time',))
varobj.units = "msec since 0:00 GMT"
varobj.epic_code = 624
varobj.datum = "Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968"
varobj.NOTE = "Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )"
cf_units = ""
else: # only CF time
# this is best for use by python packages like xarray
        # if f8 (64 bit) is not used, time is clipped
# for ADCP fast sampled, single ping data, need millisecond resolution
varobj = cdf.createVariable('time', 'f8', ('time',))
# for cf convention, always assume UTC for now, and use the UNIX Epoch as the reference
varobj.units = "seconds since %d-%d-%d %d:%d:%f 0:00" % (ens_data['VLeader']['Year'],
ens_data['VLeader']['Month'],
ens_data['VLeader']['Day'],
ens_data['VLeader']['Hour'],
ens_data['VLeader']['Minute'],
ens_data['VLeader']['Second'] +
ens_data['VLeader']['Hundredths'] / 100)
cf_units = "seconds since %d-%d-%d %d:%d:%f 0:00" % (ens_data['VLeader']['Year'], ens_data['VLeader']['Month'],
ens_data['VLeader']['Day'], ens_data['VLeader']['Hour'],
ens_data['VLeader']['Minute'],
ens_data['VLeader']['Second']
+ ens_data['VLeader']['Hundredths'] / 100)
varobj.standard_name = "time"
varobj.axis = "T"
varobj.type = "UNEVEN"
varobj = cdf.createVariable('bindist', 'f4', ('depth',), fill_value=floatfill)
# note name is one of the netcdf4 reserved attributes, use setncattr
varobj.setncattr('name', "bindist")
varobj.units = "m"
varobj.long_name = "bin distance from instrument for slant beams"
varobj.epic_code = 0
# varobj.valid_range = [0 0]
varobj.NOTE = "distance is calculated from center of bin 1 and bin size"
bindist = []
for idx in range(ens_data['FLeader']['Number_of_Cells']):
bindist.append(idx * (ens_data['FLeader']['Depth_Cell_Length_cm'] / 100) +
ens_data['FLeader']['Bin_1_distance_cm'] / 100)
varobj[:] = bindist[:]
varobj = cdf.createVariable('depth', 'f4', ('depth',)) # no fill for ordinates
varobj.units = "m"
varobj.long_name = "distance from transducer, depth placeholder"
varobj.center_first_bin_m = ens_data['FLeader']['Bin_1_distance_cm'] / 100
varobj.blanking_distance_m = ens_data['FLeader']['Blank_after_Transmit_cm'] / 100
varobj.bin_size_m = ens_data['FLeader']['Depth_Cell_Length_cm'] / 100
varobj.bin_count = ens_data['FLeader']['Number_of_Cells']
varobj[:] = bindist[:]
varobj = cdf.createVariable('sv', 'f4', ('time',), fill_value=floatfill)
varobj.units = "m s-1"
varobj.long_name = "sound velocity (m s-1)"
# varobj.valid_range = [1400, 1600]
for i in range(4):
varname = "vel%d" % (i+1)
varobj = cdf.createVariable(varname, 'f4', ('time', 'depth'), fill_value=floatfill)
varobj.units = "mm s-1"
varobj.long_name = "Beam %d velocity (mm s-1)" % (i+1)
varobj.epic_code = 1277+i
# varobj.valid_range = [-32767, 32767]
for i in range(4):
varname = "cor%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "Beam %d correlation" % (i+1)
varobj.epic_code = 1285+i
# varobj.valid_range = [0, 255]
for i in range(4):
varname = "att%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "counts"
varobj.epic_code = 1281+i
varobj.long_name = "ADCP attenuation of beam %d" % (i+1)
# varobj.valid_range = [0, 255]
if 'GData' in ens_data:
for i in range(4):
varname = "PGd%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "Percent Good Beam %d" % (i+1)
varobj.epic_code = 1241+i
# varobj.valid_range = [0, 100]
varobj = cdf.createVariable('Hdg', 'f4', ('time',), fill_value=floatfill)
varobj.units = "hundredths of degrees"
varobj.long_name = "INST Heading"
varobj.epic_code = 1215
varobj.heading_alignment = ens_data['FLeader']['Heading_Alignment_Hundredths_of_Deg']
varobj.heading_bias = ens_data['FLeader']['Heading_Bias_Hundredths_of_Deg']
# varobj.valid_range = [0, 36000]
if ens_data['FLeader']['Heading_Bias_Hundredths_of_Deg'] == 0:
varobj.NOTE_9 = "no heading bias was applied by EB during deployment or by wavesmon"
else:
varobj.NOTE_9 = "a heading bias was applied by EB during deployment or by wavesmon"
varobj = cdf.createVariable('Ptch', 'f4', ('time',), fill_value=floatfill)
varobj.units = "hundredths of degrees"
varobj.long_name = "INST Pitch"
varobj.epic_code = 1216
# varobj.valid_range = [-18000, 18000] # physical limit, not sensor limit
varobj = cdf.createVariable('Roll', 'f4', ('time',), fill_value=floatfill)
varobj.units = "hundredths of degrees"
varobj.long_name = "INST Roll"
varobj.epic_code = 1217
# varobj.valid_range = [-18000, 18000] # physical limit, not sensor limit
varobj = cdf.createVariable('HdgSTD', 'f4', ('time',), fill_value=floatfill)
varobj.units = "degrees"
varobj.long_name = "Heading Standard Deviation"
varobj = cdf.createVariable('PtchSTD', 'f4', ('time',), fill_value=floatfill)
varobj.units = "tenths of degrees"
varobj.long_name = "Pitch Standard Deviation"
varobj = cdf.createVariable('RollSTD', 'f4', ('time',), fill_value=floatfill)
varobj.units = "tenths of degrees"
varobj.long_name = "Roll Standard Deviation"
varobj = cdf.createVariable('Tx', 'f4', ('time',), fill_value=floatfill)
varobj.units = "hundredths of degrees"
varobj.long_name = "ADCP Transducer Temperature"
varobj.epic_code = 3017
# varobj.valid_range = [-500, 4000]
varobj = cdf.createVariable('S', 'f4', ('time',), fill_value=floatfill)
varobj.units = "PPT"
varobj.long_name = "SALINITY (PPT)"
varobj.epic_code = 40
# varobj.valid_range = [0, 40]
varobj = cdf.createVariable('xmitc', 'f4', ('time',), fill_value=floatfill)
varobj.units = "amps"
varobj.long_name = "transmit current"
varobj = cdf.createVariable('xmitv', 'f4', ('time',), fill_value=floatfill)
varobj.units = "volts"
varobj.long_name = "transmit voltage"
varobj = cdf.createVariable('Ambient_Temp', 'i2', ('time',), fill_value=intfill)
varobj.units = "C"
varobj.long_name = "Ambient_Temp"
varobj = cdf.createVariable('Pressure+', 'i2', ('time',), fill_value=intfill)
varobj.units = "unknown"
varobj.long_name = "Pressure+"
varobj = cdf.createVariable('Pressure-', 'i2', ('time',), fill_value=intfill)
varobj.units = "unknown"
varobj.long_name = "Pressure-"
varobj = cdf.createVariable('Attitude_Temp', 'i2', ('time',), fill_value=intfill)
varobj.units = "C"
varobj.long_name = "Attitude_Temp"
for i in range(4):
varname = "EWD%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "binary flag"
varobj.long_name = "Error Status Word %d" % (i+1)
if ens_data['FLeader']['Depth_sensor_available'] == 'Yes':
varobj = cdf.createVariable('Pressure', 'f4', ('time',), fill_value=floatfill)
varobj.units = "deca-pascals"
varobj.long_name = "ADCP Transducer Pressure"
varobj.epic_code = 4
varobj = cdf.createVariable('PressVar', 'f4', ('time',), fill_value=floatfill)
varobj.units = "deca-pascals"
varobj.long_name = "ADCP Transducer Pressure Variance"
if 'BTData' in ens_data:
# write globals attributable to BT setup
cdf.setncattr('TRDI_BT_pings_per_ensemble', ens_data['BTData']['Pings_per_ensemble'])
cdf.setncattr('TRDI_BT_reacquire_delay', ens_data['BTData']['delay_before_reacquire'])
cdf.setncattr('TRDI_BT_min_corr_mag', ens_data['BTData']['Corr_Mag_Min'])
cdf.setncattr('TRDI_BT_min_eval_mag', ens_data['BTData']['Eval_Amp_Min'])
cdf.setncattr('TRDI_BT_min_percent_good', ens_data['BTData']['PGd_Minimum'])
cdf.setncattr('TRDI_BT_mode', ens_data['BTData']['Mode'])
cdf.setncattr('TRDI_BT_max_err_vel', ens_data['BTData']['Err_Vel_Max'])
# cdf.setncattr('TRDI_BT_max_tracking_depth',ens_data['BTData'][''])
# cdf.setncattr('TRDI_BT_shallow_water_gain',ens_data['BTData'][''])
for i in range(4):
varname = "BTR%d" % (i+1)
varobj = cdf.createVariable(varname, 'u8', ('time',), fill_value=intfill)
varobj.units = "cm"
varobj.long_name = "BT Range %d" % (i+1)
for i in range(4):
varnames = ('BTWe', 'BTWu', 'BTWv', 'BTWd')
longnames = ('BT Error Velocity', 'BT Eastward Velocity', 'BT Northward Velocity', 'BT Vertical Velocity')
if ens_data['FLeader']['Coord_Transform'] == 'EARTH':
varobj = cdf.createVariable(varnames[i+1], 'i2', ('time',), fill_value=intfill)
varobj.units = "mm s-1"
varobj.long_name = "%s, mm s-1" % longnames[i+1]
else:
varname = "BTV%d" % (i+1)
varobj = cdf.createVariable(varname, 'i2', ('time',), fill_value=intfill)
varobj.units = "mm s-1"
varobj.long_name = "BT velocity, mm s-1 %d" % (i+1)
for i in range(4):
varname = "BTc%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "BT correlation %d" % (i+1)
for i in range(4):
varname = "BTe%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "BT evaluation amplitude %d" % (i+1)
for i in range(4):
varname = "BTp%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "percent"
varobj.long_name = "BT percent good %d" % (i+1)
# varobj.valid_range = [0, 100]
for i in range(4):
varname = "BTRSSI%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "BT Receiver Signal Strength Indicator %d" % (i+1)
if ens_data['BTData']['Mode'] == 0: # water reference layer was used
varobj = cdf.createVariable('BTRmin', 'f4', ('time',), fill_value=floatfill)
varobj.units = 'dm'
varobj.long_name = "BT Ref. min"
varobj = cdf.createVariable('BTRnear', 'f4', ('time',), fill_value=floatfill)
varobj.units = 'dm'
varobj.long_name = "BT Ref. near"
varobj = cdf.createVariable('BTRfar', 'f4', ('time',), fill_value=floatfill)
varobj.units = 'dm'
varobj.long_name = "BT Ref. far"
for i in range(4):
varname = "BTRv%d" % (i+1)
varobj = cdf.createVariable(varname, 'i2', ('time',), fill_value=intfill)
varobj.units = "mm s-1"
varobj.long_name = "BT Ref. velocity, mm s-1 %d" % (i+1)
for i in range(4):
varname = "BTRc%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "BT Ref. correlation %d" % (i+1)
for i in range(4):
varname = "BTRi%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "BT Ref. intensity %d" % (i+1)
for i in range(4):
varname = "BTRp%d" % (i+1)
varobj = cdf.createVariable(varname, 'u2', ('time',), fill_value=intfill)
varobj.units = "percent"
varobj.long_name = "BT Ref. percent good %d" % (i+1)
varobj.epic_code = 1269+i
if 'VPingSetup' in ens_data:
write_dict_to_cdf_attributes(cdf, ens_data['VPingSetup'], "TRDI_VBeam_")
if 'VBeamLeader' in ens_data:
write_dict_to_cdf_attributes(cdf, ens_data['VBeamLeader'], "TRDI_VBeam_")
if 'VBeamVData' in ens_data:
if ens_data['VBeamLeader']['Vertical_Depth_Cells'] == ens_data['FLeader']['Number_of_Cells']:
varobj = cdf.createVariable("vel5", 'f4', ('time', 'depth'), fill_value=floatfill)
varobj.units = "mm s-1"
varobj.long_name = "Beam 5 velocity (mm s-1)"
varobj = cdf.createVariable("cor5", 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "Beam 5 correlation"
varobj = cdf.createVariable("att5", 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "ADCP attenuation of beam 5"
if 'VBeamGData' in ens_data:
varobj = cdf.createVariable("PGd5", 'u2', ('time', 'depth'), fill_value=intfill)
varobj.units = "counts"
varobj.long_name = "Percent Good Beam 5"
else:
cdf.TRDI_VBeam_note1 = 'Vertical beam data found without Percent Good'
else:
print("Vertical beam data found with different number of cells.")
cdf.TRDI_VBeam_note = "Vertical beam data found with different number of cells. " + \
"Vertical beam data not exported to netCDF"
print("Vertical beam data not exported to netCDF")
if 'WaveParams' in ens_data:
# no units given for any of these in the TRDI docs
varobj = cdf.createVariable("Hs", 'f4', ('time',), fill_value=floatfill)
varobj.units = "m"
varobj.long_name = "Significant Wave Height (m)"
varobj = cdf.createVariable("Tp", 'f4', ('time',), fill_value=floatfill)
varobj.units = "s"
varobj.long_name = "Peak Wave Period (s)"
varobj = cdf.createVariable("Dp", 'f4', ('time',), fill_value=floatfill)
varobj.units = "Deg."
varobj.long_name = "Peak Wave Direction (Deg.)"
varobj = cdf.createVariable("Dm", 'f4', ('time',), fill_value=floatfill)
varobj.units = "Deg."
varobj.long_name = "Mea Peak Wave Direction (Deg.)"
varobj = cdf.createVariable("SHmax", 'f4', ('time',), fill_value=floatfill)
varobj.units = "m"
varobj.long_name = "Maximum Wave Height (m)"
varobj.note = "from zero crossing analysis of surface track time series"
varobj = cdf.createVariable("SH13", 'f4', ('time',), fill_value=floatfill)
varobj.units = "m"
varobj.long_name = "Significant Wave Height of the largest 1/3 of the waves (m)"
varobj.note = "in the field from zero crossing anaylsis of surface track time series"
varobj = cdf.createVariable("SH10", 'f4', ('time',), fill_value=floatfill)
varobj.units = "m"
varobj.long_name = "Significant Wave Height of the largest 1/10 of the waves (m)"
varobj.note = "in the field from zero crossing anaylsis of surface track time series"
varobj = cdf.createVariable("STmax", 'f4', ('time',), fill_value=floatfill)
varobj.units = "s"
varobj.long_name = "Maximum Peak Wave Period (s)"
varobj.note = "from zero crossing analysis of surface track time series"
varobj = cdf.createVariable("ST13", 'f4', ('time',), fill_value=floatfill)
varobj.units = "s"
varobj.long_name = "Period associated with the peak wave height of the largest 1/3 of the waves (s)"
varobj.note = "in the field from zero crossing analysis of surface track time series"
varobj = cdf.createVariable("ST10", 'f4', ('time',), fill_value=floatfill)
varobj.units = "s"
varobj.long_name = "Period associated with the peak wave height of the largest 1/10 of the waves (s)"
varobj.note = "in the field from zero crossing anaylsis of surface track time series"
varobj = cdf.createVariable("T01", 'f4', ('time',), fill_value=floatfill)
varobj.units = " "
varobj = cdf.createVariable("Tz", 'f4', ('time',), fill_value=floatfill)
varobj.units = " "
varobj = cdf.createVariable("Tinv1", 'f4', ('time',), fill_value=floatfill)
varobj.units = " "
varobj = cdf.createVariable("S0", 'f4', ('time',), fill_value=floatfill)
varobj.units = " "
varobj = cdf.createVariable("Source", 'f4', ('time',), fill_value=floatfill)
varobj.units = " "
if 'WaveSeaSwell' in ens_data:
# no units given for any of these in the TRDI docs
varobj = cdf.createVariable("HsSea", 'f4', ('time',), fill_value=floatfill)
varobj.units = "m"
varobj.long_name = "Significant Wave Height (m)"
varobj.note = "in the sea region of the power spectrum"
varobj = cdf.createVariable("HsSwell", 'f4', ('time',), fill_value=floatfill)
varobj.units = "m"
varobj.long_name = "Significant Wave Height (m)"
varobj.note = "in the swell region of the power spectrum"
varobj = cdf.createVariable("TpSea", 'f4', ('time',), fill_value=floatfill)
varobj.units = "s"
varobj.long_name = "Peak Wave Period (s)"
varobj.note = "in the sea region of the power spectrum"
varobj = cdf.createVariable("TpSwell", 'f4', ('time',), fill_value=floatfill)
varobj.units = "s"
varobj.long_name = "Peak Wave Period (s)"
varobj.note = "in the swell region of the power spectrum"
varobj = cdf.createVariable("DpSea", 'f4', ('time',), fill_value=floatfill)
varobj.units = "Deg."
varobj.long_name = "Peak Wave Direction (Deg.)"
varobj.note = "in the sea region of the power spectrum"
varobj = cdf.createVariable("DpSwell", 'f4', ('time',), fill_value=floatfill)
varobj.units = "Deg."
varobj.long_name = "Peak Wave Direction (Deg.)"
varobj.note = "in the swell region of the power spectrum"
varobj = cdf.createVariable("SeaSwellPeriod", 'f4', ('time',), fill_value=floatfill)
varobj.units = "s"
varobj.long_name = "Transition Period between Sea and Swell (s)"
return cdf, cf_units
def bitstrLE(byte):
"""
make a bit string from little endian byte
:param byte byte: a byte
:return: a string of ones and zeros, the bits in the byte
"""
# surely there's a better way to do this!!
bits = ""
for i in [7, 6, 5, 4, 3, 2, 1, 0]: # Little Endian
if (byte >> i) & 1:
bits += "1"
else:
bits += "0"
return bits
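# Note: an equivalent one-liner would be format(byte, '08b'), which produces the
# same MSB-first bit string; the explicit loop above is left as-is.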
def bitstrBE(byte):
"""
make a bit string from big endian byte
:param byte byte: a byte
    :return: a string of ones and zeros, the bits in the byte
"""
# surely there's a better way to do this!!
bits = ""
for i in range(8): # Big Endian
if (byte[0] >> i) & 1:
bits += "1"
else:
bits += "0"
return bits
def read_TRDI_header(infile):
"""
read the TRDI header bytes directly from a file pointer position and test for end of file
:param infile: pointer to a file open for reading
:return: a dictionary of the TRDI Header data
"""
header_data = {}
try:
header_data['headerID'] = infile.read(1)
except:
return None
try:
header_data['sourceID'] = infile.read(1)
except:
return None
try:
header_data['nbytesperens'] = struct.unpack('<H', infile.read(2))[0]
except:
return None
infile.read(1) # spare, skip it
header_data['ndatatypes'] = infile.read(1)[0] # remember, bytes objects are arrays
offsets = [0]*header_data['ndatatypes'] # predefine a list of ints to fill
for i in range(header_data['ndatatypes']):
offsets[i] = struct.unpack('<H', infile.read(2))[0]
header_data['offsets'] = offsets
return header_data
def parse_TRDI_header(bstream):
"""
parse the TRDI header data for the number of data types and byte offsets to each
:param bytes bstream: the raw binary header information
:return: dictionary of readable header data
"""
header_data = {
'headerID': bstream[0], # byte 1
'sourceID': bstream[1], # byte 2
'nbytesperens': struct.unpack('<H', bstream[2:4])[0],
# spare, skip it, byte 5
'ndatatypes': bstream[5] # byte 6
}
offsets = [0]*header_data['ndatatypes'] # predefine a list of ints to fill
for i in range(header_data['ndatatypes']):
offsets[i] = struct.unpack('<H', bstream[6+i*2:6+i*2+2])[0]
header_data['offsets'] = offsets
return header_data
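# Example (illustrative, values hypothetical): a currents ensemble with three
# data types might parse to
#   {'headerID': 127, 'sourceID': 127, 'nbytesperens': 952,
#    'ndatatypes': 3, 'offsets': [18, 77, 142]}
# where each offset is the byte position of one data record within the ensemble.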
def parse_TRDI_fixed_leader(bstream, offset):
"""
parse the Fixed Leader section of data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: dictionary of readable fixed leader data
"""
f_leader_data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 0:
print("expected fixed leader ID, instead found %g", leader_id)
return -1
    f_leader_data['CPU_Version'] = "%s.%s" % (bstream[offset+2], bstream[offset+3])
f_leader_data['System_Configuration_LSB'] = bitstrLE(bstream[offset+4])
# anyone who has a better way to convert these bits, please tell me!
f_leader_data['System_Frequency'] = int(f_leader_data['System_Configuration_LSB'][5:8], 2)
sys_freqs = (75, 150, 300, 600, 1200, 2400)
f_leader_data['System_Frequency'] = sys_freqs[f_leader_data['System_Frequency']]
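    # (equivalently, the low three bits of the byte: bstream[offset+4] & 0b111)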
if f_leader_data['System_Configuration_LSB'][4] == "1":
f_leader_data['Beam_Pattern'] = 'Convex'
else:
f_leader_data['Beam_Pattern'] = 'Concave'
f_leader_data['Sensor_Configuration'] = int(f_leader_data['System_Configuration_LSB'][2:4], 2) + 1
if f_leader_data['System_Configuration_LSB'][1] == "1":
f_leader_data['Transducer_Head_Is_Attached'] = 'Yes'
else:
f_leader_data['Transducer_Head_Is_Attached'] = 'No'
if f_leader_data['System_Configuration_LSB'][0] == "1":
f_leader_data['Orientation'] = 'Up-facing beams'
else:
f_leader_data['Orientation'] = 'Down-facing beams'
f_leader_data['System_Configuration_MSB'] = bitstrLE(bstream[offset+5])
f_leader_data['Beam_Angle'] = int(f_leader_data['System_Configuration_MSB'][5:8], 2)
# the angles 15, 20, and 30 are used by the Workhorse
# the angle 25 is used by the Sentinel V, and so far, is always 25
angles = (15, 20, 30, 0, 0, 0, 0, 25)
f_leader_data['Beam_Angle'] = angles[f_leader_data['Beam_Angle']]
f_leader_data['Beam_Configuration'] = int(f_leader_data['System_Configuration_MSB'][0:4], 2)
if f_leader_data['Beam_Configuration'] == 4:
f_leader_data['Beam_Configuration'] = '4-bm janus'
elif f_leader_data['Beam_Configuration'] == 5:
f_leader_data['Beam_Configuration'] = '5-bm janus cfig demod'
elif f_leader_data['Beam_Configuration'] == 15:
f_leader_data['Beam_Configuration'] = '5-bm janus cfig (2 demod)'
else:
f_leader_data['Beam_Configuration'] = 'unknown'
f_leader_data['Simulated_Data'] = bstream[offset+6]
f_leader_data['Lag_Length'] = bstream[offset+7]
f_leader_data['Number_of_Beams'] = bstream[offset+8]
f_leader_data['Number_of_Cells'] = bstream[offset+9]
f_leader_data['Pings_Per_Ensemble'] = struct.unpack('<h', bstream[offset+10:offset+12])[0]
f_leader_data['Depth_Cell_Length_cm'] = struct.unpack('<h', bstream[offset+12:offset+14])[0]
f_leader_data['Blank_after_Transmit_cm'] = struct.unpack('<h', bstream[offset+14:offset+16])[0]
f_leader_data['Signal_Processing_Mode'] = bstream[offset+16]
f_leader_data['Low_Corr_Threshold'] = bstream[offset+17]
f_leader_data['No._Code_Reps'] = bstream[offset+18]
f_leader_data['PGd_Minimum'] = bstream[offset+19]
f_leader_data['Error_Velocity_Threshold'] = struct.unpack('<h', bstream[offset+20:offset+22])[0]
# TODO ping group time needs to be formatted better
f_leader_data['Time_Between_Ping Groups'] = "%03d:%02d:%02d" % (bstream[offset+22], bstream[offset+23],
bstream[offset+24])
f_leader_data['Coord_Transform_LSB'] = bitstrLE(bstream[offset+25])
f_leader_data['Coord_Transform'] = int(f_leader_data['Coord_Transform_LSB'][3:5], 2)
xforms = ('BEAM', 'INST', 'SHIP', 'EARTH')
f_leader_data['Coord_Transform'] = xforms[f_leader_data['Coord_Transform']]
if f_leader_data['Coord_Transform_LSB'][5] == '1':
f_leader_data['Tilts_Used'] = 'Yes'
else:
f_leader_data['Tilts_Used'] = 'No'
if f_leader_data['Coord_Transform_LSB'][6] == '1':
f_leader_data['3-Beam_Solution_Used'] = 'Yes'
else:
f_leader_data['3-Beam_Solution_Used'] = 'No'
if f_leader_data['Coord_Transform_LSB'][7] == '1':
f_leader_data['Bin_Mapping_Used'] = 'Yes'
else:
f_leader_data['Bin_Mapping_Used'] = 'No'
f_leader_data['Heading_Alignment_Hundredths_of_Deg'] = struct.unpack('<h', bstream[offset+26:offset+28])[0]
f_leader_data['Heading_Bias_Hundredths_of_Deg'] = struct.unpack('<h', bstream[offset+28:offset+30])[0]
f_leader_data['Sensor_Source_Byte'] = bitstrLE(bstream[offset+30])
if f_leader_data['Sensor_Source_Byte'][1] == '1':
f_leader_data['Calculate_EC_from_ED_ES_and_ET'] = 'Yes'
else:
f_leader_data['Calculate_EC_from_ED_ES_and_ET'] = 'No'
if f_leader_data['Sensor_Source_Byte'][2] == '1':
f_leader_data['Uses_ED_from_depth_sensor'] = 'Yes'
else:
f_leader_data['Uses_ED_from_depth_sensor'] = 'No'
if f_leader_data['Sensor_Source_Byte'][3] == '1':
f_leader_data['Uses_EH_from_transducer_heading_sensor'] = 'Yes'
else:
f_leader_data['Uses_EH_from_transducer_heading_sensor'] = 'No'
if f_leader_data['Sensor_Source_Byte'][4] == '1':
f_leader_data['Uses_EP_from_transducer_pitch_sensor'] = 'Yes'
else:
        f_leader_data['Uses_EP_from_transducer_pitch_sensor'] = 'No'
if f_leader_data['Sensor_Source_Byte'][5] == '1':
f_leader_data['Uses_ER_from_transducer_roll_sensor'] = 'Yes'
else:
f_leader_data['Uses_ER_from_transducer_roll_sensor'] = 'No'
if f_leader_data['Sensor_Source_Byte'][6] == '1':
f_leader_data['Uses_ES_from_conductivity_sensor'] = 'Yes'
else:
f_leader_data['Uses_ES_from_conductivity_sensor'] = 'No'
if f_leader_data['Sensor_Source_Byte'][7] == '1':
f_leader_data['Uses_ET_from_transducer_temperature_sensor'] = 'Yes'
else:
f_leader_data['Uses_ET_from_transducer_temperature_sensor'] = 'No'
f_leader_data['Sensor_Avail_Byte'] = bitstrLE(bstream[offset+31])
if f_leader_data['Sensor_Avail_Byte'][1] == '1':
f_leader_data['Speed_of_sound_sensor_available'] = 'Yes'
else:
f_leader_data['Speed_of_sound_sensor_available'] = 'No'
if f_leader_data['Sensor_Avail_Byte'][2] == '1':
f_leader_data['Depth_sensor_available'] = 'Yes'
else:
f_leader_data['Depth_sensor_available'] = 'No'
if f_leader_data['Sensor_Avail_Byte'][3] == '1':
f_leader_data['Heading_sensor_available'] = 'Yes'
else:
f_leader_data['Heading_sensor_available'] = 'No'
if f_leader_data['Sensor_Avail_Byte'][4] == '1':
f_leader_data['Pitch_sensor_available'] = 'Yes'
else:
f_leader_data['Pitch_sensor_available'] = 'No'
if f_leader_data['Sensor_Avail_Byte'][5] == '1':
f_leader_data['Roll_sensor_available'] = 'Yes'
else:
f_leader_data['Roll_sensor_available'] = 'No'
if f_leader_data['Sensor_Avail_Byte'][6] == '1':
f_leader_data['Conductivity_sensor_available'] = 'Yes'
else:
f_leader_data['Conductivity_sensor_available'] = 'No'
if f_leader_data['Sensor_Avail_Byte'][7] == '1':
f_leader_data['Temperature_sensor_available'] = 'Yes'
else:
f_leader_data['Temperature_sensor_available'] = 'No'
f_leader_data['Bin_1_distance_cm'] = struct.unpack('<h', bstream[offset+32:offset+34])[0]
f_leader_data['Xmit_pulse_length_cm'] = struct.unpack('<h', bstream[offset+34:offset+36])[0]
f_leader_data['Ref_Lyr_Avg_Starting_cell'] = bstream[offset+36]
f_leader_data['Ref_Lyr_Avg_Ending_cell'] = bstream[offset+37]
f_leader_data['False_Target_Threshold'] = bstream[offset+38]
f_leader_data['Transmit_lag_distance_cm'] = struct.unpack('<h', bstream[offset+40:offset+42])[0]
f_leader_data['CPU_Board_Serial_Number'] = ""
for i in range(8):
f_leader_data['CPU_Board_Serial_Number'] = f_leader_data['CPU_Board_Serial_Number'] + \
("%x" % bstream[offset+42+i])
f_leader_data['System_Bandwidth'] = struct.unpack('<h', bstream[offset+50:offset+52])[0]
f_leader_data['System_Power'] = bstream[offset+52]
f_leader_data['Base_Frequency_Index'] = bstream[offset+53]
# TODO these two need to be interpreted as spare if WH ADCP
# rawBytes, f_leader_data['Serial Number for Remus only'] = struct.unpack('<H',infile.read(2))[0]
# f_leader_data['Beam Angle for H-ADCP only'] = "%g" % infile.read(1)[0]
return f_leader_data
def parse_TRDI_variable_leader(bstream, offset):
"""
parse the Variable Leader section of data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: dictionary of readable variable leader data
"""
v_leader_data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 128:
print("expected variable leader ID, instead found %g", leader_id)
return -1
v_leader_data['Ensemble_Number'] = struct.unpack('<H', bstream[offset+2:offset+4])[0]
v_leader_data['Year'] = bstream[offset+4]
if v_leader_data['Year'] < 50: # circa 2000
v_leader_data['Year'] += 2000
else:
v_leader_data['Year'] += 1900
v_leader_data['Month'] = bstream[offset+5]
v_leader_data['Day'] = bstream[offset+6]
v_leader_data['Hour'] = bstream[offset+7]
v_leader_data['Minute'] = bstream[offset+8]
v_leader_data['Second'] = bstream[offset+9]
v_leader_data['Hundredths'] = bstream[offset+10]
v_leader_data['Ensemble_#_MSB'] = bstream[offset+11]
v_leader_data['Ensemble_Number'] = v_leader_data['Ensemble_Number']+(v_leader_data['Ensemble_#_MSB'] << 16)
v_leader_data['timestr'] = "%04d:%02d:%02d %02d:%02d:%02d.%03d" % (
v_leader_data['Year'], v_leader_data['Month'],
v_leader_data['Day'], v_leader_data['Hour'], v_leader_data['Minute'],
v_leader_data['Second'], v_leader_data['Hundredths'])
# compute time and time2
jd = julian(v_leader_data['Year'], v_leader_data['Month'], v_leader_data['Day'],
v_leader_data['Hour'], v_leader_data['Minute'], v_leader_data['Second'],
v_leader_data['Hundredths'])
v_leader_data['dtobj'] = dt.datetime(v_leader_data['Year'], v_leader_data['Month'], v_leader_data['Day'],
v_leader_data['Hour'], v_leader_data['Minute'], v_leader_data['Second'],
v_leader_data['Hundredths']*10000)
# centiseconds * 10000 = microseconds
jddt = ajd(v_leader_data['dtobj'])
v_leader_data['julian_day_from_as_datetime_object'] = jddt
v_leader_data['julian_day_from_julian'] = jd
# v_leader_data['time'] = jd
v_leader_data['EPIC_time'] = int(math.floor(jd))
v_leader_data['EPIC_time2'] = int((jd - math.floor(jd))*(24*3600*1000))
v_leader_data['BIT_Result_Byte_13'] = bitstrLE(bstream[offset+12])
v_leader_data['Demod_1_error_bit'] = int(v_leader_data['BIT_Result_Byte_13'][3])
v_leader_data['Demod_0_error_bit'] = int(v_leader_data['BIT_Result_Byte_13'][4])
v_leader_data['Timing_Card_error_bit'] = int(v_leader_data['BIT_Result_Byte_13'][6])
v_leader_data['Speed_of_Sound'] = struct.unpack('<H', bstream[offset+14:offset+16])[0]
v_leader_data['Depth_of_Transducer'] = struct.unpack('<H', bstream[offset+16:offset+18])[0]
v_leader_data['Heading, Pitch, Roll units'] = "hundredths_of_a_degree"
v_leader_data['Heading'] = struct.unpack('<H', bstream[offset+18:offset+20])[0]
v_leader_data['Pitch'] = struct.unpack('<h', bstream[offset+20:offset+22])[0]
v_leader_data['Roll'] = struct.unpack('<h', bstream[offset+22:offset+24])[0]
v_leader_data['Salinity'] = struct.unpack('<H', bstream[offset+24:offset+26])[0]
v_leader_data['Temperature'] = struct.unpack('<H', bstream[offset+26:offset+28])[0]
v_leader_data['MPT_minutes'] = bstream[offset+28]
v_leader_data['MPT_seconds'] = bstream[offset+29]
v_leader_data['MPT_hundredths'] = bstream[offset+30]
v_leader_data['H/Hdg_Std_Dev'] = bstream[offset+31]
v_leader_data['P/Pitch_Std_Dev'] = bstream[offset+32]
v_leader_data['R/Roll_Std_Dev'] = bstream[offset+33]
# the V Series PDO Output is different for the ADC channels
# V PD0 this is ADC Channel 0 not used
v_leader_data['Xmit_Current'] = bstream[offset+34] # ADC Channel 0
# V PD0 this is ADC Channel 1 XMIT Voltage
v_leader_data['Xmit_Voltage'] = bstream[offset+35] # ADC Channel 1
# V PD0 this is ADC Channel 2 not used
v_leader_data['Ambient_Temp'] = bstream[offset+36] # ADC Channel 2
# V PD0 this is ADC Channel 3 not used
v_leader_data['Pressure_(+)'] = bstream[offset+37] # ADC Channel 3
# V PD0 this is ADC Channel 4 not used
v_leader_data['Pressure_(-)'] = bstream[offset+38] # ADC Channel 4
# V PD0 this is ADC Channel 5 not used
v_leader_data['Attitude_Temp'] = bstream[offset+39] # ADC Channel 5
# V PD0 this is ADC Channel 6 not used
v_leader_data['Attitude'] = bstream[offset+40] # ADC Channel 6
# V PD0 this is ADC Channel 7 not used
v_leader_data['Contamination_Sensor'] = bstream[offset+41] # ADC Channel 7
v_leader_data['Error_Status_Word_Low_16_bits_LSB'] = bitstrLE(bstream[offset+42])
v_leader_data['Bus_Error_exception'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][7])
v_leader_data['Address_Error_exception'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][6])
v_leader_data['Illegal_Instruction_exception'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][5])
v_leader_data['Zero_Divide_exception'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][4])
v_leader_data['Emulator_exception'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][3])
v_leader_data['Unassigned_exception'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][2])
v_leader_data['Watchdog_restart_occurred'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][1])
v_leader_data['Battery_Saver_power'] = int(v_leader_data['Error_Status_Word_Low_16_bits_LSB'][0])
v_leader_data['Error_Status_Word_Low_16_bits_MSB'] = bitstrLE(bstream[offset+43])
v_leader_data['Pinging'] = int(v_leader_data['Error_Status_Word_Low_16_bits_MSB'][7])
v_leader_data['Cold_Wakeup_occurred'] = int(v_leader_data['Error_Status_Word_Low_16_bits_MSB'][1])
v_leader_data['Unknown_Wakeup_occurred'] = int(v_leader_data['Error_Status_Word_Low_16_bits_MSB'][0])
v_leader_data['Error_Status_Word_High_16_bits_LSB'] = bitstrLE(bstream[offset+44])
v_leader_data['Clock_Read_error_occurred'] = int(v_leader_data['Error_Status_Word_High_16_bits_LSB'][7])
v_leader_data['Unexpected_alarm'] = int(v_leader_data['Error_Status_Word_High_16_bits_LSB'][6])
v_leader_data['Clock_jump_forward'] = int(v_leader_data['Error_Status_Word_High_16_bits_LSB'][5])
v_leader_data['Clock_jump_backward'] = int(v_leader_data['Error_Status_Word_High_16_bits_LSB'][4])
    v_leader_data['Error_Status_Word_High_16_bits_MSB'] = bitstrLE(bstream[offset+45])
v_leader_data['Power_Fail_(Unrecorded)'] = int(v_leader_data['Error_Status_Word_High_16_bits_MSB'][4])
v_leader_data['Spurious_level_4_intr_(DSP)'] = int(v_leader_data['Error_Status_Word_High_16_bits_MSB'][3])
v_leader_data['Spurious_level_5_intr_(UART)'] = int(v_leader_data['Error_Status_Word_High_16_bits_MSB'][2])
v_leader_data['Spurious_level_6_intr_(CLOCK)'] = int(v_leader_data['Error_Status_Word_High_16_bits_MSB'][1])
v_leader_data['Level_7_interrupt_occurred'] = int(v_leader_data['Error_Status_Word_High_16_bits_MSB'][0])
# pressure of the water at the transducer head relative to one atmosphere (sea level)
# v_leader_data['Pressure word byte 1'] = bitstrLE(bstream[offset+48])
# v_leader_data['Pressure word byte 2'] = bitstrLE(bstream[offset+49])
# v_leader_data['Pressure word byte 3'] = bitstrLE(bstream[offset+50])
# v_leader_data['Pressure word byte 4'] = bitstrLE(bstream[offset+51])
v_leader_data['Pressure_deca-pascals'] = bstream[offset+48]+(bstream[offset+49] << 8)+(bstream[offset+50] << 16) + \
(bstream[offset+51] << 24)
v_leader_data['Pressure_variance_deca-pascals'] = bstream[offset+52]+(bstream[offset+53] << 8) + \
(bstream[offset+54] << 16)+(bstream[offset+55] << 24)
v_leader_data['RTC_Century'] = bstream[offset+57]
v_leader_data['RTC_Year'] = bstream[offset+58]
v_leader_data['RTC_Month'] = bstream[offset+59]
v_leader_data['RTC_Day'] = bstream[offset+60]
v_leader_data['RTC_Hour'] = bstream[offset+61]
v_leader_data['RTC_Minute'] = bstream[offset+62]
v_leader_data['RTC_Second'] = bstream[offset+63]
v_leader_data['RTC_Hundredths'] = bstream[offset+64]
return v_leader_data
def parse_TRDI_velocity(bstream, offset, ncells, nbeams):
"""
parse the velocity data, each velocity value is stored as a two byte, twos complement integer [-32768 to 32767]
with the LSB sent first. Units are mm/s. A value of -32768 = 0x8000 is a bad velocity value
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:param int nbeams: number of acoustic beams
:return: velocity data as a beam x cell numpy array of ints
"""
if bstream[offset+1] != 1:
print("expected velocity ID, instead found %g", bstream[offset+1])
return -1
# start with a numpy array of bad values
data = np.ones((nbeams, ncells), dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
for ibeam in range(nbeams):
data[ibeam, icell] = struct.unpack('<h', bstream[offset+ibyte:offset+ibyte+2])[0]
ibyte = ibyte+2
return data
def parse_TRDI_correlation(bstream, offset, ncells, nbeams):
"""
parse the correlation data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:param int nbeams: number of acoustic beams
:return: correlation data as a beam x cell numpy array of ints
"""
if bstream[offset+1] != 2:
print("expected correlation ID, instead found %g", bstream[offset+1])
return -1
# start with a numpy array of bad values
data = np.ones((nbeams, ncells), dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
for ibeam in range(nbeams):
data[ibeam, icell] = bstream[offset+ibyte]
ibyte = ibyte+1
return data
def parse_TRDI_intensity(bstream, offset, ncells, nbeams):
"""
parse the intensity data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:param int nbeams: number of acoustic beams
:return: intensity data as a beam x cell numpy array of ints
"""
if bstream[offset+1] != 3:
print("expected intensity ID, instead found %g", bstream[offset+1])
return -1
# start with a numpy array of bad values
data = np.ones((nbeams, ncells), dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
for ibeam in range(nbeams):
data[ibeam, icell] = bstream[offset+ibyte]
ibyte = ibyte+1
return data
def parse_TRDI_percent_good(bstream, offset, ncells, nbeams):
"""
parse the Percent Good data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:param int nbeams: number of acoustic beams
:return: percent good data as a beam x cell numpy array of ints
"""
if bstream[offset+1] != 4:
print("expected intensity ID, instead found %g", bstream[offset+1])
return -1
# start with a numpy array of bad values
data = np.ones((nbeams, ncells), dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
for ibeam in range(nbeams):
data[ibeam, icell] = bstream[offset+ibyte]
ibyte = ibyte+1
return data
def parse_TRDI_transformation_matrix(bstream, offset, nbeams):
"""
parse the transformation matrix data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int nbeams: number of acoustic beams
:return: transformation matrix data as a beam x 3 numpy array of ints
"""
if bstream[offset+1] != 50: # \x00\x32
print("expected transformation matrix ID, instead found %g", bstream[offset+1])
return -1
# start with a numpy array of bad values
data = np.zeros((nbeams, 3), dtype=int)
ibyte = 2
for iaxis in range(3):
for ibeam in range(nbeams):
data[ibeam, iaxis] = struct.unpack('<h', bstream[offset+ibyte:offset+ibyte+2])[0]
ibyte = ibyte+2
return data
def parse_TRDI_vertical_ping_setup(bstream, offset):
"""
parse the TRDI V ping setup data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: a dict of readable ping setup settings
"""
v_ping_setup_data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 28673: # \x70\x01 stored little endian
print("expected V Series Ping Setup ID, instead found %g" % leader_id)
return -1
v_ping_setup_data['Ensemble_Interval_ms'] = bstream[offset+4]+(bstream[offset+5] << 8) + (
bstream[offset+6] << 16)+(bstream[offset+7] << 24)
v_ping_setup_data['Number_of_Pings'] = struct.unpack('<H', bstream[offset+10:offset+12])[0]
v_ping_setup_data['Time_Between_Pings_ms'] = bstream[offset+10]+(bstream[offset+11] << 8) + (
bstream[offset+12] << 16)+(bstream[offset+13] << 24)
v_ping_setup_data['Offset_Between_Ping_Groups_ms'] = bstream[offset+14]+(bstream[offset+15] << 8) + (
bstream[offset+16] << 16)+(bstream[offset+17] << 24)
v_ping_setup_data['Ping_Sequence_Number'] = struct.unpack('<h', bstream[offset+22:offset+24])[0]
v_ping_setup_data['Ambiguity_Velocity'] = struct.unpack('<h', bstream[offset+24:offset+26])[0]
v_ping_setup_data['RX_Gain'] = bstream[offset+26]
v_ping_setup_data['RX_Beam_Mask'] = bstream[offset+27]
v_ping_setup_data['TX_Beam_Mask'] = bstream[offset+28]
v_ping_setup_data['Ensemble_Offset'] = bstream[offset+30]+(bstream[offset+31] << 8)+(bstream[offset+32] << 16) + (
bstream[offset+33] << 24)
v_ping_setup_data['Ensemble_Count'] = bstream[offset+34]+(bstream[offset+35] << 8)
v_ping_setup_data['Deployment_Start_Century'] = bstream[offset+36]
v_ping_setup_data['Deployment_Start_Year'] = bstream[offset+37]
v_ping_setup_data['Deployment_Start_Month'] = bstream[offset+38]
v_ping_setup_data['Deployment_Start_Day'] = bstream[offset+39]
v_ping_setup_data['Deployment_Start_Hour'] = bstream[offset+40]
v_ping_setup_data['Deployment_Start_Minute'] = bstream[offset+41]
v_ping_setup_data['Deployment_Start_Second'] = bstream[offset+42]
v_ping_setup_data['Deployment_Start_Hundredths'] = bstream[offset+43]
return v_ping_setup_data
def parse_TRDI_vertical_system_configuration(bstream, offset):
"""
parse the TRDI V system configuration data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: a dict of readable system configuration settings
"""
v_sys_config_data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 28672: # \x70\x00 stored little endian
print("expected V Series System Config ID, instead found %g" % leader_id)
return -1
v_sys_config_data['Firmware_Version'] = "%02d:%02d:%02d:%02d" % (bstream[offset+2], bstream[offset+3],
bstream[offset+4], bstream[offset+5])
v_sys_config_data['System_Frequency'] = bstream[offset+6]+(bstream[offset+7] << 8) + (
bstream[offset+8] << 16)+(bstream[offset+9] << 24)
v_sys_config_data['Pressure_Rating'] = struct.unpack('<H', bstream[offset+10:offset+12])[0]
return v_sys_config_data
def parse_TRDI_vertical_beam_leader(bstream, offset):
"""
parse the TRDI V beam leader data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: a dict of readable beam leader settings
"""
v_beam_leader_data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 3841: # \x0f\x01 stored little endian
print("expected Vertical Beam Leader ID, instead found %g" % leader_id)
return -1
v_beam_leader_data['Vertical_Depth_Cells'] = struct.unpack('<H', bstream[offset+2:offset+4])[0]
v_beam_leader_data['Vertical_Pings'] = struct.unpack('<H', bstream[offset+4:offset+6])[0]
v_beam_leader_data['Vertical_Depth_Cell_Size_cm'] = struct.unpack('<H', bstream[offset+6:offset+8])[0]
v_beam_leader_data['Vertical_First_Cell_Range_cm'] = struct.unpack('<H', bstream[offset+8:offset+10])[0]
v_beam_leader_data['Vertical_Mode'] = struct.unpack('<H', bstream[offset+10:offset+12])[0]
# 1 = low resolution slant beam cells = vertical beam cells
# 2 = High resolution, dedicated surface tracking ping with 4:1 transmit/receive ratio or larger
v_beam_leader_data['Vertical_Transmit_cm'] = struct.unpack('<H', bstream[offset+12:offset+14])[0]
v_beam_leader_data['Vertical_Lag_Length_cm'] = struct.unpack('<H', bstream[offset+14:offset+16])[0]
v_beam_leader_data['Transmit_Code_Elements'] = struct.unpack('<H', bstream[offset+16:offset+18])[0]
v_beam_leader_data['Ping_Offset_Time'] = struct.unpack('<H', bstream[offset+30:offset+32])[0]
return v_beam_leader_data
def parse_TRDI_vertical_velocity(bstream, offset, ncells):
"""
parse the vertical beam velocity data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:return: vertical beam velocity data as a numpy array of ints
"""
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 2560: # \x0a\x00 stored little endian
print("expected Vertical Beam velocity ID, instead found %g" % leader_id)
return -1
# start with a numpy array of bad values
data = np.ones(ncells, dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
data[icell] = struct.unpack('<h', bstream[offset+ibyte:offset+ibyte+2])[0]
ibyte += 2
return data
def parse_TRDI_vertical_correlation(bstream, offset, ncells):
"""
parse the vertical beam correlation data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:return: vertical beam correlation data as a numpy array of ints
"""
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 2816: # \x0b\x00 stored little endian
print("expected Vertical Beam correlation ID, instead found %g" % leader_id)
return -1
# start with a numpy array of bad values
data = np.ones((ncells,), dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
data[icell] = bstream[offset+ibyte]
ibyte += 1
return data
def parse_TRDI_vertical_intensity(bstream, offset, ncells):
"""
parse the vertical beam intensity data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:return: vertical beam intensity data as a numpy array of ints
"""
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 3072: # \x0c\x00 stored little endian
print("expected Vertical Beam intensity ID, instead found %g" % leader_id)
return -1
# start with a numpy array of bad values
data = np.ones((ncells, ), dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
data[icell] = bstream[offset+ibyte]
ibyte += 1
return data
def parse_TRDI_vertical_percent_good(bstream, offset, ncells):
"""
parse the vertical beam percent good data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int ncells: number of cells in the profile
:return: vertical beam percent good data as a numpy array of ints
"""
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 3328: # \x0d\x00 stored little endian
print("expected Vertical Beam percent good ID, instead found %g" % leader_id)
return -1
# start with a numpy array of bad values
data = np.ones((ncells,), dtype=int) * -32768
ibyte = 2
for icell in range(ncells):
data[icell] = bstream[offset+ibyte]
ibyte += 1
return data
def parse_TRDI_event_log(bstream, offset):
"""
parse the event log data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: event log data as a dict
"""
v_event_log_data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 28676: # \x70\x04 stored little endian
print("expected V Series Event Log ID, instead found %g" % leader_id)
return -1
v_event_log_data['Fault_Count'] = struct.unpack('<H', bstream[offset+2:offset+4])[0]
# TODO read the fault codes and output to a text file
return v_event_log_data
def parse_TRDI_wave_parameters(bstream, offset):
"""
parse the wave parameters (wave statistics)
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: wave data as a dict
"""
data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 11: # \x00\x0b stored little endian
print("expected Wave Parameters ID, instead found %g" % leader_id)
return -1
data['Hs'] = struct.unpack('<H', bstream[offset+2:offset+4])[0]
data['Tp'] = struct.unpack('<H', bstream[offset+4:offset+6])[0]
data['Dp'] = struct.unpack('<H', bstream[offset+6:offset+8])[0]
data['Dm'] = struct.unpack('<H', bstream[offset+16:offset+18])[0]
data['SHmax'] = struct.unpack('<H', bstream[offset+30:offset+32])[0]
data['SH13'] = struct.unpack('<H', bstream[offset+32:offset+34])[0]
data['SH10'] = struct.unpack('<H', bstream[offset+34:offset+36])[0]
data['STmax'] = struct.unpack('<H', bstream[offset+36:offset+38])[0]
data['ST13'] = struct.unpack('<H', bstream[offset+38:offset+40])[0]
data['ST10'] = struct.unpack('<H', bstream[offset+40:offset+42])[0]
data['T01'] = struct.unpack('<H', bstream[offset+42:offset+44])[0]
data['Tz'] = struct.unpack('<H', bstream[offset+44:offset+46])[0]
data['Tinv1'] = struct.unpack('<H', bstream[offset+46:offset+48])[0]
data['S0'] = struct.unpack('<H', bstream[offset+48:offset+50])[0]
data['Source'] = bstream[offset+52]
return data
def parse_TRDI_wave_sea_swell(bstream, offset):
"""
parse the wave sea swell parameters (wave statistics)
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:return: wave sea swell data as a dict
"""
data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 12: # \x00\x0c stored little endian
print("expected Wave Sea and Swell ID, instead found %g" % leader_id)
return -1
data['HsSea'] = struct.unpack('<H', bstream[offset+2:offset+4])[0]
data['HsSwell'] = struct.unpack('<H', bstream[offset+4:offset+6])[0]
data['TpSea'] = struct.unpack('<H', bstream[offset+6:offset+8])[0]
data['TpSwell'] = struct.unpack('<H', bstream[offset+8:offset+10])[0]
data['DpSea'] = struct.unpack('<H', bstream[offset+10:offset+12])[0]
data['DpSwell'] = struct.unpack('<H', bstream[offset+12:offset+14])[0]
data['SeaSwellPeriod'] = struct.unpack('<H', bstream[offset+44:offset+46])[0]
return data
def parse_TRDI_bottom_track(bstream, offset, nbeams):
"""
parse the bottom track data
:param bytes bstream: an entire ensemble
:param int offset: the location in the bytes object of the first byte of this data format
:param int nbeams: number of acoustic beams
:return: bottom track data as a dict
"""
data = {}
leader_id = struct.unpack('<H', bstream[offset:offset+2])[0]
if leader_id != 1536: # \x00\x06 stored little endian
print("expected Bottom Track ID, instead found %g" % leader_id)
return -1
data['Pings_per_ensemble'] = struct.unpack('<H', bstream[offset+2:offset+4])[0]
data['delay_before_reacquire'] = struct.unpack('<H', bstream[offset+4:offset+6])[0]
data['Corr_Mag_Min'] = bstream[offset+6]
data['Eval_Amp_Min'] = bstream[offset+7]
data['PGd_Minimum'] = bstream[offset+8]
data['Mode'] = bstream[offset+9]
data['Err_Vel_Max'] = struct.unpack('<H', bstream[offset+10:offset+12])[0]
data['BT_Range_LSB'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 16
for ibeam in range(nbeams):
data['BT_Range_LSB'][ibeam] = struct.unpack('<h', bstream[offset+ibyte:offset+ibyte+2])[0]
ibyte = ibyte+2
# the meaning and direction depends on the coordinate system used
data['BT_Vel'] = np.ones(nbeams, dtype=float) * 1e35
ibyte = 24
for ibeam in range(nbeams):
data['BT_Vel'][ibeam] = struct.unpack('<h', bstream[offset+ibyte:offset+ibyte+2])[0]
ibyte = ibyte+2
data['BT_Corr'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 32
for ibeam in range(nbeams):
data['BT_Corr'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['BT_Amp'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 36
for ibeam in range(nbeams):
data['BT_Amp'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['BT_PGd'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 40
for ibeam in range(nbeams):
data['BT_PGd'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['Ref_Layer_Min'] = struct.unpack('<H', bstream[offset+44:offset+46])[0]
data['Ref_Layer_Near'] = struct.unpack('<H', bstream[offset+46:offset+48])[0]
data['Ref_Layer_Far'] = struct.unpack('<H', bstream[offset+48:offset+50])[0]
data['Ref_Layer_Vel'] = np.ones(nbeams, dtype=float) * 1e35
ibyte = 50
for ibeam in range(nbeams):
data['Ref_Layer_Vel'][ibeam] = struct.unpack('<h', bstream[offset+ibyte:offset+ibyte+2])[0]
ibyte = ibyte+2
data['Ref_Layer_Corr'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 58
for ibeam in range(nbeams):
data['Ref_Layer_Corr'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['Ref_Layer_Amp'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 62
for ibeam in range(nbeams):
data['Ref_Layer_Amp'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['Ref_Layer_PGd'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 66
for ibeam in range(nbeams):
data['Ref_Layer_PGd'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['BT_Max_Depth'] = struct.unpack('<H', bstream[offset+70:offset+72])[0]
data['RSSI_Amp'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 72
for ibeam in range(nbeams):
data['RSSI_Amp'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['GAIN'] = bstream[offset+76]
data['BT_Range_MSB'] = np.ones(nbeams, dtype=int) * -32768
ibyte = 77
for ibeam in range(nbeams):
data['BT_Range_MSB'][ibeam] = bstream[offset+ibyte]
ibyte = ibyte+1
data['BT_Range'] = np.ones(nbeams, dtype=int) * -32768
for ibeam in range(nbeams):
data['BT_Range'][ibeam] = data['BT_Range_LSB'][ibeam]+(data['BT_Range_MSB'][ibeam] << 16)
return data
def __computeChecksum(ensemble):
"""Compute a checksum from header, length, and ensemble"""
cs = 0
for byte in range(len(ensemble)-2):
cs += ensemble[byte]
return cs & 0xffff
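# Example (illustrative sketch, not part of the original module): a PD0 ensemble stores
# its modulo-65536 checksum in its last two bytes, little endian, so a complete ensemble
# can be validated against this function like so:
#     stored = struct.unpack('<H', ensemble[-2:])[0]
#     ok = (stored == __computeChecksum(ensemble))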
def julian(year, month, day, hour, mn, sec, hund):
"""
    convert a calendar date and time (year through hundredths of a second) to a decimal Julian day
reference:
http://stackoverflow.com/questions/31142181/calculating-julian-date-in-python/41769526#41769526
and R. Signell's old matlab conversion code julian.m and hms2h.m
:param int year: year
:param int month: month
:param int day: day
:param int hour: hour
:param int mn: minute
:param int sec: second
:param int hund: hundredth of second
:return: julian day
"""
#
#
decimalsec = sec+hund/100
decimalhrs = hour+mn/60+decimalsec/3600
mo = month+9
yr = year-1
if month > 2:
mo -= 3
yr = year
c = math.floor(yr/100)
yr = yr - c*100
d = day
j = math.floor((146097*c)/4)+math.floor((1461*yr)/4) + \
math.floor((153*mo + 2)/5)+d+1721119
# If you want julian days to start and end at noon,
# replace the following line with:
# j=j+(decimalhrs-12)/24;
j = j+decimalhrs/24
return j
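# Worked example (sketch, not from the original source): with this algorithm
# julian(2020, 1, 1, 12, 0, 0, 0) evaluates to 2458850.5, i.e. the integer day
# boundary falls at midnight rather than noon unless the substitution noted above is made.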
def analyzepd0file(pd0file, verbose=False):
"""
determine the input file size, read some ensembles, make an estimate of the number of ensembles within, return the
data from the first ensemble.
    :param str pd0file: path and file name to raw ADCP data file in pd0 format
:param bool verbose: output ensemble information
:return: number of ensembles in file, number of bytes in each ensemble, data from the first ensemble,
number of bytes to the start of the data
"""
infile = open(pd0file, 'rb')
while infile.tell() < 3000:
b1 = infile.read(1)
if b1 == b'\x7f':
b2 = infile.read(1)
if b2 == b'\x7f':
break
else:
print('Desired TRDI 7f7f ID not found within 3 kB from beginning of the file')
infile.close()
sys.exit(1)
start_of_data = infile.tell()-2
if start_of_data != 0:
print('data starts %d bytes into the file' % start_of_data)
infile.seek(start_of_data)
# need to read the header from the file to know the ensemble size
header = read_TRDI_header(infile)
if header['sourceID'] != b'\x7f':
print('error - this is not a currents file')
infile.close()
# number of bytes per ensemble in the header does not include the checksum
ens_len = header['nbytesperens']+2
print('ensemble length = %g' % ens_len)
print(header)
# it is faster to define the netCDF file with a known length
# for this we need to estimate how many ensembles we will be reading
# for some reason, sys.getsizeof(infile) does not report the true length
# of the input file, so we will go to the end and see how far we have gone
# there is a problem though. While TRDI's documentation says the V Series
# System Configuration data is always sent, this is not the case, so reading
# only the first ensemble will not give the ensemble size typical over the
# entire file
    # rewind and read several ensembles, because further into the file the ensemble
# length can change on files output from Velocity
infile.seek(start_of_data)
nens2check = 5
nbytesperens = [0 for i in range(nens2check)]
ndatatypes = [0 for i in range(nens2check)]
for i in range(nens2check):
fileposn = infile.tell()
header = read_TRDI_header(infile)
ens_len = header['nbytesperens']+2
infile.seek(fileposn)
ens_data, ens_error = parse_TRDI_ensemble(infile.read(ens_len), verbose)
if ens_error is not None:
print('problem reading the first ensemble: ' + ens_error)
# infile.close()
# sys.exit(1)
if i == 0:
first_ens_data = ens_data
print('ensemble %d has %d bytes and %d datatypes' % (ens_data['VLeader']['Ensemble_Number'],
ens_data['Header']['nbytesperens'],
ens_data['Header']['ndatatypes']))
nbytesperens[i] = ens_data['Header']['nbytesperens']+2
ndatatypes[i] = ens_data['Header']['ndatatypes']
# the guess here is that if the first two ensembles are not the same,
# it's the second ensemble that is representative of the data
if nbytesperens[0] != nbytesperens[1]:
ens_len = nbytesperens[1]
else:
ens_len = nbytesperens[0]
infile.seek(0, 2)
nbytesinfile = infile.tell()
max_ens = (nbytesinfile/ens_len)-1
print('estimating %g ensembles in file using a %d ensemble size' % (max_ens, ens_len))
infile.close()
print(ens_data['Header'])
print('ensemble length = %g' % ens_len)
print('estimating %g ensembles in file' % max_ens)
# return max_ens, ens_len, ens_data, start_of_data
return max_ens, ens_len, first_ens_data, start_of_data
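# Example (illustrative sketch, not part of the original module): from an interactive
# session a raw file can be inspected with
#     nens, ens_len, first_ens, start_of_data = analyzepd0file('deployment.pd0', verbose=True)
# where 'deployment.pd0' is a placeholder path to a PD0 file.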
def __main():
print('%s running on python %s' % (sys.argv[0], sys.version))
if len(sys.argv) < 2:
print("%s usage:" % sys.argv[0])
print("TRDIpd0tonetcdf pd0file cdfFile [good_ens] [serial_number] [time_type] [delta_t]")
sys.exit(1)
try:
pd0file = sys.argv[1]
    except IndexError:
print('error - pd0 input file name missing')
sys.exit(1)
try:
cdfFile = sys.argv[2]
    except IndexError:
print('error - netcdf output file name missing')
sys.exit(1)
print('Converting %s to %s' % (pd0file, cdfFile))
try:
good_ens = [int(sys.argv[3]), int(sys.argv[4])]
    except (IndexError, ValueError):
print('No starting and ending ensembles specified, processing entire file')
good_ens = [0, -1]
try:
serial_number = sys.argv[5]
    except IndexError:
print('No serial number provided')
serial_number = "unknown"
try:
time_type = sys.argv[6]
    except IndexError:
print('Time type will be CF')
time_type = "CF"
try:
delta_t = sys.argv[7]
    except IndexError:
print('delta_t will be None')
delta_t = None
print('Start file conversion at ', dt.datetime.now())
convert_pd0_to_netcdf(pd0file, cdfFile, good_ens, serial_number, time_type, delta_t)
print('Finished file conversion at ', dt.datetime.now())
if __name__ == "__main__":
__main()
| ADCPy | /ADCPy-0.1.1.tar.gz/ADCPy-0.1.1/adcpy/TRDIstuff/TRDIpd0tonetcdf.py | TRDIpd0tonetcdf.py |
# ADCT
(A) (D)ata (C)lustering (T)ool
A Python package that performs and illustrates a few clustering methods.
Required Packages:
- numpy
- matplotlib
- random
To use the package:
import ADCT
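
A minimal usage sketch (the `kmeans` call and its signature below are hypothetical placeholders; check the package source for the actual function names):

```python
import numpy as np
import ADCT

points = np.random.rand(100, 2)   # 100 random points in 2-D
labels = ADCT.kmeans(points, 3)   # hypothetical call: cluster into 3 groups and plot
```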
Future updates will include terminal/pipeline prompts.
| ADCT | /ADCT-1.1.2.tar.gz/ADCT-1.1.2/README.md | README.md |
from setuptools import setup
def readme():
with open('README.md') as f:
read = f.read()
return read
setup(
name="ADCT",
version="1.1.2",
description="A python package capable of performing and illustrating a few clustering methods.",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/JJ8428/ADCT",
author="Naga Venkata Sai Jagjit (JJ) Satti",
author_email="nsatti.sc@gmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7"
],
packages=["ADCT"],
include_package_data=True,
install_requires=["numpy", "matplotlib"],
)
| ADCT | /ADCT-1.1.2.tar.gz/ADCT-1.1.2/setup.py | setup.py |
from ADDPIO import ADDPIO
import RPi.GPIO as GPIO
import sys
#main function
if __name__ == "__main__":
if(len(sys.argv) < 3) :
print 'Usage : python testADDPIO.py ip_address1 ip_address2 (can be same ip address)'
sys.exit()
GPIO.setmode(GPIO.BCM)
# don't need to have pins hooked up
GPIO.setup(23, GPIO.IN)
GPIO.setup(24, GPIO.IN)
# will display the input from GPIO pin 23
print "GPIO pin 23 = " + str(GPIO.input(23))
# will display the input from GPIO pin 24
print "GPIO pin 24 = " + str(GPIO.input(24))
host1 = sys.argv[1]
host2 = sys.argv[2]
a1 = ADDPIO(host1)
a2 = ADDPIO(host2)
# will display '1' if button 2 on host1 is pressed, '0' otherwise
print "host1 ADDPIO BUTTON_2 = " + a1.input(ADDPIO.BUTTON_2,0)
# will display the input 1 from the accelerometer on host1
print "host1 ADDPIO SENSOR_ACCELEROMETER[1] = " + a1.input(ADDPIO.SENSOR_ACCELEROMETER,1)
# will display the input 1 from the pressure sensor on host1
print "host1 ADDPIO SENSOR_PRESSURE[0] = " + a1.input(ADDPIO.SENSOR_PRESSURE,0)
# will light the fake red LED on host1
print "host1 set ADDPIO LED_RED = " + a1.output(ADDPIO.LED_RED,1)
# will set the x value of the touch position on the touchpad region on host1
print "host1 set ADDPIO TOUCH_PAD_X_OUT 128 = " + a1.output(ADDPIO.TOUCH_PAD_X_OUT,128)
# will set the y value of the touch position on the touchpad region on host1
print "host1 set ADDPIO TOUCH_PAD_Y_OUT 128 = " + a1.output(ADDPIO.TOUCH_PAD_Y_OUT,128)
# will display the input 2 from the sensor type 1234 on host1
print "host1 sensor 1234[2] = " + a1.input(1234,2)
# will display '1' if button 1 on host2 is pressed, '0' otherwise
print "host2 ADDPIO BUTTON_1 = " + a2.input(ADDPIO.BUTTON_1,0)
# will display the input 2 from the gyroscope on host2
print "host2 ADDPIO SENSOR_GYROSCOPE[2] = " + a2.input(ADDPIO.SENSOR_GYROSCOPE,2)
# will display the input 0 from the orientation sensor on host2
print "host2 ADDPIO SENSOR_ORIENTATION[0] = " + a2.input(ADDPIO.SENSOR_ORIENTATION,0)
    # will display the x value of the touch position on the touchpad region on host2
    print "host2 ADDPIO TOUCH_PAD_X_IN = " + a2.input(ADDPIO.TOUCH_PAD_X_IN,0)
    # will display the y value of the touch position on the touchpad region on host2
    print "host2 ADDPIO TOUCH_PAD_Y_IN = " + a2.input(ADDPIO.TOUCH_PAD_Y_IN,0)
# will light the fake green LED on host2
print "host2 set ADDPIO LED_GREEN = " + a2.output(ADDPIO.LED_GREEN,1)
# will display "17" next to the text prompt on host2
print "host2 set ADDPIO TEXT 17 = " + a2.output(ADDPIO.TEXT,17)
# will display the input 0 from the sensor type 9999 on host2
print "host2 sensor 9999[0] = " + a2.input(9999,0)
| ADDPIO | /ADDPIO-1.0.3b1.tar.gz/ADDPIO-1.0.3b1/testADDPIO.py | testADDPIO.py |
ADDPIO project
==================
This project allows the Raspberry Pi* to access the sensors (accelerometer, gyroscope, ...)
and other IO of an Android* device, similar to the GPIO library. There is a corresponding
Android app (ADDPIO on the Google Play Store) to run on the Android device(s). The Raspberry
Pi and all Android devices must be connected to the same network. This uses UDP port 6297 to
communicate. Create a new ADDPIO object passing the ip address (this is displayed on the
Android app). The object has input and output functions that each take a type number and a value.
See below for the standard type number symbols or use the number displayed on the Android app.
The Android sensors return an array of values (e.g. x,y,z). For ADDPIO sensor input the value
parameter represents the index into the array of values returned by the sensor. For other input,
the value is ignored.
The Android app has several widgets for IO:
buttons, LEDs, a touchpad, alarm, notification, and text.
Read the ip address and available sensors from the Android app.
from ADDPIO import ADDPIO
myHost = ADDPIO("192.168.0.0")
myValue = myHost.input(ADDPIO.SENSOR_ACCELEROMETER,1)
myValue = myHost.input(12345,47)
myHost.output(ADDPIO.ALARM,1)
myHost.output(ADDPIO.ALARM,0)
See the testADDPIO.py program for an example.
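
For instance, a short polling loop (a sketch using only the API shown above; the
accelerometer indices assume an x,y,z value array) could read all three axes once per second:

import time
from ADDPIO import ADDPIO

phone = ADDPIO("192.168.0.0")  # use the ip address shown in the Android app
while True:
    x = phone.input(ADDPIO.SENSOR_ACCELEROMETER, 0)
    y = phone.input(ADDPIO.SENSOR_ACCELEROMETER, 1)
    z = phone.input(ADDPIO.SENSOR_ACCELEROMETER, 2)
    print(x, y, z)
    time.sleep(1)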
# Android sensors
SENSOR_ACCELEROMETER
SENSOR_AMBIENT_TEMPERATURE
SENSOR_GAME_ROTATION_VECTOR
SENSOR_GEOMAGNETIC_ROTATION_VECTOR
SENSOR_GRAVITY
SENSOR_GYROSCOPE
SENSOR_GYROSCOPE_UNCALIBRATED
SENSOR_HEART_BEAT
SENSOR_HEART_RATE
SENSOR_LIGHT
SENSOR_LINEAR_ACCELERATION
SENSOR_MAGNETIC_FIELD
SENSOR_MAGNETIC_FIELD_UNCALIBRATED
SENSOR_MOTION_DETECT
SENSOR_ORIENTATION
SENSOR_POSE_6DOF
SENSOR_PRESSURE
SENSOR_PROXIMITY
SENSOR_RELATIVE_HUMIDITY
SENSOR_ROTATION_VECTOR
SENSOR_SIGNIFICANT_MOTION
SENSOR_STATIONARY_DETECT
SENSOR_STEP_COUNTER
SENSOR_STEP_DETECTOR
SENSOR_TEMPERATURE
# Android input/output
BUTTON_1 input 0/1
BUTTON_2 input 0/1
LED_RED output 0/1
LED_GREEN output 0/1
LED_BLUE output 0/1
ALARM output 0/1
NOTIFICATION output any number
TEXT output any number
TOUCH_PAD_X_IN input 0-255
TOUCH_PAD_Y_IN input 0-255
TOUCH_PAD_X_OUT output 0-255
TOUCH_PAD_Y_OUT output 0-255
* Raspberry Pi is a trademark of the Raspberry Pi Foundation - http://www.raspberrypi.org
* Android is a trademark of Google Inc.
| ADDPIO | /ADDPIO-1.0.3b1.tar.gz/ADDPIO-1.0.3b1/README.rst | README.rst |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ADDPIO',
version='1.0.3b1',
description='Android IO project',
long_description=long_description,
#url='https://github.com/pypa/sampleproject',
author='Sipenlatt',
author_email='sipenlatt@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='GPIO Android Raspberry Pi sensors IO',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
py_modules=['ADDPIO'],
)
| ADDPIO | /ADDPIO-1.0.3b1.tar.gz/ADDPIO-1.0.3b1/setup.py | setup.py |