summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFuwn <[email protected]>2025-12-01 18:16:09 -0800
committerGitHub <[email protected]>2025-12-01 18:16:09 -0800
commit6c81abf8b182ce677b74cf2c570281ffea7f63f8 (patch)
treec8d289dc53d26b27dac63980fbf332ea97c178d6
downloadrysk-6c81abf8b182ce677b74cf2c570281ffea7f63f8.tar.xz
rysk-6c81abf8b182ce677b74cf2c570281ffea7f63f8.zip
Add files via upload
-rw-r--r--css/bootstrap.min.css7
-rw-r--r--images/after.jpgbin0 -> 369911 bytes
-rw-r--r--images/before.jpgbin0 -> 353605 bytes
-rw-r--r--index.html348
-rw-r--r--js/analysis.js673
-rw-r--r--js/bootstrap.bundle.min.js7
-rw-r--r--js/face-landmarks-detection.js1497
-rw-r--r--js/index.js409
-rw-r--r--js/tf-backend-cpu.js9526
-rw-r--r--js/tf-converter.js29751
-rw-r--r--js/tf-core.js27144
11 files changed, 69362 insertions, 0 deletions
diff --git a/css/bootstrap.min.css b/css/bootstrap.min.css
new file mode 100644
index 0000000..1472dec
--- /dev/null
+++ b/css/bootstrap.min.css
@@ -0,0 +1,7 @@
+@charset "UTF-8";/*!
+ * Bootstrap v5.1.3 (https://getbootstrap.com/)
+ * Copyright 2011-2021 The Bootstrap Authors
+ * Copyright 2011-2021 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
+ */:root{--bs-blue:#0d6efd;--bs-indigo:#6610f2;--bs-purple:#6f42c1;--bs-pink:#d63384;--bs-red:#dc3545;--bs-orange:#fd7e14;--bs-yellow:#ffc107;--bs-green:#198754;--bs-teal:#20c997;--bs-cyan:#0dcaf0;--bs-white:#fff;--bs-gray:#6c757d;--bs-gray-dark:#343a40;--bs-gray-100:#f8f9fa;--bs-gray-200:#e9ecef;--bs-gray-300:#dee2e6;--bs-gray-400:#ced4da;--bs-gray-500:#adb5bd;--bs-gray-600:#6c757d;--bs-gray-700:#495057;--bs-gray-800:#343a40;--bs-gray-900:#212529;--bs-primary:#0d6efd;--bs-secondary:#6c757d;--bs-success:#198754;--bs-info:#0dcaf0;--bs-warning:#ffc107;--bs-danger:#dc3545;--bs-light:#f8f9fa;--bs-dark:#212529;--bs-primary-rgb:13,110,253;--bs-secondary-rgb:108,117,125;--bs-success-rgb:25,135,84;--bs-info-rgb:13,202,240;--bs-warning-rgb:255,193,7;--bs-danger-rgb:220,53,69;--bs-light-rgb:248,249,250;--bs-dark-rgb:33,37,41;--bs-white-rgb:255,255,255;--bs-black-rgb:0,0,0;--bs-body-color-rgb:33,37,41;--bs-body-bg-rgb:255,255,255;--bs-font-sans-serif:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans","Liberation Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--bs-font-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--bs-gradient:linear-gradient(180deg, rgba(255, 255, 255, 0.15), rgba(255, 255, 255, 0));--bs-body-font-family:var(--bs-font-sans-serif);--bs-body-font-size:1rem;--bs-body-font-weight:400;--bs-body-line-height:1.5;--bs-body-color:#212529;--bs-body-bg:#fff}*,::after,::before{box-sizing:border-box}@media (prefers-reduced-motion:no-preference){:root{scroll-behavior:smooth}}body{margin:0;font-family:var(--bs-body-font-family);font-size:var(--bs-body-font-size);font-weight:var(--bs-body-font-weight);line-height:var(--bs-body-line-height);color:var(--bs-body-color);text-align:var(--bs-body-text-align);background-color:var(--bs-body-bg);-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}hr{margin:1rem 
0;color:inherit;background-color:currentColor;border:0;opacity:.25}hr:not([size]){height:1px}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem;font-weight:500;line-height:1.2}.h1,h1{font-size:calc(1.375rem + 1.5vw)}@media (min-width:1200px){.h1,h1{font-size:2.5rem}}.h2,h2{font-size:calc(1.325rem + .9vw)}@media (min-width:1200px){.h2,h2{font-size:2rem}}.h3,h3{font-size:calc(1.3rem + .6vw)}@media (min-width:1200px){.h3,h3{font-size:1.75rem}}.h4,h4{font-size:calc(1.275rem + .3vw)}@media (min-width:1200px){.h4,h4{font-size:1.5rem}}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[data-bs-original-title],abbr[title]{-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}.small,small{font-size:.875em}.mark,mark{padding:.2em;background-color:#fcf8e3}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#0d6efd;text-decoration:underline}a:hover{color:#0a58ca}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:var(--bs-font-monospace);font-size:1em;direction:ltr;unicode-bidi:bidi-override}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:.875em}pre code{font-size:inherit;color:inherit;word-break:normal}code{font-size:.875em;color:#d63384;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:.875em;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:1em;font-weight:700}figure{margin:0 0 
1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:#6c757d;text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}tbody,td,tfoot,th,thead,tr{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]::-webkit-calendar-picker-indicator{display:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + .3vw);line-height:inherit}@media (min-width:1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-text,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:textfield}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::-webkit-file-upload-button{font:inherit}::file-selector-button{font:inherit}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none!important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 
4.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-6{font-size:2.5rem}}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:.875em;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{margin-top:-1rem;margin-bottom:1rem;font-size:.875em;color:#6c757d}.blockquote-footer::before{content:"— "}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:.875em;color:#6c757d}.container,.container-fluid,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{width:100%;padding-right:var(--bs-gutter-x,.75rem);padding-left:var(--bs-gutter-x,.75rem);margin-right:auto;margin-left:auto}@media (min-width:576px){.container,.container-sm{max-width:540px}}@media (min-width:768px){.container,.container-md,.container-sm{max-width:720px}}@media (min-width:992px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media 
(min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1140px}}@media (min-width:1400px){.container,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{max-width:1320px}}.row{--bs-gutter-x:1.5rem;--bs-gutter-y:0;display:flex;flex-wrap:wrap;margin-top:calc(-1 * var(--bs-gutter-y));margin-right:calc(-.5 * var(--bs-gutter-x));margin-left:calc(-.5 * var(--bs-gutter-x))}.row>*{flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--bs-gutter-x) * .5);padding-left:calc(var(--bs-gutter-x) * .5);margin-top:var(--bs-gutter-y)}.col{flex:1 0 0%}.row-cols-auto>*{flex:0 0 auto;width:auto}.row-cols-1>*{flex:0 0 auto;width:100%}.row-cols-2>*{flex:0 0 auto;width:50%}.row-cols-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-4>*{flex:0 0 auto;width:25%}.row-cols-5>*{flex:0 0 auto;width:20%}.row-cols-6>*{flex:0 0 auto;width:16.6666666667%}.col-auto{flex:0 0 auto;width:auto}.col-1{flex:0 0 auto;width:8.33333333%}.col-2{flex:0 0 auto;width:16.66666667%}.col-3{flex:0 0 auto;width:25%}.col-4{flex:0 0 auto;width:33.33333333%}.col-5{flex:0 0 auto;width:41.66666667%}.col-6{flex:0 0 auto;width:50%}.col-7{flex:0 0 auto;width:58.33333333%}.col-8{flex:0 0 auto;width:66.66666667%}.col-9{flex:0 0 auto;width:75%}.col-10{flex:0 0 auto;width:83.33333333%}.col-11{flex:0 0 auto;width:91.66666667%}.col-12{flex:0 0 
auto;width:100%}.offset-1{margin-left:8.33333333%}.offset-2{margin-left:16.66666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.33333333%}.offset-5{margin-left:41.66666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.33333333%}.offset-8{margin-left:66.66666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.33333333%}.offset-11{margin-left:91.66666667%}.g-0,.gx-0{--bs-gutter-x:0}.g-0,.gy-0{--bs-gutter-y:0}.g-1,.gx-1{--bs-gutter-x:0.25rem}.g-1,.gy-1{--bs-gutter-y:0.25rem}.g-2,.gx-2{--bs-gutter-x:0.5rem}.g-2,.gy-2{--bs-gutter-y:0.5rem}.g-3,.gx-3{--bs-gutter-x:1rem}.g-3,.gy-3{--bs-gutter-y:1rem}.g-4,.gx-4{--bs-gutter-x:1.5rem}.g-4,.gy-4{--bs-gutter-y:1.5rem}.g-5,.gx-5{--bs-gutter-x:3rem}.g-5,.gy-5{--bs-gutter-y:3rem}@media (min-width:576px){.col-sm{flex:1 0 0%}.row-cols-sm-auto>*{flex:0 0 auto;width:auto}.row-cols-sm-1>*{flex:0 0 auto;width:100%}.row-cols-sm-2>*{flex:0 0 auto;width:50%}.row-cols-sm-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-sm-4>*{flex:0 0 auto;width:25%}.row-cols-sm-5>*{flex:0 0 auto;width:20%}.row-cols-sm-6>*{flex:0 0 auto;width:16.6666666667%}.col-sm-auto{flex:0 0 auto;width:auto}.col-sm-1{flex:0 0 auto;width:8.33333333%}.col-sm-2{flex:0 0 auto;width:16.66666667%}.col-sm-3{flex:0 0 auto;width:25%}.col-sm-4{flex:0 0 auto;width:33.33333333%}.col-sm-5{flex:0 0 auto;width:41.66666667%}.col-sm-6{flex:0 0 auto;width:50%}.col-sm-7{flex:0 0 auto;width:58.33333333%}.col-sm-8{flex:0 0 auto;width:66.66666667%}.col-sm-9{flex:0 0 auto;width:75%}.col-sm-10{flex:0 0 auto;width:83.33333333%}.col-sm-11{flex:0 0 auto;width:91.66666667%}.col-sm-12{flex:0 0 
auto;width:100%}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.33333333%}.offset-sm-2{margin-left:16.66666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.33333333%}.offset-sm-5{margin-left:41.66666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.33333333%}.offset-sm-8{margin-left:66.66666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.33333333%}.offset-sm-11{margin-left:91.66666667%}.g-sm-0,.gx-sm-0{--bs-gutter-x:0}.g-sm-0,.gy-sm-0{--bs-gutter-y:0}.g-sm-1,.gx-sm-1{--bs-gutter-x:0.25rem}.g-sm-1,.gy-sm-1{--bs-gutter-y:0.25rem}.g-sm-2,.gx-sm-2{--bs-gutter-x:0.5rem}.g-sm-2,.gy-sm-2{--bs-gutter-y:0.5rem}.g-sm-3,.gx-sm-3{--bs-gutter-x:1rem}.g-sm-3,.gy-sm-3{--bs-gutter-y:1rem}.g-sm-4,.gx-sm-4{--bs-gutter-x:1.5rem}.g-sm-4,.gy-sm-4{--bs-gutter-y:1.5rem}.g-sm-5,.gx-sm-5{--bs-gutter-x:3rem}.g-sm-5,.gy-sm-5{--bs-gutter-y:3rem}}@media (min-width:768px){.col-md{flex:1 0 0%}.row-cols-md-auto>*{flex:0 0 auto;width:auto}.row-cols-md-1>*{flex:0 0 auto;width:100%}.row-cols-md-2>*{flex:0 0 auto;width:50%}.row-cols-md-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-md-4>*{flex:0 0 auto;width:25%}.row-cols-md-5>*{flex:0 0 auto;width:20%}.row-cols-md-6>*{flex:0 0 auto;width:16.6666666667%}.col-md-auto{flex:0 0 auto;width:auto}.col-md-1{flex:0 0 auto;width:8.33333333%}.col-md-2{flex:0 0 auto;width:16.66666667%}.col-md-3{flex:0 0 auto;width:25%}.col-md-4{flex:0 0 auto;width:33.33333333%}.col-md-5{flex:0 0 auto;width:41.66666667%}.col-md-6{flex:0 0 auto;width:50%}.col-md-7{flex:0 0 auto;width:58.33333333%}.col-md-8{flex:0 0 auto;width:66.66666667%}.col-md-9{flex:0 0 auto;width:75%}.col-md-10{flex:0 0 auto;width:83.33333333%}.col-md-11{flex:0 0 auto;width:91.66666667%}.col-md-12{flex:0 0 
auto;width:100%}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.33333333%}.offset-md-2{margin-left:16.66666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.33333333%}.offset-md-5{margin-left:41.66666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.33333333%}.offset-md-8{margin-left:66.66666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.33333333%}.offset-md-11{margin-left:91.66666667%}.g-md-0,.gx-md-0{--bs-gutter-x:0}.g-md-0,.gy-md-0{--bs-gutter-y:0}.g-md-1,.gx-md-1{--bs-gutter-x:0.25rem}.g-md-1,.gy-md-1{--bs-gutter-y:0.25rem}.g-md-2,.gx-md-2{--bs-gutter-x:0.5rem}.g-md-2,.gy-md-2{--bs-gutter-y:0.5rem}.g-md-3,.gx-md-3{--bs-gutter-x:1rem}.g-md-3,.gy-md-3{--bs-gutter-y:1rem}.g-md-4,.gx-md-4{--bs-gutter-x:1.5rem}.g-md-4,.gy-md-4{--bs-gutter-y:1.5rem}.g-md-5,.gx-md-5{--bs-gutter-x:3rem}.g-md-5,.gy-md-5{--bs-gutter-y:3rem}}@media (min-width:992px){.col-lg{flex:1 0 0%}.row-cols-lg-auto>*{flex:0 0 auto;width:auto}.row-cols-lg-1>*{flex:0 0 auto;width:100%}.row-cols-lg-2>*{flex:0 0 auto;width:50%}.row-cols-lg-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-lg-4>*{flex:0 0 auto;width:25%}.row-cols-lg-5>*{flex:0 0 auto;width:20%}.row-cols-lg-6>*{flex:0 0 auto;width:16.6666666667%}.col-lg-auto{flex:0 0 auto;width:auto}.col-lg-1{flex:0 0 auto;width:8.33333333%}.col-lg-2{flex:0 0 auto;width:16.66666667%}.col-lg-3{flex:0 0 auto;width:25%}.col-lg-4{flex:0 0 auto;width:33.33333333%}.col-lg-5{flex:0 0 auto;width:41.66666667%}.col-lg-6{flex:0 0 auto;width:50%}.col-lg-7{flex:0 0 auto;width:58.33333333%}.col-lg-8{flex:0 0 auto;width:66.66666667%}.col-lg-9{flex:0 0 auto;width:75%}.col-lg-10{flex:0 0 auto;width:83.33333333%}.col-lg-11{flex:0 0 auto;width:91.66666667%}.col-lg-12{flex:0 0 
auto;width:100%}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.33333333%}.offset-lg-2{margin-left:16.66666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.33333333%}.offset-lg-5{margin-left:41.66666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.33333333%}.offset-lg-8{margin-left:66.66666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.33333333%}.offset-lg-11{margin-left:91.66666667%}.g-lg-0,.gx-lg-0{--bs-gutter-x:0}.g-lg-0,.gy-lg-0{--bs-gutter-y:0}.g-lg-1,.gx-lg-1{--bs-gutter-x:0.25rem}.g-lg-1,.gy-lg-1{--bs-gutter-y:0.25rem}.g-lg-2,.gx-lg-2{--bs-gutter-x:0.5rem}.g-lg-2,.gy-lg-2{--bs-gutter-y:0.5rem}.g-lg-3,.gx-lg-3{--bs-gutter-x:1rem}.g-lg-3,.gy-lg-3{--bs-gutter-y:1rem}.g-lg-4,.gx-lg-4{--bs-gutter-x:1.5rem}.g-lg-4,.gy-lg-4{--bs-gutter-y:1.5rem}.g-lg-5,.gx-lg-5{--bs-gutter-x:3rem}.g-lg-5,.gy-lg-5{--bs-gutter-y:3rem}}@media (min-width:1200px){.col-xl{flex:1 0 0%}.row-cols-xl-auto>*{flex:0 0 auto;width:auto}.row-cols-xl-1>*{flex:0 0 auto;width:100%}.row-cols-xl-2>*{flex:0 0 auto;width:50%}.row-cols-xl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xl-4>*{flex:0 0 auto;width:25%}.row-cols-xl-5>*{flex:0 0 auto;width:20%}.row-cols-xl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xl-auto{flex:0 0 auto;width:auto}.col-xl-1{flex:0 0 auto;width:8.33333333%}.col-xl-2{flex:0 0 auto;width:16.66666667%}.col-xl-3{flex:0 0 auto;width:25%}.col-xl-4{flex:0 0 auto;width:33.33333333%}.col-xl-5{flex:0 0 auto;width:41.66666667%}.col-xl-6{flex:0 0 auto;width:50%}.col-xl-7{flex:0 0 auto;width:58.33333333%}.col-xl-8{flex:0 0 auto;width:66.66666667%}.col-xl-9{flex:0 0 auto;width:75%}.col-xl-10{flex:0 0 auto;width:83.33333333%}.col-xl-11{flex:0 0 auto;width:91.66666667%}.col-xl-12{flex:0 0 
auto;width:100%}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.33333333%}.offset-xl-2{margin-left:16.66666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.33333333%}.offset-xl-5{margin-left:41.66666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.33333333%}.offset-xl-8{margin-left:66.66666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.33333333%}.offset-xl-11{margin-left:91.66666667%}.g-xl-0,.gx-xl-0{--bs-gutter-x:0}.g-xl-0,.gy-xl-0{--bs-gutter-y:0}.g-xl-1,.gx-xl-1{--bs-gutter-x:0.25rem}.g-xl-1,.gy-xl-1{--bs-gutter-y:0.25rem}.g-xl-2,.gx-xl-2{--bs-gutter-x:0.5rem}.g-xl-2,.gy-xl-2{--bs-gutter-y:0.5rem}.g-xl-3,.gx-xl-3{--bs-gutter-x:1rem}.g-xl-3,.gy-xl-3{--bs-gutter-y:1rem}.g-xl-4,.gx-xl-4{--bs-gutter-x:1.5rem}.g-xl-4,.gy-xl-4{--bs-gutter-y:1.5rem}.g-xl-5,.gx-xl-5{--bs-gutter-x:3rem}.g-xl-5,.gy-xl-5{--bs-gutter-y:3rem}}@media (min-width:1400px){.col-xxl{flex:1 0 0%}.row-cols-xxl-auto>*{flex:0 0 auto;width:auto}.row-cols-xxl-1>*{flex:0 0 auto;width:100%}.row-cols-xxl-2>*{flex:0 0 auto;width:50%}.row-cols-xxl-3>*{flex:0 0 auto;width:33.3333333333%}.row-cols-xxl-4>*{flex:0 0 auto;width:25%}.row-cols-xxl-5>*{flex:0 0 auto;width:20%}.row-cols-xxl-6>*{flex:0 0 auto;width:16.6666666667%}.col-xxl-auto{flex:0 0 auto;width:auto}.col-xxl-1{flex:0 0 auto;width:8.33333333%}.col-xxl-2{flex:0 0 auto;width:16.66666667%}.col-xxl-3{flex:0 0 auto;width:25%}.col-xxl-4{flex:0 0 auto;width:33.33333333%}.col-xxl-5{flex:0 0 auto;width:41.66666667%}.col-xxl-6{flex:0 0 auto;width:50%}.col-xxl-7{flex:0 0 auto;width:58.33333333%}.col-xxl-8{flex:0 0 auto;width:66.66666667%}.col-xxl-9{flex:0 0 auto;width:75%}.col-xxl-10{flex:0 0 auto;width:83.33333333%}.col-xxl-11{flex:0 0 auto;width:91.66666667%}.col-xxl-12{flex:0 0 
auto;width:100%}.offset-xxl-0{margin-left:0}.offset-xxl-1{margin-left:8.33333333%}.offset-xxl-2{margin-left:16.66666667%}.offset-xxl-3{margin-left:25%}.offset-xxl-4{margin-left:33.33333333%}.offset-xxl-5{margin-left:41.66666667%}.offset-xxl-6{margin-left:50%}.offset-xxl-7{margin-left:58.33333333%}.offset-xxl-8{margin-left:66.66666667%}.offset-xxl-9{margin-left:75%}.offset-xxl-10{margin-left:83.33333333%}.offset-xxl-11{margin-left:91.66666667%}.g-xxl-0,.gx-xxl-0{--bs-gutter-x:0}.g-xxl-0,.gy-xxl-0{--bs-gutter-y:0}.g-xxl-1,.gx-xxl-1{--bs-gutter-x:0.25rem}.g-xxl-1,.gy-xxl-1{--bs-gutter-y:0.25rem}.g-xxl-2,.gx-xxl-2{--bs-gutter-x:0.5rem}.g-xxl-2,.gy-xxl-2{--bs-gutter-y:0.5rem}.g-xxl-3,.gx-xxl-3{--bs-gutter-x:1rem}.g-xxl-3,.gy-xxl-3{--bs-gutter-y:1rem}.g-xxl-4,.gx-xxl-4{--bs-gutter-x:1.5rem}.g-xxl-4,.gy-xxl-4{--bs-gutter-y:1.5rem}.g-xxl-5,.gx-xxl-5{--bs-gutter-x:3rem}.g-xxl-5,.gy-xxl-5{--bs-gutter-y:3rem}}.table{--bs-table-bg:transparent;--bs-table-accent-bg:transparent;--bs-table-striped-color:#212529;--bs-table-striped-bg:rgba(0, 0, 0, 0.05);--bs-table-active-color:#212529;--bs-table-active-bg:rgba(0, 0, 0, 0.1);--bs-table-hover-color:#212529;--bs-table-hover-bg:rgba(0, 0, 0, 0.075);width:100%;margin-bottom:1rem;color:#212529;vertical-align:top;border-color:#dee2e6}.table>:not(caption)>*>*{padding:.5rem .5rem;background-color:var(--bs-table-bg);border-bottom-width:1px;box-shadow:inset 0 0 0 9999px var(--bs-table-accent-bg)}.table>tbody{vertical-align:inherit}.table>thead{vertical-align:bottom}.table>:not(:first-child){border-top:2px solid currentColor}.caption-top{caption-side:top}.table-sm>:not(caption)>*>*{padding:.25rem .25rem}.table-bordered>:not(caption)>*{border-width:1px 0}.table-bordered>:not(caption)>*>*{border-width:0 
1px}.table-borderless>:not(caption)>*>*{border-bottom-width:0}.table-borderless>:not(:first-child){border-top-width:0}.table-striped>tbody>tr:nth-of-type(odd)>*{--bs-table-accent-bg:var(--bs-table-striped-bg);color:var(--bs-table-striped-color)}.table-active{--bs-table-accent-bg:var(--bs-table-active-bg);color:var(--bs-table-active-color)}.table-hover>tbody>tr:hover>*{--bs-table-accent-bg:var(--bs-table-hover-bg);color:var(--bs-table-hover-color)}.table-primary{--bs-table-bg:#cfe2ff;--bs-table-striped-bg:#c5d7f2;--bs-table-striped-color:#000;--bs-table-active-bg:#bacbe6;--bs-table-active-color:#000;--bs-table-hover-bg:#bfd1ec;--bs-table-hover-color:#000;color:#000;border-color:#bacbe6}.table-secondary{--bs-table-bg:#e2e3e5;--bs-table-striped-bg:#d7d8da;--bs-table-striped-color:#000;--bs-table-active-bg:#cbccce;--bs-table-active-color:#000;--bs-table-hover-bg:#d1d2d4;--bs-table-hover-color:#000;color:#000;border-color:#cbccce}.table-success{--bs-table-bg:#d1e7dd;--bs-table-striped-bg:#c7dbd2;--bs-table-striped-color:#000;--bs-table-active-bg:#bcd0c7;--bs-table-active-color:#000;--bs-table-hover-bg:#c1d6cc;--bs-table-hover-color:#000;color:#000;border-color:#bcd0c7}.table-info{--bs-table-bg:#cff4fc;--bs-table-striped-bg:#c5e8ef;--bs-table-striped-color:#000;--bs-table-active-bg:#badce3;--bs-table-active-color:#000;--bs-table-hover-bg:#bfe2e9;--bs-table-hover-color:#000;color:#000;border-color:#badce3}.table-warning{--bs-table-bg:#fff3cd;--bs-table-striped-bg:#f2e7c3;--bs-table-striped-color:#000;--bs-table-active-bg:#e6dbb9;--bs-table-active-color:#000;--bs-table-hover-bg:#ece1be;--bs-table-hover-color:#000;color:#000;border-color:#e6dbb9}.table-danger{--bs-table-bg:#f8d7da;--bs-table-striped-bg:#eccccf;--bs-table-striped-color:#000;--bs-table-active-bg:#dfc2c4;--bs-table-active-color:#000;--bs-table-hover-bg:#e5c7ca;--bs-table-hover-color:#000;color:#000;border-color:#dfc2c4}.table-light{--bs-table-bg:#f8f9fa;--bs-table-striped-bg:#ecedee;--bs-table-striped-color:#00
0;--bs-table-active-bg:#dfe0e1;--bs-table-active-color:#000;--bs-table-hover-bg:#e5e6e7;--bs-table-hover-color:#000;color:#000;border-color:#dfe0e1}.table-dark{--bs-table-bg:#212529;--bs-table-striped-bg:#2c3034;--bs-table-striped-color:#fff;--bs-table-active-bg:#373b3e;--bs-table-active-color:#fff;--bs-table-hover-bg:#323539;--bs-table-hover-color:#fff;color:#fff;border-color:#373b3e}.table-responsive{overflow-x:auto;-webkit-overflow-scrolling:touch}@media (max-width:575.98px){.table-responsive-sm{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:767.98px){.table-responsive-md{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:991.98px){.table-responsive-lg{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1199.98px){.table-responsive-xl{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1399.98px){.table-responsive-xxl{overflow-x:auto;-webkit-overflow-scrolling:touch}}.form-label{margin-bottom:.5rem}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem}.form-text{margin-top:.25rem;font-size:.875em;color:#6c757d}.form-control{display:block;width:100%;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;-webkit-appearance:none;-moz-appearance:none;appearance:none;border-radius:.25rem;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media 
(prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control[type=file]{overflow:hidden}.form-control[type=file]:not(:disabled):not([readonly]){cursor:pointer}.form-control:focus{color:#212529;background-color:#fff;border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-control::-webkit-date-and-time-value{height:1.5em}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}.form-control::-webkit-file-upload-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;-webkit-transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}.form-control::file-selector-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control::-webkit-file-upload-button{-webkit-transition:none;transition:none}.form-control::file-selector-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::-webkit-file-upload-button{background-color:#dde0e3}.form-control:hover:not(:disabled):not([readonly])::file-selector-button{background-color:#dde0e3}.form-control::-webkit-file-upload-button{padding:.375rem .75rem;margin:-.375rem 
-.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:#212529;background-color:#e9ecef;pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:1px;border-radius:0;-webkit-transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control::-webkit-file-upload-button{-webkit-transition:none;transition:none}}.form-control:hover:not(:disabled):not([readonly])::-webkit-file-upload-button{background-color:#dde0e3}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{min-height:calc(1.5em + .5rem + 2px);padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.form-control-sm::-webkit-file-upload-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-sm::file-selector-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-sm::-webkit-file-upload-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-lg{min-height:calc(1.5em + 1rem + 2px);padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.form-control-lg::-webkit-file-upload-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}.form-control-lg::file-selector-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}.form-control-lg::-webkit-file-upload-button{padding:.5rem 1rem;margin:-.5rem 
-1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}textarea.form-control{min-height:calc(1.5em + .75rem + 2px)}textarea.form-control-sm{min-height:calc(1.5em + .5rem + 2px)}textarea.form-control-lg{min-height:calc(1.5em + 1rem + 2px)}.form-control-color{width:3rem;height:auto;padding:.375rem}.form-control-color:not(:disabled):not([readonly]){cursor:pointer}.form-control-color::-moz-color-swatch{height:1.5em;border-radius:.25rem}.form-control-color::-webkit-color-swatch{height:1.5em;border-radius:.25rem}.form-select{display:block;width:100%;padding:.375rem 2.25rem .375rem .75rem;-moz-padding-start:calc(0.75rem - 3px);font-size:1rem;font-weight:400;line-height:1.5;color:#212529;background-color:#fff;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right .75rem center;background-size:16px 12px;border:1px solid #ced4da;border-radius:.25rem;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;-moz-appearance:none;appearance:none}@media (prefers-reduced-motion:reduce){.form-select{transition:none}}.form-select:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-select[multiple],.form-select[size]:not([size="1"]){padding-right:.75rem;background-image:none}.form-select:disabled{background-color:#e9ecef}.form-select:-moz-focusring{color:transparent;text-shadow:0 0 0 #212529}.form-select-sm{padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem;border-radius:.2rem}.form-select-lg{padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem;border-radius:.3rem}.form-check{display:block;min-height:1.5rem;padding-left:1.5em;margin-bottom:.125rem}.form-check 
.form-check-input{float:left;margin-left:-1.5em}.form-check-input{width:1em;height:1em;margin-top:.25em;vertical-align:top;background-color:#fff;background-repeat:no-repeat;background-position:center;background-size:contain;border:1px solid rgba(0,0,0,.25);-webkit-appearance:none;-moz-appearance:none;appearance:none;-webkit-print-color-adjust:exact;color-adjust:exact}.form-check-input[type=checkbox]{border-radius:.25em}.form-check-input[type=radio]{border-radius:50%}.form-check-input:active{filter:brightness(90%)}.form-check-input:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-check-input:checked{background-color:#0d6efd;border-color:#0d6efd}.form-check-input:checked[type=checkbox]{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10l3 3l6-6'/%3e%3c/svg%3e")}.form-check-input:checked[type=radio]{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='2' fill='%23fff'/%3e%3c/svg%3e")}.form-check-input[type=checkbox]:indeterminate{background-color:#0d6efd;border-color:#0d6efd;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10h8'/%3e%3c/svg%3e")}.form-check-input:disabled{pointer-events:none;filter:none;opacity:.5}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{opacity:.5}.form-switch{padding-left:2.5em}.form-switch .form-check-input{width:2em;margin-left:-2.5em;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%280, 0, 0, 0.25%29'/%3e%3c/svg%3e");background-position:left center;border-radius:2em;transition:background-position 
.15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-switch .form-check-input{transition:none}}.form-switch .form-check-input:focus{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%2386b7fe'/%3e%3c/svg%3e")}.form-switch .form-check-input:checked{background-position:right center;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e")}.form-check-inline{display:inline-block;margin-right:1rem}.btn-check{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.btn-check:disabled+.btn,.btn-check[disabled]+.btn{pointer-events:none;filter:none;opacity:.65}.form-range{width:100%;height:1.5rem;padding:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.form-range:focus{outline:0}.form-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range::-moz-focus-outer{border:0}.form-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#0d6efd;border:0;border-radius:1rem;-webkit-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-webkit-appearance:none;appearance:none}@media 
(prefers-reduced-motion:reduce){.form-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.form-range::-webkit-slider-thumb:active{background-color:#b6d4fe}.form-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#0d6efd;border:0;border-radius:1rem;-moz-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;-moz-appearance:none;appearance:none}@media (prefers-reduced-motion:reduce){.form-range::-moz-range-thumb{-moz-transition:none;transition:none}}.form-range::-moz-range-thumb:active{background-color:#b6d4fe}.form-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.form-range:disabled{pointer-events:none}.form-range:disabled::-webkit-slider-thumb{background-color:#adb5bd}.form-range:disabled::-moz-range-thumb{background-color:#adb5bd}.form-floating{position:relative}.form-floating>.form-control,.form-floating>.form-select{height:calc(3.5rem + 2px);line-height:1.25}.form-floating>label{position:absolute;top:0;left:0;height:100%;padding:1rem .75rem;pointer-events:none;border:1px solid transparent;transform-origin:0 0;transition:opacity .1s ease-in-out,transform .1s ease-in-out}@media (prefers-reduced-motion:reduce){.form-floating>label{transition:none}}.form-floating>.form-control{padding:1rem 
.75rem}.form-floating>.form-control::-moz-placeholder{color:transparent}.form-floating>.form-control::placeholder{color:transparent}.form-floating>.form-control:not(:-moz-placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:focus,.form-floating>.form-control:not(:placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:-webkit-autofill{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-select{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:not(:-moz-placeholder-shown)~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:focus~label,.form-floating>.form-control:not(:placeholder-shown)~label,.form-floating>.form-select~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:-webkit-autofill~label{opacity:.65;transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-select{position:relative;flex:1 1 auto;width:1%;min-width:0}.input-group>.form-control:focus,.input-group>.form-select:focus{z-index:3}.input-group .btn{position:relative;z-index:2}.input-group .btn:focus{z-index:3}.input-group-text{display:flex;align-items:center;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-lg>.btn,.input-group-lg>.form-control,.input-group-lg>.form-select,.input-group-lg>.input-group-text{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.input-group-sm>.btn,.input-group-sm>.form-control,.input-group-sm>.form-select,.input-group-sm>.input-group-text{padding:.25rem 
.5rem;font-size:.875rem;border-radius:.2rem}.input-group-lg>.form-select,.input-group-sm>.form-select{padding-right:3rem}.input-group:not(.has-validation)>.dropdown-toggle:nth-last-child(n+3),.input-group:not(.has-validation)>:not(:last-child):not(.dropdown-toggle):not(.dropdown-menu){border-top-right-radius:0;border-bottom-right-radius:0}.input-group.has-validation>.dropdown-toggle:nth-last-child(n+4),.input-group.has-validation>:nth-last-child(n+3):not(.dropdown-toggle):not(.dropdown-menu){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>:not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback){margin-left:-1px;border-top-left-radius:0;border-bottom-left-radius:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#198754}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(25,135,84,.9);border-radius:.25rem}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{border-color:#198754;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) 
right calc(.375em + .1875rem)}.form-select.is-valid,.was-validated .form-select:valid{border-color:#198754}.form-select.is-valid:not([multiple]):not([size]),.form-select.is-valid:not([multiple])[size="1"],.was-validated .form-select:valid:not([multiple]):not([size]),.was-validated .form-select:valid:not([multiple])[size="1"]{padding-right:4.125rem;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 6-6'/%3e%3c/svg%3e"),url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-valid:focus,.was-validated .form-select:valid:focus{border-color:#198754;box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.form-check-input.is-valid,.was-validated .form-check-input:valid{border-color:#198754}.form-check-input.is-valid:checked,.was-validated .form-check-input:valid:checked{background-color:#198754}.form-check-input.is-valid:focus,.was-validated .form-check-input:valid:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.25)}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#198754}.form-check-inline .form-check-input~.valid-feedback{margin-left:.5em}.input-group .form-control.is-valid,.input-group .form-select.is-valid,.was-validated .input-group .form-control:valid,.was-validated .input-group .form-select:valid{z-index:1}.input-group .form-control.is-valid:focus,.input-group .form-select.is-valid:focus,.was-validated .input-group .form-control:valid:focus,.was-validated .input-group 
.form-select:valid:focus{z-index:3}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:rgba(220,53,69,.9);border-radius:.25rem}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated .form-control:invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-invalid:focus,.was-validated .form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.form-select.is-invalid,.was-validated .form-select:invalid{border-color:#dc3545}.form-select.is-invalid:not([multiple]):not([size]),.form-select.is-invalid:not([multiple])[size="1"],.was-validated .form-select:invalid:not([multiple]):not([size]),.was-validated .form-select:invalid:not([multiple])[size="1"]{padding-right:4.125rem;background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M2 5l6 6 
6-6'/%3e%3c/svg%3e"),url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-invalid:focus,.was-validated .form-select:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.form-check-input.is-invalid,.was-validated .form-check-input:invalid{border-color:#dc3545}.form-check-input.is-invalid:checked,.was-validated .form-check-input:invalid:checked{background-color:#dc3545}.form-check-input.is-invalid:focus,.was-validated .form-check-input:invalid:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.25)}.form-check-input.is-invalid~.form-check-label,.was-validated .form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-inline .form-check-input~.invalid-feedback{margin-left:.5em}.input-group .form-control.is-invalid,.input-group .form-select.is-invalid,.was-validated .input-group .form-control:invalid,.was-validated .input-group .form-select:invalid{z-index:2}.input-group .form-control.is-invalid:focus,.input-group .form-select.is-invalid:focus,.was-validated .input-group .form-control:invalid:focus,.was-validated .input-group .form-select:invalid:focus{z-index:3}.btn{display:inline-block;font-weight:400;line-height:1.5;color:#212529;text-align:center;text-decoration:none;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none;background-color:transparent;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;border-radius:.25rem;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media 
(prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:#212529}.btn-check:focus+.btn,.btn:focus{outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.btn.disabled,.btn:disabled,fieldset:disabled .btn{pointer-events:none;opacity:.65}.btn-primary{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-primary:hover{color:#fff;background-color:#0b5ed7;border-color:#0a58ca}.btn-check:focus+.btn-primary,.btn-primary:focus{color:#fff;background-color:#0b5ed7;border-color:#0a58ca;box-shadow:0 0 0 .25rem rgba(49,132,253,.5)}.btn-check:active+.btn-primary,.btn-check:checked+.btn-primary,.btn-primary.active,.btn-primary:active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0a58ca;border-color:#0a53be}.btn-check:active+.btn-primary:focus,.btn-check:checked+.btn-primary:focus,.btn-primary.active:focus,.btn-primary:active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(49,132,253,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5c636a;border-color:#565e64}.btn-check:focus+.btn-secondary,.btn-secondary:focus{color:#fff;background-color:#5c636a;border-color:#565e64;box-shadow:0 0 0 .25rem rgba(130,138,145,.5)}.btn-check:active+.btn-secondary,.btn-check:checked+.btn-secondary,.btn-secondary.active,.btn-secondary:active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#565e64;border-color:#51585e}.btn-check:active+.btn-secondary:focus,.btn-check:checked+.btn-secondary:focus,.btn-secondary.active:focus,.btn-secondary:active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .25rem 
rgba(130,138,145,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-success{color:#fff;background-color:#198754;border-color:#198754}.btn-success:hover{color:#fff;background-color:#157347;border-color:#146c43}.btn-check:focus+.btn-success,.btn-success:focus{color:#fff;background-color:#157347;border-color:#146c43;box-shadow:0 0 0 .25rem rgba(60,153,110,.5)}.btn-check:active+.btn-success,.btn-check:checked+.btn-success,.btn-success.active,.btn-success:active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#146c43;border-color:#13653f}.btn-check:active+.btn-success:focus,.btn-check:checked+.btn-success:focus,.btn-success.active:focus,.btn-success:active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(60,153,110,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#198754;border-color:#198754}.btn-info{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-info:hover{color:#000;background-color:#31d2f2;border-color:#25cff2}.btn-check:focus+.btn-info,.btn-info:focus{color:#000;background-color:#31d2f2;border-color:#25cff2;box-shadow:0 0 0 .25rem rgba(11,172,204,.5)}.btn-check:active+.btn-info,.btn-check:checked+.btn-info,.btn-info.active,.btn-info:active,.show>.btn-info.dropdown-toggle{color:#000;background-color:#3dd5f3;border-color:#25cff2}.btn-check:active+.btn-info:focus,.btn-check:checked+.btn-info:focus,.btn-info.active:focus,.btn-info:active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(11,172,204,.5)}.btn-info.disabled,.btn-info:disabled{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-warning{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#000;background-color:#ffca2c;border-color:#ffc720}.btn-check:focus+.btn-warning,.btn-warning:focus{color:#000;background-color:#ffca2c;border-color:#ffc720;box-shadow:0 0 0 .25rem 
rgba(217,164,6,.5)}.btn-check:active+.btn-warning,.btn-check:checked+.btn-warning,.btn-warning.active,.btn-warning:active,.show>.btn-warning.dropdown-toggle{color:#000;background-color:#ffcd39;border-color:#ffc720}.btn-check:active+.btn-warning:focus,.btn-check:checked+.btn-warning:focus,.btn-warning.active:focus,.btn-warning:active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(217,164,6,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#bb2d3b;border-color:#b02a37}.btn-check:focus+.btn-danger,.btn-danger:focus{color:#fff;background-color:#bb2d3b;border-color:#b02a37;box-shadow:0 0 0 .25rem rgba(225,83,97,.5)}.btn-check:active+.btn-danger,.btn-check:checked+.btn-danger,.btn-danger.active,.btn-danger:active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#b02a37;border-color:#a52834}.btn-check:active+.btn-danger:focus,.btn-check:checked+.btn-danger:focus,.btn-danger.active:focus,.btn-danger:active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(225,83,97,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-light{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:focus+.btn-light,.btn-light:focus{color:#000;background-color:#f9fafb;border-color:#f9fafb;box-shadow:0 0 0 .25rem rgba(211,212,213,.5)}.btn-check:active+.btn-light,.btn-check:checked+.btn-light,.btn-light.active,.btn-light:active,.show>.btn-light.dropdown-toggle{color:#000;background-color:#f9fafb;border-color:#f9fafb}.btn-check:active+.btn-light:focus,.btn-check:checked+.btn-light:focus,.btn-light.active:focus,.btn-light:active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .25rem 
rgba(211,212,213,.5)}.btn-light.disabled,.btn-light:disabled{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-dark{color:#fff;background-color:#212529;border-color:#212529}.btn-dark:hover{color:#fff;background-color:#1c1f23;border-color:#1a1e21}.btn-check:focus+.btn-dark,.btn-dark:focus{color:#fff;background-color:#1c1f23;border-color:#1a1e21;box-shadow:0 0 0 .25rem rgba(66,70,73,.5)}.btn-check:active+.btn-dark,.btn-check:checked+.btn-dark,.btn-dark.active,.btn-dark:active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1a1e21;border-color:#191c1f}.btn-check:active+.btn-dark:focus,.btn-check:checked+.btn-dark:focus,.btn-dark.active:focus,.btn-dark:active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .25rem rgba(66,70,73,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#212529;border-color:#212529}.btn-outline-primary{color:#0d6efd;border-color:#0d6efd}.btn-outline-primary:hover{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-check:focus+.btn-outline-primary,.btn-outline-primary:focus{box-shadow:0 0 0 .25rem rgba(13,110,253,.5)}.btn-check:active+.btn-outline-primary,.btn-check:checked+.btn-outline-primary,.btn-outline-primary.active,.btn-outline-primary.dropdown-toggle.show,.btn-outline-primary:active{color:#fff;background-color:#0d6efd;border-color:#0d6efd}.btn-check:active+.btn-outline-primary:focus,.btn-check:checked+.btn-outline-primary:focus,.btn-outline-primary.active:focus,.btn-outline-primary.dropdown-toggle.show:focus,.btn-outline-primary:active:focus{box-shadow:0 0 0 .25rem rgba(13,110,253,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#0d6efd;background-color:transparent}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:focus+.btn-outline-secondary,.btn-outline-secondary:focus{box-shadow:0 0 0 .25rem 
rgba(108,117,125,.5)}.btn-check:active+.btn-outline-secondary,.btn-check:checked+.btn-outline-secondary,.btn-outline-secondary.active,.btn-outline-secondary.dropdown-toggle.show,.btn-outline-secondary:active{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-check:active+.btn-outline-secondary:focus,.btn-check:checked+.btn-outline-secondary:focus,.btn-outline-secondary.active:focus,.btn-outline-secondary.dropdown-toggle.show:focus,.btn-outline-secondary:active:focus{box-shadow:0 0 0 .25rem rgba(108,117,125,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-success{color:#198754;border-color:#198754}.btn-outline-success:hover{color:#fff;background-color:#198754;border-color:#198754}.btn-check:focus+.btn-outline-success,.btn-outline-success:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.5)}.btn-check:active+.btn-outline-success,.btn-check:checked+.btn-outline-success,.btn-outline-success.active,.btn-outline-success.dropdown-toggle.show,.btn-outline-success:active{color:#fff;background-color:#198754;border-color:#198754}.btn-check:active+.btn-outline-success:focus,.btn-check:checked+.btn-outline-success:focus,.btn-outline-success.active:focus,.btn-outline-success.dropdown-toggle.show:focus,.btn-outline-success:active:focus{box-shadow:0 0 0 .25rem rgba(25,135,84,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#198754;background-color:transparent}.btn-outline-info{color:#0dcaf0;border-color:#0dcaf0}.btn-outline-info:hover{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:focus+.btn-outline-info,.btn-outline-info:focus{box-shadow:0 0 0 .25rem 
rgba(13,202,240,.5)}.btn-check:active+.btn-outline-info,.btn-check:checked+.btn-outline-info,.btn-outline-info.active,.btn-outline-info.dropdown-toggle.show,.btn-outline-info:active{color:#000;background-color:#0dcaf0;border-color:#0dcaf0}.btn-check:active+.btn-outline-info:focus,.btn-check:checked+.btn-outline-info:focus,.btn-outline-info.active:focus,.btn-outline-info.dropdown-toggle.show:focus,.btn-outline-info:active:focus{box-shadow:0 0 0 .25rem rgba(13,202,240,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#0dcaf0;background-color:transparent}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:focus+.btn-outline-warning,.btn-outline-warning:focus{box-shadow:0 0 0 .25rem rgba(255,193,7,.5)}.btn-check:active+.btn-outline-warning,.btn-check:checked+.btn-outline-warning,.btn-outline-warning.active,.btn-outline-warning.dropdown-toggle.show,.btn-outline-warning:active{color:#000;background-color:#ffc107;border-color:#ffc107}.btn-check:active+.btn-outline-warning:focus,.btn-check:checked+.btn-outline-warning:focus,.btn-outline-warning.active:focus,.btn-outline-warning.dropdown-toggle.show:focus,.btn-outline-warning:active:focus{box-shadow:0 0 0 .25rem rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:focus+.btn-outline-danger,.btn-outline-danger:focus{box-shadow:0 0 0 .25rem 
rgba(220,53,69,.5)}.btn-check:active+.btn-outline-danger,.btn-check:checked+.btn-outline-danger,.btn-outline-danger.active,.btn-outline-danger.dropdown-toggle.show,.btn-outline-danger:active{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-check:active+.btn-outline-danger:focus,.btn-check:checked+.btn-outline-danger:focus,.btn-outline-danger.active:focus,.btn-outline-danger.dropdown-toggle.show:focus,.btn-outline-danger:active:focus{box-shadow:0 0 0 .25rem rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:hover{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:focus+.btn-outline-light,.btn-outline-light:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,.5)}.btn-check:active+.btn-outline-light,.btn-check:checked+.btn-outline-light,.btn-outline-light.active,.btn-outline-light.dropdown-toggle.show,.btn-outline-light:active{color:#000;background-color:#f8f9fa;border-color:#f8f9fa}.btn-check:active+.btn-outline-light:focus,.btn-check:checked+.btn-outline-light:focus,.btn-outline-light.active:focus,.btn-outline-light.dropdown-toggle.show:focus,.btn-outline-light:active:focus{box-shadow:0 0 0 .25rem rgba(248,249,250,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-dark{color:#212529;border-color:#212529}.btn-outline-dark:hover{color:#fff;background-color:#212529;border-color:#212529}.btn-check:focus+.btn-outline-dark,.btn-outline-dark:focus{box-shadow:0 0 0 .25rem 
rgba(33,37,41,.5)}.btn-check:active+.btn-outline-dark,.btn-check:checked+.btn-outline-dark,.btn-outline-dark.active,.btn-outline-dark.dropdown-toggle.show,.btn-outline-dark:active{color:#fff;background-color:#212529;border-color:#212529}.btn-check:active+.btn-outline-dark:focus,.btn-check:checked+.btn-outline-dark:focus,.btn-outline-dark.active:focus,.btn-outline-dark.dropdown-toggle.show:focus,.btn-outline-dark:active:focus{box-shadow:0 0 0 .25rem rgba(33,37,41,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#212529;background-color:transparent}.btn-link{font-weight:400;color:#0d6efd;text-decoration:underline}.btn-link:hover{color:#0a58ca}.btn-link.disabled,.btn-link:disabled{color:#6c757d}.btn-group-lg>.btn,.btn-lg{padding:.5rem 1rem;font-size:1.25rem;border-radius:.3rem}.btn-group-sm>.btn,.btn-sm{padding:.25rem .5rem;font-size:.875rem;border-radius:.2rem}.fade{transition:opacity .15s linear}@media (prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;overflow:hidden;transition:height .35s ease}@media (prefers-reduced-motion:reduce){.collapsing{transition:none}}.collapsing.collapse-horizontal{width:0;height:auto;transition:width .35s ease}@media (prefers-reduced-motion:reduce){.collapsing.collapse-horizontal{transition:none}}.dropdown,.dropend,.dropstart,.dropup{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;z-index:1000;display:none;min-width:10rem;padding:.5rem 0;margin:0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid 
rgba(0,0,0,.15);border-radius:.25rem}.dropdown-menu[data-bs-popper]{top:100%;left:0;margin-top:.125rem}.dropdown-menu-start{--bs-position:start}.dropdown-menu-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-end{--bs-position:end}.dropdown-menu-end[data-bs-popper]{right:0;left:auto}@media (min-width:576px){.dropdown-menu-sm-start{--bs-position:start}.dropdown-menu-sm-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-sm-end{--bs-position:end}.dropdown-menu-sm-end[data-bs-popper]{right:0;left:auto}}@media (min-width:768px){.dropdown-menu-md-start{--bs-position:start}.dropdown-menu-md-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-md-end{--bs-position:end}.dropdown-menu-md-end[data-bs-popper]{right:0;left:auto}}@media (min-width:992px){.dropdown-menu-lg-start{--bs-position:start}.dropdown-menu-lg-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-lg-end{--bs-position:end}.dropdown-menu-lg-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1200px){.dropdown-menu-xl-start{--bs-position:start}.dropdown-menu-xl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xl-end{--bs-position:end}.dropdown-menu-xl-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1400px){.dropdown-menu-xxl-start{--bs-position:start}.dropdown-menu-xxl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xxl-end{--bs-position:end}.dropdown-menu-xxl-end[data-bs-popper]{right:0;left:auto}}.dropup .dropdown-menu[data-bs-popper]{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-menu[data-bs-popper]{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropend .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em 
solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropend .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-toggle::after{vertical-align:0}.dropstart .dropdown-menu[data-bs-popper]{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropstart .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:""}.dropstart .dropdown-toggle::after{display:none}.dropstart .dropdown-toggle::before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropstart .dropdown-toggle:empty::after{margin-left:0}.dropstart .dropdown-toggle::before{vertical-align:0}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid rgba(0,0,0,.15)}.dropdown-item{display:block;width:100%;padding:.25rem 1rem;clear:both;font-weight:400;color:#212529;text-align:inherit;text-decoration:none;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#1e2125;background-color:#e9ecef}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#0d6efd}.dropdown-item.disabled,.dropdown-item:disabled{color:#adb5bd;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1rem;color:#212529}.dropdown-menu-dark{color:#dee2e6;background-color:#343a40;border-color:rgba(0,0,0,.15)}.dropdown-menu-dark .dropdown-item{color:#dee2e6}.dropdown-menu-dark .dropdown-item:focus,.dropdown-menu-dark .dropdown-item:hover{color:#fff;background-color:rgba(255,255,255,.15)}.dropdown-menu-dark .dropdown-item.active,.dropdown-menu-dark .dropdown-item:active{color:#fff;background-color:#0d6efd}.dropdown-menu-dark 
.dropdown-item.disabled,.dropdown-menu-dark .dropdown-item:disabled{color:#adb5bd}.dropdown-menu-dark .dropdown-divider{border-color:rgba(0,0,0,.15)}.dropdown-menu-dark .dropdown-item-text{color:#dee2e6}.dropdown-menu-dark .dropdown-header{color:#adb5bd}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;flex:1 1 auto}.btn-group-vertical>.btn-check:checked+.btn,.btn-group-vertical>.btn-check:focus+.btn,.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn-check:checked+.btn,.btn-group>.btn-check:focus+.btn,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn-group:not(:first-child),.btn-group>.btn:not(:first-child){margin-left:-1px}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:nth-child(n+3),.btn-group>:not(.btn-check)+.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropend .dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after{margin-left:0}.dropstart 
.dropdown-toggle-split::before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn~.btn{border-top-left-radius:0;border-top-right-radius:0}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 1rem;color:#0d6efd;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out}@media (prefers-reduced-motion:reduce){.nav-link{transition:none}}.nav-link:focus,.nav-link:hover{color:#0a58ca}.nav-link.disabled{color:#6c757d;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-link{margin-bottom:-1px;background:0 0;border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:#e9ecef #e9ecef #dee2e6;isolation:isolate}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{background:0 0;border:0;border-radius:.25rem}.nav-pills 
.nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#0d6efd}.nav-fill .nav-item,.nav-fill>.nav-link{flex:1 1 auto;text-align:center}.nav-justified .nav-item,.nav-justified>.nav-link{flex-basis:0;flex-grow:1;text-align:center}.nav-fill .nav-item .nav-link,.nav-justified .nav-item .nav-link{width:100%}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between;padding-top:.5rem;padding-bottom:.5rem}.navbar>.container,.navbar>.container-fluid,.navbar>.container-lg,.navbar>.container-md,.navbar>.container-sm,.navbar>.container-xl,.navbar>.container-xxl{display:flex;flex-wrap:inherit;align-items:center;justify-content:space-between}.navbar-brand{padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;text-decoration:none;white-space:nowrap}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem;transition:box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.navbar-toggler{transition:none}}.navbar-toggler:hover{text-decoration:none}.navbar-toggler:focus{text-decoration:none;outline:0;box-shadow:0 0 0 .25rem}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;background-repeat:no-repeat;background-position:center;background-size:100%}.navbar-nav-scroll{max-height:var(--bs-scroll-height,75vh);overflow-y:auto}@media (min-width:576px){.navbar-expand-sm{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav 
.dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}.navbar-expand-sm .offcanvas-header{display:none}.navbar-expand-sm .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-sm .offcanvas-bottom,.navbar-expand-sm .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-sm .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:768px){.navbar-expand-md{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}.navbar-expand-md .offcanvas-header{display:none}.navbar-expand-md .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-md .offcanvas-bottom,.navbar-expand-md .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-md .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:992px){.navbar-expand-lg{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg 
.navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}.navbar-expand-lg .offcanvas-header{display:none}.navbar-expand-lg .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-lg .offcanvas-bottom,.navbar-expand-lg .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-lg .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:1200px){.navbar-expand-xl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}.navbar-expand-xl .offcanvas-header{display:none}.navbar-expand-xl .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-xl .offcanvas-bottom,.navbar-expand-xl .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-xl .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:1400px){.navbar-expand-xxl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xxl .navbar-nav{flex-direction:row}.navbar-expand-xxl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xxl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xxl .navbar-nav-scroll{overflow:visible}.navbar-expand-xxl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xxl .navbar-toggler{display:none}.navbar-expand-xxl .offcanvas-header{display:none}.navbar-expand-xxl 
.offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand-xxl .offcanvas-bottom,.navbar-expand-xxl .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand-xxl .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}.navbar-expand{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-expand .offcanvas-header{display:none}.navbar-expand .offcanvas{position:inherit;bottom:0;z-index:1000;flex-grow:1;visibility:visible!important;background-color:transparent;border-right:0;border-left:0;transition:none;transform:none}.navbar-expand .offcanvas-bottom,.navbar-expand .offcanvas-top{height:auto;border-top:0;border-bottom:0}.navbar-expand .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}.navbar-light .navbar-brand{color:rgba(0,0,0,.9)}.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:rgba(0,0,0,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,.55)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(0,0,0,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,.3)}.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .show>.nav-link{color:rgba(0,0,0,.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,.55);border-color:rgba(0,0,0,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%280, 0, 0, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' 
stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-light .navbar-text{color:rgba(0,0,0,.55)}.navbar-light .navbar-text a,.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:rgba(0,0,0,.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,.55)}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:rgba(255,255,255,.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,.25)}.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav .show>.nav-link{color:#fff}.navbar-dark .navbar-toggler{color:rgba(255,255,255,.55);border-color:rgba(255,255,255,.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-dark .navbar-text{color:rgba(255,255,255,.55)}.navbar-dark .navbar-text a,.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{flex:1 1 auto;padding:1rem 
1rem}.card-title{margin-bottom:.5rem}.card-subtitle{margin-top:-.25rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link+.card-link{margin-left:1rem}.card-header{padding:.5rem 1rem;margin-bottom:0;background-color:rgba(0,0,0,.03);border-bottom:1px solid rgba(0,0,0,.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-footer{padding:.5rem 1rem;background-color:rgba(0,0,0,.03);border-top:1px solid rgba(0,0,0,.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-right:-.5rem;margin-bottom:-.5rem;margin-left:-.5rem;border-bottom:0}.card-header-pills{margin-right:-.5rem;margin-left:-.5rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom,.card-img-top{width:100%}.card-img,.card-img-top{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-group>.card{margin-bottom:.75rem}@media (min-width:576px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) 
.card-img-bottom{border-bottom-left-radius:0}}.accordion-button{position:relative;display:flex;align-items:center;width:100%;padding:1rem 1.25rem;font-size:1rem;color:#212529;text-align:left;background-color:#fff;border:0;border-radius:0;overflow-anchor:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out,border-radius .15s ease}@media (prefers-reduced-motion:reduce){.accordion-button{transition:none}}.accordion-button:not(.collapsed){color:#0c63e4;background-color:#e7f1ff;box-shadow:inset 0 -1px 0 rgba(0,0,0,.125)}.accordion-button:not(.collapsed)::after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%230c63e4'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");transform:rotate(-180deg)}.accordion-button::after{flex-shrink:0;width:1.25rem;height:1.25rem;margin-left:auto;content:"";background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23212529'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-size:1.25rem;transition:transform .2s ease-in-out}@media (prefers-reduced-motion:reduce){.accordion-button::after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{z-index:3;border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.accordion-header{margin-bottom:0}.accordion-item{background-color:#fff;border:1px solid rgba(0,0,0,.125)}.accordion-item:first-of-type{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.accordion-item:first-of-type .accordion-button{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 
1px)}.accordion-item:not(:first-of-type){border-top:0}.accordion-item:last-of-type{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-item:last-of-type .accordion-button.collapsed{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.accordion-item:last-of-type .accordion-collapse{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.accordion-body{padding:1rem 1.25rem}.accordion-flush .accordion-collapse{border-width:0}.accordion-flush .accordion-item{border-right:0;border-left:0;border-radius:0}.accordion-flush .accordion-item:first-child{border-top:0}.accordion-flush .accordion-item:last-child{border-bottom:0}.accordion-flush .accordion-item .accordion-button{border-radius:0}.breadcrumb{display:flex;flex-wrap:wrap;padding:0 0;margin-bottom:1rem;list-style:none}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item::before{float:left;padding-right:.5rem;color:#6c757d;content:var(--bs-breadcrumb-divider, "/")}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none}.page-link{position:relative;display:block;color:#0d6efd;text-decoration:none;background-color:#fff;border:1px solid #dee2e6;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.page-link{transition:none}}.page-link:hover{z-index:2;color:#0a58ca;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:3;color:#0a58ca;background-color:#e9ecef;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.page-item:not(:first-child) .page-link{margin-left:-1px}.page-item.active .page-link{z-index:3;color:#fff;background-color:#0d6efd;border-color:#0d6efd}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;background-color:#fff;border-color:#dee2e6}.page-link{padding:.375rem .75rem}.page-item:first-child 
.page-link{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{position:relative;padding:1rem 1rem;margin-bottom:1rem;border:1px solid transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{position:absolute;top:0;right:0;z-index:2;padding:1.25rem 1rem}.alert-primary{color:#084298;background-color:#cfe2ff;border-color:#b6d4fe}.alert-primary .alert-link{color:#06357a}.alert-secondary{color:#41464b;background-color:#e2e3e5;border-color:#d3d6d8}.alert-secondary .alert-link{color:#34383c}.alert-success{color:#0f5132;background-color:#d1e7dd;border-color:#badbcc}.alert-success .alert-link{color:#0c4128}.alert-info{color:#055160;background-color:#cff4fc;border-color:#b6effb}.alert-info .alert-link{color:#04414d}.alert-warning{color:#664d03;background-color:#fff3cd;border-color:#ffecb5}.alert-warning .alert-link{color:#523e02}.alert-danger{color:#842029;background-color:#f8d7da;border-color:#f5c2c7}.alert-danger 
.alert-link{color:#6a1a21}.alert-light{color:#636464;background-color:#fefefe;border-color:#fdfdfe}.alert-light .alert-link{color:#4f5050}.alert-dark{color:#141619;background-color:#d3d3d4;border-color:#bcbebf}.alert-dark .alert-link{color:#101214}@-webkit-keyframes progress-bar-stripes{0%{background-position-x:1rem}}@keyframes progress-bar-stripes{0%{background-position-x:1rem}}.progress{display:flex;height:1rem;overflow:hidden;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress-bar{display:flex;flex-direction:column;justify-content:center;overflow:hidden;color:#fff;text-align:center;white-space:nowrap;background-color:#0d6efd;transition:width .6s ease}@media (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:1s linear infinite progress-bar-stripes;animation:1s linear infinite progress-bar-stripes}@media (prefers-reduced-motion:reduce){.progress-bar-animated{-webkit-animation:none;animation:none}}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-numbered{list-style-type:none;counter-reset:section}.list-group-numbered>li::before{content:counters(section, ".") ". 
";counter-increment:section}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.5rem 1rem;color:#212529;text-decoration:none;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#0d6efd;border-color:#0d6efd}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:-1px;border-top-width:1px}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}@media 
(min-width:576px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:992px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media 
(min-width:1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:1400px){.list-group-horizontal-xxl{flex-direction:row}.list-group-horizontal-xxl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xxl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xxl>.list-group-item.active{margin-top:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 
1px}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#084298;background-color:#cfe2ff}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#084298;background-color:#bacbe6}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#084298;border-color:#084298}.list-group-item-secondary{color:#41464b;background-color:#e2e3e5}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#41464b;background-color:#cbccce}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#41464b;border-color:#41464b}.list-group-item-success{color:#0f5132;background-color:#d1e7dd}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#0f5132;background-color:#bcd0c7}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#0f5132;border-color:#0f5132}.list-group-item-info{color:#055160;background-color:#cff4fc}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#055160;background-color:#badce3}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#055160;border-color:#055160}.list-group-item-warning{color:#664d03;background-color:#fff3cd}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#664d03;background-color:#e6dbb9}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#664d03;border-color:#664d03}.list-group-item-danger{color:#842029;background-color:#f8d7da}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#842029;background-color:#dfc2c4}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#842029;border-color:#84202
9}.list-group-item-light{color:#636464;background-color:#fefefe}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#636464;background-color:#e5e5e5}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#636464;border-color:#636464}.list-group-item-dark{color:#141619;background-color:#d3d3d4}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#141619;background-color:#bebebf}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#141619;border-color:#141619}.btn-close{box-sizing:content-box;width:1em;height:1em;padding:.25em .25em;color:#000;background:transparent url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 011.414 0L8 6.586 14.293.293a1 1 0 111.414 1.414L9.414 8l6.293 6.293a1 1 0 01-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 01-1.414-1.414L6.586 8 .293 1.707a1 1 0 010-1.414z'/%3e%3c/svg%3e") center/1em auto no-repeat;border:0;border-radius:.25rem;opacity:.5}.btn-close:hover{color:#000;text-decoration:none;opacity:.75}.btn-close:focus{outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25);opacity:1}.btn-close.disabled,.btn-close:disabled{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;opacity:.25}.btn-close-white{filter:invert(1) grayscale(100%) brightness(200%)}.toast{width:350px;max-width:100%;font-size:.875rem;pointer-events:auto;background-color:rgba(255,255,255,.85);background-clip:padding-box;border:1px solid rgba(0,0,0,.1);box-shadow:0 .5rem 1rem rgba(0,0,0,.15);border-radius:.25rem}.toast.showing{opacity:0}.toast:not(.show){display:none}.toast-container{width:-webkit-max-content;width:-moz-max-content;width:max-content;max-width:100%;pointer-events:none}.toast-container>:not(:last-child){margin-bottom:.75rem}.toast-header{display:flex;align-items:center;padding:.5rem 
.75rem;color:#6c757d;background-color:rgba(255,255,255,.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,.05);border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.toast-header .btn-close{margin-right:-.375rem;margin-left:.75rem}.toast-body{padding:.75rem;word-wrap:break-word}.modal{position:fixed;top:0;left:0;z-index:1055;display:none;width:100%;height:100%;overflow-x:hidden;overflow-y:auto;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translate(0,-50px)}@media (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1050;width:100vw;height:100vh;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;flex-shrink:0;align-items:center;justify-content:space-between;padding:1rem 1rem;border-bottom:1px solid #dee2e6;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.modal-header .btn-close{padding:.5rem .5rem;margin:-.5rem -.5rem -.5rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 auto;padding:1rem}.modal-footer{display:flex;flex-wrap:wrap;flex-shrink:0;align-items:center;justify-content:flex-end;padding:.75rem;border-top:1px solid 
#dee2e6;border-bottom-right-radius:calc(.3rem - 1px);border-bottom-left-radius:calc(.3rem - 1px)}.modal-footer>*{margin:.25rem}@media (min-width:576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{height:calc(100% - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-sm{max-width:300px}}@media (min-width:992px){.modal-lg,.modal-xl{max-width:800px}}@media (min-width:1200px){.modal-xl{max-width:1140px}}.modal-fullscreen{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen .modal-header{border-radius:0}.modal-fullscreen .modal-body{overflow-y:auto}.modal-fullscreen .modal-footer{border-radius:0}@media (max-width:575.98px){.modal-fullscreen-sm-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-sm-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-sm-down .modal-header{border-radius:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}.modal-fullscreen-sm-down .modal-footer{border-radius:0}}@media (max-width:767.98px){.modal-fullscreen-md-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-md-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-md-down .modal-header{border-radius:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}.modal-fullscreen-md-down .modal-footer{border-radius:0}}@media (max-width:991.98px){.modal-fullscreen-lg-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-lg-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-lg-down .modal-header{border-radius:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}.modal-fullscreen-lg-down .modal-footer{border-radius:0}}@media (max-width:1199.98px){.modal-fullscreen-xl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xl-down 
.modal-header{border-radius:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}.modal-fullscreen-xl-down .modal-footer{border-radius:0}}@media (max-width:1399.98px){.modal-fullscreen-xxl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xxl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xxl-down .modal-header{border-radius:0}.modal-fullscreen-xxl-down .modal-body{overflow-y:auto}.modal-fullscreen-xxl-down .modal-footer{border-radius:0}}.tooltip{position:absolute;z-index:1080;display:block;margin:0;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .tooltip-arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .tooltip-arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[data-popper-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow,.bs-tooltip-top .tooltip-arrow{bottom:0}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow::before,.bs-tooltip-top .tooltip-arrow::before{top:-1px;border-width:.4rem .4rem 0;border-top-color:#000}.bs-tooltip-auto[data-popper-placement^=right],.bs-tooltip-end{padding:0 .4rem}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow,.bs-tooltip-end .tooltip-arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow::before,.bs-tooltip-end .tooltip-arrow::before{right:-1px;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-auto[data-popper-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow,.bs-tooltip-bottom 
.tooltip-arrow{top:0}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow::before,.bs-tooltip-bottom .tooltip-arrow::before{bottom:-1px;border-width:0 .4rem .4rem;border-bottom-color:#000}.bs-tooltip-auto[data-popper-placement^=left],.bs-tooltip-start{padding:0 .4rem}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow,.bs-tooltip-start .tooltip-arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow::before,.bs-tooltip-start .tooltip-arrow::before{left:-1px;border-width:.4rem 0 .4rem .4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{position:absolute;top:0;left:0;z-index:1070;display:block;max-width:276px;font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem}.popover .popover-arrow{position:absolute;display:block;width:1rem;height:.5rem}.popover .popover-arrow::after,.popover .popover-arrow::before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow,.bs-popover-top>.popover-arrow{bottom:calc(-.5rem - 1px)}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before,.bs-popover-top>.popover-arrow::before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after,.bs-popover-top>.popover-arrow::after{bottom:1px;border-width:.5rem .5rem 
0;border-top-color:#fff}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow,.bs-popover-end>.popover-arrow{left:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before,.bs-popover-end>.popover-arrow::before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after,.bs-popover-end>.popover-arrow::after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow,.bs-popover-bottom>.popover-arrow{top:calc(-.5rem - 1px)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before,.bs-popover-bottom>.popover-arrow::before{top:0;border-width:0 .5rem .5rem .5rem;border-bottom-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after,.bs-popover-bottom>.popover-arrow::after{top:1px;border-width:0 .5rem .5rem .5rem;border-bottom-color:#fff}.bs-popover-auto[data-popper-placement^=bottom] .popover-header::before,.bs-popover-bottom .popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f0f0f0}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow,.bs-popover-start>.popover-arrow{right:calc(-.5rem - 1px);width:.5rem;height:1rem}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before,.bs-popover-start>.popover-arrow::before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(0,0,0,.25)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after,.bs-popover-start>.popover-arrow::after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem 1rem;margin-bottom:0;font-size:1rem;background-color:#f0f0f0;border-bottom:1px solid rgba(0,0,0,.2);border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 
1px)}.popover-header:empty{display:none}.popover-body{padding:1rem 1rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner::after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:transform .6s ease-in-out}@media (prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-end,.carousel-item-next:not(.carousel-item-start){transform:translateX(100%)}.active.carousel-item-start,.carousel-item-prev:not(.carousel-item-end){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{z-index:0;opacity:0;transition:opacity 0s .6s}@media (prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{transition:none}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;padding:0;color:#fff;text-align:center;background:0 0;border:0;opacity:.5;transition:opacity .15s ease}@media 
(prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:2rem;height:2rem;background-repeat:no-repeat;background-position:50%;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")}.carousel-control-next-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:2;display:flex;justify-content:center;padding:0;margin-right:15%;margin-bottom:1rem;margin-left:15%;list-style:none}.carousel-indicators [data-bs-target]{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;padding:0;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border:0;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity .6s ease}@media (prefers-reduced-motion:reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:1.25rem;left:15%;padding-top:1.25rem;padding-bottom:1.25rem;color:#fff;text-align:center}.carousel-dark .carousel-control-next-icon,.carousel-dark .carousel-control-prev-icon{filter:invert(1) 
grayscale(100)}.carousel-dark .carousel-indicators [data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}@-webkit-keyframes spinner-border{to{transform:rotate(360deg)}}@keyframes spinner-border{to{transform:rotate(360deg)}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;border:.25em solid currentColor;border-right-color:transparent;border-radius:50%;-webkit-animation:.75s linear infinite spinner-border;animation:.75s linear infinite spinner-border}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@-webkit-keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:-.125em;background-color:currentColor;border-radius:50%;opacity:0;-webkit-animation:.75s linear infinite spinner-grow;animation:.75s linear infinite spinner-grow}.spinner-grow-sm{width:1rem;height:1rem}@media (prefers-reduced-motion:reduce){.spinner-border,.spinner-grow{-webkit-animation-duration:1.5s;animation-duration:1.5s}}.offcanvas{position:fixed;bottom:0;z-index:1045;display:flex;flex-direction:column;max-width:100%;visibility:hidden;background-color:#fff;background-clip:padding-box;outline:0;transition:transform .3s ease-in-out}@media (prefers-reduced-motion:reduce){.offcanvas{transition:none}}.offcanvas-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.offcanvas-backdrop.fade{opacity:0}.offcanvas-backdrop.show{opacity:.5}.offcanvas-header{display:flex;align-items:center;justify-content:space-between;padding:1rem 1rem}.offcanvas-header .btn-close{padding:.5rem .5rem;margin-top:-.5rem;margin-right:-.5rem;margin-bottom:-.5rem}.offcanvas-title{margin-bottom:0;line-height:1.5}.offcanvas-body{flex-grow:1;padding:1rem 1rem;overflow-y:auto}.offcanvas-start{top:0;left:0;width:400px;border-right:1px solid 
rgba(0,0,0,.2);transform:translateX(-100%)}.offcanvas-end{top:0;right:0;width:400px;border-left:1px solid rgba(0,0,0,.2);transform:translateX(100%)}.offcanvas-top{top:0;right:0;left:0;height:30vh;max-height:100%;border-bottom:1px solid rgba(0,0,0,.2);transform:translateY(-100%)}.offcanvas-bottom{right:0;left:0;height:30vh;max-height:100%;border-top:1px solid rgba(0,0,0,.2);transform:translateY(100%)}.offcanvas.show{transform:none}.placeholder{display:inline-block;min-height:1em;vertical-align:middle;cursor:wait;background-color:currentColor;opacity:.5}.placeholder.btn::before{display:inline-block;content:""}.placeholder-xs{min-height:.6em}.placeholder-sm{min-height:.8em}.placeholder-lg{min-height:1.2em}.placeholder-glow .placeholder{-webkit-animation:placeholder-glow 2s ease-in-out infinite;animation:placeholder-glow 2s ease-in-out infinite}@-webkit-keyframes placeholder-glow{50%{opacity:.2}}@keyframes placeholder-glow{50%{opacity:.2}}.placeholder-wave{-webkit-mask-image:linear-gradient(130deg,#000 55%,rgba(0,0,0,0.8) 75%,#000 95%);mask-image:linear-gradient(130deg,#000 55%,rgba(0,0,0,0.8) 75%,#000 95%);-webkit-mask-size:200% 100%;mask-size:200% 100%;-webkit-animation:placeholder-wave 2s linear infinite;animation:placeholder-wave 2s linear infinite}@-webkit-keyframes placeholder-wave{100%{-webkit-mask-position:-200% 0%;mask-position:-200% 0%}}@keyframes placeholder-wave{100%{-webkit-mask-position:-200% 0%;mask-position:-200% 
0%}}.clearfix::after{display:block;clear:both;content:""}.link-primary{color:#0d6efd}.link-primary:focus,.link-primary:hover{color:#0a58ca}.link-secondary{color:#6c757d}.link-secondary:focus,.link-secondary:hover{color:#565e64}.link-success{color:#198754}.link-success:focus,.link-success:hover{color:#146c43}.link-info{color:#0dcaf0}.link-info:focus,.link-info:hover{color:#3dd5f3}.link-warning{color:#ffc107}.link-warning:focus,.link-warning:hover{color:#ffcd39}.link-danger{color:#dc3545}.link-danger:focus,.link-danger:hover{color:#b02a37}.link-light{color:#f8f9fa}.link-light:focus,.link-light:hover{color:#f9fafb}.link-dark{color:#212529}.link-dark:focus,.link-dark:hover{color:#1a1e21}.ratio{position:relative;width:100%}.ratio::before{display:block;padding-top:var(--bs-aspect-ratio);content:""}.ratio>*{position:absolute;top:0;left:0;width:100%;height:100%}.ratio-1x1{--bs-aspect-ratio:100%}.ratio-4x3{--bs-aspect-ratio:75%}.ratio-16x9{--bs-aspect-ratio:56.25%}.ratio-21x9{--bs-aspect-ratio:42.8571428571%}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}@media (min-width:576px){.sticky-sm-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:768px){.sticky-md-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:992px){.sticky-lg-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:1200px){.sticky-xl-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}@media (min-width:1400px){.sticky-xxl-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}.hstack{display:flex;flex-direction:row;align-items:center;align-self:stretch}.vstack{display:flex;flex:1 1 
auto;flex-direction:column;align-self:stretch}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){position:absolute!important;width:1px!important;height:1px!important;padding:0!important;margin:-1px!important;overflow:hidden!important;clip:rect(0,0,0,0)!important;white-space:nowrap!important;border:0!important}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.vr{display:inline-block;align-self:stretch;width:1px;min-height:1em;background-color:currentColor;opacity:.25}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.float-start{float:left!important}.float-end{float:right!important}.float-none{float:none!important}.opacity-0{opacity:0!important}.opacity-25{opacity:.25!important}.opacity-50{opacity:.5!important}.opacity-75{opacity:.75!important}.opacity-100{opacity:1!important}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.overflow-visible{overflow:visible!important}.overflow-scroll{overflow:scroll!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-grid{display:grid!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}.d-none{display:none!important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15)!important}.shadow-sm{box-shadow:0 .125rem .25rem rgba(0,0,0,.075)!important}.shadow-lg{box-shadow:0 1rem 3rem 
rgba(0,0,0,.175)!important}.shadow-none{box-shadow:none!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:-webkit-sticky!important;position:sticky!important}.top-0{top:0!important}.top-50{top:50%!important}.top-100{top:100%!important}.bottom-0{bottom:0!important}.bottom-50{bottom:50%!important}.bottom-100{bottom:100%!important}.start-0{left:0!important}.start-50{left:50%!important}.start-100{left:100%!important}.end-0{right:0!important}.end-50{right:50%!important}.end-100{right:100%!important}.translate-middle{transform:translate(-50%,-50%)!important}.translate-middle-x{transform:translateX(-50%)!important}.translate-middle-y{transform:translateY(-50%)!important}.border{border:1px solid #dee2e6!important}.border-0{border:0!important}.border-top{border-top:1px solid #dee2e6!important}.border-top-0{border-top:0!important}.border-end{border-right:1px solid #dee2e6!important}.border-end-0{border-right:0!important}.border-bottom{border-bottom:1px solid #dee2e6!important}.border-bottom-0{border-bottom:0!important}.border-start{border-left:1px solid 
#dee2e6!important}.border-start-0{border-left:0!important}.border-primary{border-color:#0d6efd!important}.border-secondary{border-color:#6c757d!important}.border-success{border-color:#198754!important}.border-info{border-color:#0dcaf0!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f8f9fa!important}.border-dark{border-color:#212529!important}.border-white{border-color:#fff!important}.border-1{border-width:1px!important}.border-2{border-width:2px!important}.border-3{border-width:3px!important}.border-4{border-width:4px!important}.border-5{border-width:5px!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.mw-100{max-width:100%!important}.vw-100{width:100vw!important}.min-vw-100{min-width:100vw!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mh-100{max-height:100%!important}.vh-100{height:100vh!important}.min-vh-100{min-height:100vh!important}.flex-fill{flex:1 1 
auto!important}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-0{gap:0!important}.gap-1{gap:.25rem!important}.gap-2{gap:.5rem!important}.gap-3{gap:1rem!important}.gap-4{gap:1.5rem!important}.gap-5{gap:3rem!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.justify-content-evenly{justify-content:space-evenly!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}.order-first{order:-1!important}.order-0{order:0!important}.order-1{order:1!important}.order-2{order:2!important
}.order-3{order:3!important}.order-4{order:4!important}.order-5{order:5!important}.order-last{order:6!important}.m-0{margin:0!important}.m-1{margin:.25rem!important}.m-2{margin:.5rem!important}.m-3{margin:1rem!important}.m-4{margin:1.5rem!important}.m-5{margin:3rem!important}.m-auto{margin:auto!important}.mx-0{margin-right:0!important;margin-left:0!important}.mx-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-3{margin-right:1rem!important;margin-left:1rem!important}.mx-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-5{margin-right:3rem!important;margin-left:3rem!important}.mx-auto{margin-right:auto!important;margin-left:auto!important}.my-0{margin-top:0!important;margin-bottom:0!important}.my-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-0{margin-top:0!important}.mt-1{margin-top:.25rem!important}.mt-2{margin-top:.5rem!important}.mt-3{margin-top:1rem!important}.mt-4{margin-top:1.5rem!important}.mt-5{margin-top:3rem!important}.mt-auto{margin-top:auto!important}.me-0{margin-right:0!important}.me-1{margin-right:.25rem!important}.me-2{margin-right:.5rem!important}.me-3{margin-right:1rem!important}.me-4{margin-right:1.5rem!important}.me-5{margin-right:3rem!important}.me-auto{margin-right:auto!important}.mb-0{margin-bottom:0!important}.mb-1{margin-bottom:.25rem!important}.mb-2{margin-bottom:.5rem!important}.mb-3{margin-bottom:1rem!important}.mb-4{margin-bottom:1.5rem!important}.mb-5{margin-bottom:3rem!important}.mb-auto{margin-bottom:auto!important}.ms-0{margin-left:0!important}.ms-1{margin-left:.25rem!important}.ms-2{margin-left:.5rem!important}.ms-3
{margin-left:1rem!important}.ms-4{margin-left:1.5rem!important}.ms-5{margin-left:3rem!important}.ms-auto{margin-left:auto!important}.p-0{padding:0!important}.p-1{padding:.25rem!important}.p-2{padding:.5rem!important}.p-3{padding:1rem!important}.p-4{padding:1.5rem!important}.p-5{padding:3rem!important}.px-0{padding-right:0!important;padding-left:0!important}.px-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-3{padding-right:1rem!important;padding-left:1rem!important}.px-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-5{padding-right:3rem!important;padding-left:3rem!important}.py-0{padding-top:0!important;padding-bottom:0!important}.py-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-0{padding-top:0!important}.pt-1{padding-top:.25rem!important}.pt-2{padding-top:.5rem!important}.pt-3{padding-top:1rem!important}.pt-4{padding-top:1.5rem!important}.pt-5{padding-top:3rem!important}.pe-0{padding-right:0!important}.pe-1{padding-right:.25rem!important}.pe-2{padding-right:.5rem!important}.pe-3{padding-right:1rem!important}.pe-4{padding-right:1.5rem!important}.pe-5{padding-right:3rem!important}.pb-0{padding-bottom:0!important}.pb-1{padding-bottom:.25rem!important}.pb-2{padding-bottom:.5rem!important}.pb-3{padding-bottom:1rem!important}.pb-4{padding-bottom:1.5rem!important}.pb-5{padding-bottom:3rem!important}.ps-0{padding-left:0!important}.ps-1{padding-left:.25rem!important}.ps-2{padding-left:.5rem!important}.ps-3{padding-left:1rem!important}.ps-4{padding-left:1.5rem!important}.ps-5{padding-left:3rem!important}.font-monospace{font-family:var(--bs-font-monospace)!important}.fs-1{font-size:calc(1.375rem + 
1.5vw)!important}.fs-2{font-size:calc(1.325rem + .9vw)!important}.fs-3{font-size:calc(1.3rem + .6vw)!important}.fs-4{font-size:calc(1.275rem + .3vw)!important}.fs-5{font-size:1.25rem!important}.fs-6{font-size:1rem!important}.fst-italic{font-style:italic!important}.fst-normal{font-style:normal!important}.fw-light{font-weight:300!important}.fw-lighter{font-weight:lighter!important}.fw-normal{font-weight:400!important}.fw-bold{font-weight:700!important}.fw-bolder{font-weight:bolder!important}.lh-1{line-height:1!important}.lh-sm{line-height:1.25!important}.lh-base{line-height:1.5!important}.lh-lg{line-height:2!important}.text-start{text-align:left!important}.text-end{text-align:right!important}.text-center{text-align:center!important}.text-decoration-none{text-decoration:none!important}.text-decoration-underline{text-decoration:underline!important}.text-decoration-line-through{text-decoration:line-through!important}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-break{word-wrap:break-word!important;word-break:break-word!important}.text-primary{--bs-text-opacity:1;color:rgba(var(--bs-primary-rgb),var(--bs-text-opacity))!important}.text-secondary{--bs-text-opacity:1;color:rgba(var(--bs-secondary-rgb),var(--bs-text-opacity))!important}.text-success{--bs-text-opacity:1;color:rgba(var(--bs-success-rgb),var(--bs-text-opacity))!important}.text-info{--bs-text-opacity:1;color:rgba(var(--bs-info-rgb),var(--bs-text-opacity))!important}.text-warning{--bs-text-opacity:1;color:rgba(var(--bs-warning-rgb),var(--bs-text-opacity))!important}.text-danger{--bs-text-opacity:1;color:rgba(var(--bs-danger-rgb),var(--bs-text-opacity))!important}.text-light{--bs-text-opacity:1;color:rgba(var(--bs-light-rgb),var(--bs-text-opacity))!important}.text-dark{--bs-text-opacity:1;color:rgba(var(--bs-dark-rgb),var
(--bs-text-opacity))!important}.text-black{--bs-text-opacity:1;color:rgba(var(--bs-black-rgb),var(--bs-text-opacity))!important}.text-white{--bs-text-opacity:1;color:rgba(var(--bs-white-rgb),var(--bs-text-opacity))!important}.text-body{--bs-text-opacity:1;color:rgba(var(--bs-body-color-rgb),var(--bs-text-opacity))!important}.text-muted{--bs-text-opacity:1;color:#6c757d!important}.text-black-50{--bs-text-opacity:1;color:rgba(0,0,0,.5)!important}.text-white-50{--bs-text-opacity:1;color:rgba(255,255,255,.5)!important}.text-reset{--bs-text-opacity:1;color:inherit!important}.text-opacity-25{--bs-text-opacity:0.25}.text-opacity-50{--bs-text-opacity:0.5}.text-opacity-75{--bs-text-opacity:0.75}.text-opacity-100{--bs-text-opacity:1}.bg-primary{--bs-bg-opacity:1;background-color:rgba(var(--bs-primary-rgb),var(--bs-bg-opacity))!important}.bg-secondary{--bs-bg-opacity:1;background-color:rgba(var(--bs-secondary-rgb),var(--bs-bg-opacity))!important}.bg-success{--bs-bg-opacity:1;background-color:rgba(var(--bs-success-rgb),var(--bs-bg-opacity))!important}.bg-info{--bs-bg-opacity:1;background-color:rgba(var(--bs-info-rgb),var(--bs-bg-opacity))!important}.bg-warning{--bs-bg-opacity:1;background-color:rgba(var(--bs-warning-rgb),var(--bs-bg-opacity))!important}.bg-danger{--bs-bg-opacity:1;background-color:rgba(var(--bs-danger-rgb),var(--bs-bg-opacity))!important}.bg-light{--bs-bg-opacity:1;background-color:rgba(var(--bs-light-rgb),var(--bs-bg-opacity))!important}.bg-dark{--bs-bg-opacity:1;background-color:rgba(var(--bs-dark-rgb),var(--bs-bg-opacity))!important}.bg-black{--bs-bg-opacity:1;background-color:rgba(var(--bs-black-rgb),var(--bs-bg-opacity))!important}.bg-white{--bs-bg-opacity:1;background-color:rgba(var(--bs-white-rgb),var(--bs-bg-opacity))!important}.bg-body{--bs-bg-opacity:1;background-color:rgba(var(--bs-body-bg-rgb),var(--bs-bg-opacity))!important}.bg-transparent{--bs-bg-opacity:1;background-color:transparent!important}.bg-opacity-10{--bs-bg-opacity:0.1}.bg-opacity-25{--b
s-bg-opacity:0.25}.bg-opacity-50{--bs-bg-opacity:0.5}.bg-opacity-75{--bs-bg-opacity:0.75}.bg-opacity-100{--bs-bg-opacity:1}.bg-gradient{background-image:var(--bs-gradient)!important}.user-select-all{-webkit-user-select:all!important;-moz-user-select:all!important;user-select:all!important}.user-select-auto{-webkit-user-select:auto!important;-moz-user-select:auto!important;user-select:auto!important}.user-select-none{-webkit-user-select:none!important;-moz-user-select:none!important;user-select:none!important}.pe-none{pointer-events:none!important}.pe-auto{pointer-events:auto!important}.rounded{border-radius:.25rem!important}.rounded-0{border-radius:0!important}.rounded-1{border-radius:.2rem!important}.rounded-2{border-radius:.25rem!important}.rounded-3{border-radius:.3rem!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:50rem!important}.rounded-top{border-top-left-radius:.25rem!important;border-top-right-radius:.25rem!important}.rounded-end{border-top-right-radius:.25rem!important;border-bottom-right-radius:.25rem!important}.rounded-bottom{border-bottom-right-radius:.25rem!important;border-bottom-left-radius:.25rem!important}.rounded-start{border-bottom-left-radius:.25rem!important;border-top-left-radius:.25rem!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media (min-width:576px){.float-sm-start{float:left!important}.float-sm-end{float:right!important}.float-sm-none{float:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-grid{display:grid!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}.d-sm-none{display:none!important}.flex-sm-fill{flex:1 1 
auto!important}.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-sm-0{gap:0!important}.gap-sm-1{gap:.25rem!important}.gap-sm-2{gap:.5rem!important}.gap-sm-3{gap:1rem!important}.gap-sm-4{gap:1.5rem!important}.gap-sm-5{gap:3rem!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.justify-content-sm-evenly{justify-content:space-evenly!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!im
portant}.order-sm-first{order:-1!important}.order-sm-0{order:0!important}.order-sm-1{order:1!important}.order-sm-2{order:2!important}.order-sm-3{order:3!important}.order-sm-4{order:4!important}.order-sm-5{order:5!important}.order-sm-last{order:6!important}.m-sm-0{margin:0!important}.m-sm-1{margin:.25rem!important}.m-sm-2{margin:.5rem!important}.m-sm-3{margin:1rem!important}.m-sm-4{margin:1.5rem!important}.m-sm-5{margin:3rem!important}.m-sm-auto{margin:auto!important}.mx-sm-0{margin-right:0!important;margin-left:0!important}.mx-sm-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-sm-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-sm-3{margin-right:1rem!important;margin-left:1rem!important}.mx-sm-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-sm-5{margin-right:3rem!important;margin-left:3rem!important}.mx-sm-auto{margin-right:auto!important;margin-left:auto!important}.my-sm-0{margin-top:0!important;margin-bottom:0!important}.my-sm-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-sm-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-sm-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-sm-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-sm-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-sm-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-sm-0{margin-top:0!important}.mt-sm-1{margin-top:.25rem!important}.mt-sm-2{margin-top:.5rem!important}.mt-sm-3{margin-top:1rem!important}.mt-sm-4{margin-top:1.5rem!important}.mt-sm-5{margin-top:3rem!important}.mt-sm-auto{margin-top:auto!important}.me-sm-0{margin-right:0!important}.me-sm-1{margin-right:.25rem!important}.me-sm-2{margin-right:.5rem!important}.me-sm-3{margin-right:1rem!important}.me-sm-4{margin-right:1.5rem!important}.me-sm-5{margin-right:3rem!important}.me-sm-auto{margin-right:auto!important}.mb-sm-0{margin-bottom:0!important}.mb-sm-1{margin-bottom:.25rem!important}.mb-sm-2{margin-bottom:.5rem!i
mportant}.mb-sm-3{margin-bottom:1rem!important}.mb-sm-4{margin-bottom:1.5rem!important}.mb-sm-5{margin-bottom:3rem!important}.mb-sm-auto{margin-bottom:auto!important}.ms-sm-0{margin-left:0!important}.ms-sm-1{margin-left:.25rem!important}.ms-sm-2{margin-left:.5rem!important}.ms-sm-3{margin-left:1rem!important}.ms-sm-4{margin-left:1.5rem!important}.ms-sm-5{margin-left:3rem!important}.ms-sm-auto{margin-left:auto!important}.p-sm-0{padding:0!important}.p-sm-1{padding:.25rem!important}.p-sm-2{padding:.5rem!important}.p-sm-3{padding:1rem!important}.p-sm-4{padding:1.5rem!important}.p-sm-5{padding:3rem!important}.px-sm-0{padding-right:0!important;padding-left:0!important}.px-sm-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-sm-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-sm-3{padding-right:1rem!important;padding-left:1rem!important}.px-sm-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-sm-5{padding-right:3rem!important;padding-left:3rem!important}.py-sm-0{padding-top:0!important;padding-bottom:0!important}.py-sm-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-sm-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-sm-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-sm-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-sm-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-sm-0{padding-top:0!important}.pt-sm-1{padding-top:.25rem!important}.pt-sm-2{padding-top:.5rem!important}.pt-sm-3{padding-top:1rem!important}.pt-sm-4{padding-top:1.5rem!important}.pt-sm-5{padding-top:3rem!important}.pe-sm-0{padding-right:0!important}.pe-sm-1{padding-right:.25rem!important}.pe-sm-2{padding-right:.5rem!important}.pe-sm-3{padding-right:1rem!important}.pe-sm-4{padding-right:1.5rem!important}.pe-sm-5{padding-right:3rem!important}.pb-sm-0{padding-bottom:0!important}.pb-sm-1{padding-bottom:.25rem!important}.pb-sm-2{padding-bottom:.5rem!important}.pb-sm-3{padding-bottom:
1rem!important}.pb-sm-4{padding-bottom:1.5rem!important}.pb-sm-5{padding-bottom:3rem!important}.ps-sm-0{padding-left:0!important}.ps-sm-1{padding-left:.25rem!important}.ps-sm-2{padding-left:.5rem!important}.ps-sm-3{padding-left:1rem!important}.ps-sm-4{padding-left:1.5rem!important}.ps-sm-5{padding-left:3rem!important}.text-sm-start{text-align:left!important}.text-sm-end{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:768px){.float-md-start{float:left!important}.float-md-end{float:right!important}.float-md-none{float:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-grid{display:grid!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}.d-md-none{display:none!important}.flex-md-fill{flex:1 1 
auto!important}.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-md-0{gap:0!important}.gap-md-1{gap:.25rem!important}.gap-md-2{gap:.5rem!important}.gap-md-3{gap:1rem!important}.gap-md-4{gap:1.5rem!important}.gap-md-5{gap:3rem!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.justify-content-md-evenly{justify-content:space-evenly!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!im
portant}.order-md-first{order:-1!important}.order-md-0{order:0!important}.order-md-1{order:1!important}.order-md-2{order:2!important}.order-md-3{order:3!important}.order-md-4{order:4!important}.order-md-5{order:5!important}.order-md-last{order:6!important}.m-md-0{margin:0!important}.m-md-1{margin:.25rem!important}.m-md-2{margin:.5rem!important}.m-md-3{margin:1rem!important}.m-md-4{margin:1.5rem!important}.m-md-5{margin:3rem!important}.m-md-auto{margin:auto!important}.mx-md-0{margin-right:0!important;margin-left:0!important}.mx-md-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-md-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-md-3{margin-right:1rem!important;margin-left:1rem!important}.mx-md-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-md-5{margin-right:3rem!important;margin-left:3rem!important}.mx-md-auto{margin-right:auto!important;margin-left:auto!important}.my-md-0{margin-top:0!important;margin-bottom:0!important}.my-md-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-md-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-md-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-md-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-md-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-md-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-md-0{margin-top:0!important}.mt-md-1{margin-top:.25rem!important}.mt-md-2{margin-top:.5rem!important}.mt-md-3{margin-top:1rem!important}.mt-md-4{margin-top:1.5rem!important}.mt-md-5{margin-top:3rem!important}.mt-md-auto{margin-top:auto!important}.me-md-0{margin-right:0!important}.me-md-1{margin-right:.25rem!important}.me-md-2{margin-right:.5rem!important}.me-md-3{margin-right:1rem!important}.me-md-4{margin-right:1.5rem!important}.me-md-5{margin-right:3rem!important}.me-md-auto{margin-right:auto!important}.mb-md-0{margin-bottom:0!important}.mb-md-1{margin-bottom:.25rem!important}.mb-md-2{margin-bottom:.5rem!i
mportant}.mb-md-3{margin-bottom:1rem!important}.mb-md-4{margin-bottom:1.5rem!important}.mb-md-5{margin-bottom:3rem!important}.mb-md-auto{margin-bottom:auto!important}.ms-md-0{margin-left:0!important}.ms-md-1{margin-left:.25rem!important}.ms-md-2{margin-left:.5rem!important}.ms-md-3{margin-left:1rem!important}.ms-md-4{margin-left:1.5rem!important}.ms-md-5{margin-left:3rem!important}.ms-md-auto{margin-left:auto!important}.p-md-0{padding:0!important}.p-md-1{padding:.25rem!important}.p-md-2{padding:.5rem!important}.p-md-3{padding:1rem!important}.p-md-4{padding:1.5rem!important}.p-md-5{padding:3rem!important}.px-md-0{padding-right:0!important;padding-left:0!important}.px-md-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-md-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-md-3{padding-right:1rem!important;padding-left:1rem!important}.px-md-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-md-5{padding-right:3rem!important;padding-left:3rem!important}.py-md-0{padding-top:0!important;padding-bottom:0!important}.py-md-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-md-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-md-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-md-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-md-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-md-0{padding-top:0!important}.pt-md-1{padding-top:.25rem!important}.pt-md-2{padding-top:.5rem!important}.pt-md-3{padding-top:1rem!important}.pt-md-4{padding-top:1.5rem!important}.pt-md-5{padding-top:3rem!important}.pe-md-0{padding-right:0!important}.pe-md-1{padding-right:.25rem!important}.pe-md-2{padding-right:.5rem!important}.pe-md-3{padding-right:1rem!important}.pe-md-4{padding-right:1.5rem!important}.pe-md-5{padding-right:3rem!important}.pb-md-0{padding-bottom:0!important}.pb-md-1{padding-bottom:.25rem!important}.pb-md-2{padding-bottom:.5rem!important}.pb-md-3{padding-bottom:
1rem!important}.pb-md-4{padding-bottom:1.5rem!important}.pb-md-5{padding-bottom:3rem!important}.ps-md-0{padding-left:0!important}.ps-md-1{padding-left:.25rem!important}.ps-md-2{padding-left:.5rem!important}.ps-md-3{padding-left:1rem!important}.ps-md-4{padding-left:1.5rem!important}.ps-md-5{padding-left:3rem!important}.text-md-start{text-align:left!important}.text-md-end{text-align:right!important}.text-md-center{text-align:center!important}}@media (min-width:992px){.float-lg-start{float:left!important}.float-lg-end{float:right!important}.float-lg-none{float:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-grid{display:grid!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}.d-lg-none{display:none!important}.flex-lg-fill{flex:1 1 
auto!important}.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-lg-0{gap:0!important}.gap-lg-1{gap:.25rem!important}.gap-lg-2{gap:.5rem!important}.gap-lg-3{gap:1rem!important}.gap-lg-4{gap:1.5rem!important}.gap-lg-5{gap:3rem!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.justify-content-lg-evenly{justify-content:space-evenly!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!im
portant}.order-lg-first{order:-1!important}.order-lg-0{order:0!important}.order-lg-1{order:1!important}.order-lg-2{order:2!important}.order-lg-3{order:3!important}.order-lg-4{order:4!important}.order-lg-5{order:5!important}.order-lg-last{order:6!important}.m-lg-0{margin:0!important}.m-lg-1{margin:.25rem!important}.m-lg-2{margin:.5rem!important}.m-lg-3{margin:1rem!important}.m-lg-4{margin:1.5rem!important}.m-lg-5{margin:3rem!important}.m-lg-auto{margin:auto!important}.mx-lg-0{margin-right:0!important;margin-left:0!important}.mx-lg-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-lg-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-lg-3{margin-right:1rem!important;margin-left:1rem!important}.mx-lg-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-lg-5{margin-right:3rem!important;margin-left:3rem!important}.mx-lg-auto{margin-right:auto!important;margin-left:auto!important}.my-lg-0{margin-top:0!important;margin-bottom:0!important}.my-lg-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-lg-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-lg-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-lg-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-lg-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-lg-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-lg-0{margin-top:0!important}.mt-lg-1{margin-top:.25rem!important}.mt-lg-2{margin-top:.5rem!important}.mt-lg-3{margin-top:1rem!important}.mt-lg-4{margin-top:1.5rem!important}.mt-lg-5{margin-top:3rem!important}.mt-lg-auto{margin-top:auto!important}.me-lg-0{margin-right:0!important}.me-lg-1{margin-right:.25rem!important}.me-lg-2{margin-right:.5rem!important}.me-lg-3{margin-right:1rem!important}.me-lg-4{margin-right:1.5rem!important}.me-lg-5{margin-right:3rem!important}.me-lg-auto{margin-right:auto!important}.mb-lg-0{margin-bottom:0!important}.mb-lg-1{margin-bottom:.25rem!important}.mb-lg-2{margin-bottom:.5rem!i
mportant}.mb-lg-3{margin-bottom:1rem!important}.mb-lg-4{margin-bottom:1.5rem!important}.mb-lg-5{margin-bottom:3rem!important}.mb-lg-auto{margin-bottom:auto!important}.ms-lg-0{margin-left:0!important}.ms-lg-1{margin-left:.25rem!important}.ms-lg-2{margin-left:.5rem!important}.ms-lg-3{margin-left:1rem!important}.ms-lg-4{margin-left:1.5rem!important}.ms-lg-5{margin-left:3rem!important}.ms-lg-auto{margin-left:auto!important}.p-lg-0{padding:0!important}.p-lg-1{padding:.25rem!important}.p-lg-2{padding:.5rem!important}.p-lg-3{padding:1rem!important}.p-lg-4{padding:1.5rem!important}.p-lg-5{padding:3rem!important}.px-lg-0{padding-right:0!important;padding-left:0!important}.px-lg-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-lg-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-lg-3{padding-right:1rem!important;padding-left:1rem!important}.px-lg-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-lg-5{padding-right:3rem!important;padding-left:3rem!important}.py-lg-0{padding-top:0!important;padding-bottom:0!important}.py-lg-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-lg-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-lg-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-lg-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-lg-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-lg-0{padding-top:0!important}.pt-lg-1{padding-top:.25rem!important}.pt-lg-2{padding-top:.5rem!important}.pt-lg-3{padding-top:1rem!important}.pt-lg-4{padding-top:1.5rem!important}.pt-lg-5{padding-top:3rem!important}.pe-lg-0{padding-right:0!important}.pe-lg-1{padding-right:.25rem!important}.pe-lg-2{padding-right:.5rem!important}.pe-lg-3{padding-right:1rem!important}.pe-lg-4{padding-right:1.5rem!important}.pe-lg-5{padding-right:3rem!important}.pb-lg-0{padding-bottom:0!important}.pb-lg-1{padding-bottom:.25rem!important}.pb-lg-2{padding-bottom:.5rem!important}.pb-lg-3{padding-bottom:
1rem!important}.pb-lg-4{padding-bottom:1.5rem!important}.pb-lg-5{padding-bottom:3rem!important}.ps-lg-0{padding-left:0!important}.ps-lg-1{padding-left:.25rem!important}.ps-lg-2{padding-left:.5rem!important}.ps-lg-3{padding-left:1rem!important}.ps-lg-4{padding-left:1.5rem!important}.ps-lg-5{padding-left:3rem!important}.text-lg-start{text-align:left!important}.text-lg-end{text-align:right!important}.text-lg-center{text-align:center!important}}@media (min-width:1200px){.float-xl-start{float:left!important}.float-xl-end{float:right!important}.float-xl-none{float:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-grid{display:grid!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}.d-xl-none{display:none!important}.flex-xl-fill{flex:1 1 
auto!important}.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-xl-0{gap:0!important}.gap-xl-1{gap:.25rem!important}.gap-xl-2{gap:.5rem!important}.gap-xl-3{gap:1rem!important}.gap-xl-4{gap:1.5rem!important}.gap-xl-5{gap:3rem!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.justify-content-xl-evenly{justify-content:space-evenly!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!im
portant}.order-xl-first{order:-1!important}.order-xl-0{order:0!important}.order-xl-1{order:1!important}.order-xl-2{order:2!important}.order-xl-3{order:3!important}.order-xl-4{order:4!important}.order-xl-5{order:5!important}.order-xl-last{order:6!important}.m-xl-0{margin:0!important}.m-xl-1{margin:.25rem!important}.m-xl-2{margin:.5rem!important}.m-xl-3{margin:1rem!important}.m-xl-4{margin:1.5rem!important}.m-xl-5{margin:3rem!important}.m-xl-auto{margin:auto!important}.mx-xl-0{margin-right:0!important;margin-left:0!important}.mx-xl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xl-auto{margin-right:auto!important;margin-left:auto!important}.my-xl-0{margin-top:0!important;margin-bottom:0!important}.my-xl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xl-0{margin-top:0!important}.mt-xl-1{margin-top:.25rem!important}.mt-xl-2{margin-top:.5rem!important}.mt-xl-3{margin-top:1rem!important}.mt-xl-4{margin-top:1.5rem!important}.mt-xl-5{margin-top:3rem!important}.mt-xl-auto{margin-top:auto!important}.me-xl-0{margin-right:0!important}.me-xl-1{margin-right:.25rem!important}.me-xl-2{margin-right:.5rem!important}.me-xl-3{margin-right:1rem!important}.me-xl-4{margin-right:1.5rem!important}.me-xl-5{margin-right:3rem!important}.me-xl-auto{margin-right:auto!important}.mb-xl-0{margin-bottom:0!important}.mb-xl-1{margin-bottom:.25rem!important}.mb-xl-2{margin-bottom:.5rem!i
mportant}.mb-xl-3{margin-bottom:1rem!important}.mb-xl-4{margin-bottom:1.5rem!important}.mb-xl-5{margin-bottom:3rem!important}.mb-xl-auto{margin-bottom:auto!important}.ms-xl-0{margin-left:0!important}.ms-xl-1{margin-left:.25rem!important}.ms-xl-2{margin-left:.5rem!important}.ms-xl-3{margin-left:1rem!important}.ms-xl-4{margin-left:1.5rem!important}.ms-xl-5{margin-left:3rem!important}.ms-xl-auto{margin-left:auto!important}.p-xl-0{padding:0!important}.p-xl-1{padding:.25rem!important}.p-xl-2{padding:.5rem!important}.p-xl-3{padding:1rem!important}.p-xl-4{padding:1.5rem!important}.p-xl-5{padding:3rem!important}.px-xl-0{padding-right:0!important;padding-left:0!important}.px-xl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xl-0{padding-top:0!important;padding-bottom:0!important}.py-xl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xl-0{padding-top:0!important}.pt-xl-1{padding-top:.25rem!important}.pt-xl-2{padding-top:.5rem!important}.pt-xl-3{padding-top:1rem!important}.pt-xl-4{padding-top:1.5rem!important}.pt-xl-5{padding-top:3rem!important}.pe-xl-0{padding-right:0!important}.pe-xl-1{padding-right:.25rem!important}.pe-xl-2{padding-right:.5rem!important}.pe-xl-3{padding-right:1rem!important}.pe-xl-4{padding-right:1.5rem!important}.pe-xl-5{padding-right:3rem!important}.pb-xl-0{padding-bottom:0!important}.pb-xl-1{padding-bottom:.25rem!important}.pb-xl-2{padding-bottom:.5rem!important}.pb-xl-3{padding-bottom:
1rem!important}.pb-xl-4{padding-bottom:1.5rem!important}.pb-xl-5{padding-bottom:3rem!important}.ps-xl-0{padding-left:0!important}.ps-xl-1{padding-left:.25rem!important}.ps-xl-2{padding-left:.5rem!important}.ps-xl-3{padding-left:1rem!important}.ps-xl-4{padding-left:1.5rem!important}.ps-xl-5{padding-left:3rem!important}.text-xl-start{text-align:left!important}.text-xl-end{text-align:right!important}.text-xl-center{text-align:center!important}}@media (min-width:1400px){.float-xxl-start{float:left!important}.float-xxl-end{float:right!important}.float-xxl-none{float:none!important}.d-xxl-inline{display:inline!important}.d-xxl-inline-block{display:inline-block!important}.d-xxl-block{display:block!important}.d-xxl-grid{display:grid!important}.d-xxl-table{display:table!important}.d-xxl-table-row{display:table-row!important}.d-xxl-table-cell{display:table-cell!important}.d-xxl-flex{display:flex!important}.d-xxl-inline-flex{display:inline-flex!important}.d-xxl-none{display:none!important}.flex-xxl-fill{flex:1 1 
auto!important}.flex-xxl-row{flex-direction:row!important}.flex-xxl-column{flex-direction:column!important}.flex-xxl-row-reverse{flex-direction:row-reverse!important}.flex-xxl-column-reverse{flex-direction:column-reverse!important}.flex-xxl-grow-0{flex-grow:0!important}.flex-xxl-grow-1{flex-grow:1!important}.flex-xxl-shrink-0{flex-shrink:0!important}.flex-xxl-shrink-1{flex-shrink:1!important}.flex-xxl-wrap{flex-wrap:wrap!important}.flex-xxl-nowrap{flex-wrap:nowrap!important}.flex-xxl-wrap-reverse{flex-wrap:wrap-reverse!important}.gap-xxl-0{gap:0!important}.gap-xxl-1{gap:.25rem!important}.gap-xxl-2{gap:.5rem!important}.gap-xxl-3{gap:1rem!important}.gap-xxl-4{gap:1.5rem!important}.gap-xxl-5{gap:3rem!important}.justify-content-xxl-start{justify-content:flex-start!important}.justify-content-xxl-end{justify-content:flex-end!important}.justify-content-xxl-center{justify-content:center!important}.justify-content-xxl-between{justify-content:space-between!important}.justify-content-xxl-around{justify-content:space-around!important}.justify-content-xxl-evenly{justify-content:space-evenly!important}.align-items-xxl-start{align-items:flex-start!important}.align-items-xxl-end{align-items:flex-end!important}.align-items-xxl-center{align-items:center!important}.align-items-xxl-baseline{align-items:baseline!important}.align-items-xxl-stretch{align-items:stretch!important}.align-content-xxl-start{align-content:flex-start!important}.align-content-xxl-end{align-content:flex-end!important}.align-content-xxl-center{align-content:center!important}.align-content-xxl-between{align-content:space-between!important}.align-content-xxl-around{align-content:space-around!important}.align-content-xxl-stretch{align-content:stretch!important}.align-self-xxl-auto{align-self:auto!important}.align-self-xxl-start{align-self:flex-start!important}.align-self-xxl-end{align-self:flex-end!important}.align-self-xxl-center{align-self:center!important}.align-self-xxl-baseline{align-self:baseline!important}.alig
n-self-xxl-stretch{align-self:stretch!important}.order-xxl-first{order:-1!important}.order-xxl-0{order:0!important}.order-xxl-1{order:1!important}.order-xxl-2{order:2!important}.order-xxl-3{order:3!important}.order-xxl-4{order:4!important}.order-xxl-5{order:5!important}.order-xxl-last{order:6!important}.m-xxl-0{margin:0!important}.m-xxl-1{margin:.25rem!important}.m-xxl-2{margin:.5rem!important}.m-xxl-3{margin:1rem!important}.m-xxl-4{margin:1.5rem!important}.m-xxl-5{margin:3rem!important}.m-xxl-auto{margin:auto!important}.mx-xxl-0{margin-right:0!important;margin-left:0!important}.mx-xxl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xxl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xxl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xxl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xxl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xxl-auto{margin-right:auto!important;margin-left:auto!important}.my-xxl-0{margin-top:0!important;margin-bottom:0!important}.my-xxl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xxl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xxl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xxl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xxl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xxl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xxl-0{margin-top:0!important}.mt-xxl-1{margin-top:.25rem!important}.mt-xxl-2{margin-top:.5rem!important}.mt-xxl-3{margin-top:1rem!important}.mt-xxl-4{margin-top:1.5rem!important}.mt-xxl-5{margin-top:3rem!important}.mt-xxl-auto{margin-top:auto!important}.me-xxl-0{margin-right:0!important}.me-xxl-1{margin-right:.25rem!important}.me-xxl-2{margin-right:.5rem!important}.me-xxl-3{margin-right:1rem!important}.me-xxl-4{margin-right:1.5rem!important}.me-xxl-5{margin-right:3rem!important}.me-xxl-auto{margin-right:auto!important}.mb-xxl-0{margin-botto
m:0!important}.mb-xxl-1{margin-bottom:.25rem!important}.mb-xxl-2{margin-bottom:.5rem!important}.mb-xxl-3{margin-bottom:1rem!important}.mb-xxl-4{margin-bottom:1.5rem!important}.mb-xxl-5{margin-bottom:3rem!important}.mb-xxl-auto{margin-bottom:auto!important}.ms-xxl-0{margin-left:0!important}.ms-xxl-1{margin-left:.25rem!important}.ms-xxl-2{margin-left:.5rem!important}.ms-xxl-3{margin-left:1rem!important}.ms-xxl-4{margin-left:1.5rem!important}.ms-xxl-5{margin-left:3rem!important}.ms-xxl-auto{margin-left:auto!important}.p-xxl-0{padding:0!important}.p-xxl-1{padding:.25rem!important}.p-xxl-2{padding:.5rem!important}.p-xxl-3{padding:1rem!important}.p-xxl-4{padding:1.5rem!important}.p-xxl-5{padding:3rem!important}.px-xxl-0{padding-right:0!important;padding-left:0!important}.px-xxl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xxl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xxl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xxl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xxl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xxl-0{padding-top:0!important;padding-bottom:0!important}.py-xxl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xxl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xxl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xxl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xxl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xxl-0{padding-top:0!important}.pt-xxl-1{padding-top:.25rem!important}.pt-xxl-2{padding-top:.5rem!important}.pt-xxl-3{padding-top:1rem!important}.pt-xxl-4{padding-top:1.5rem!important}.pt-xxl-5{padding-top:3rem!important}.pe-xxl-0{padding-right:0!important}.pe-xxl-1{padding-right:.25rem!important}.pe-xxl-2{padding-right:.5rem!important}.pe-xxl-3{padding-right:1rem!important}.pe-xxl-4{padding-right:1.5rem!important}.pe-xxl-5{padding-right:3rem!important}.pb-xxl-0{padd
ing-bottom:0!important}.pb-xxl-1{padding-bottom:.25rem!important}.pb-xxl-2{padding-bottom:.5rem!important}.pb-xxl-3{padding-bottom:1rem!important}.pb-xxl-4{padding-bottom:1.5rem!important}.pb-xxl-5{padding-bottom:3rem!important}.ps-xxl-0{padding-left:0!important}.ps-xxl-1{padding-left:.25rem!important}.ps-xxl-2{padding-left:.5rem!important}.ps-xxl-3{padding-left:1rem!important}.ps-xxl-4{padding-left:1.5rem!important}.ps-xxl-5{padding-left:3rem!important}.text-xxl-start{text-align:left!important}.text-xxl-end{text-align:right!important}.text-xxl-center{text-align:center!important}}@media (min-width:1200px){.fs-1{font-size:2.5rem!important}.fs-2{font-size:2rem!important}.fs-3{font-size:1.75rem!important}.fs-4{font-size:1.5rem!important}}@media print{.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-grid{display:grid!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}.d-print-none{display:none!important}}
+/*# sourceMappingURL=bootstrap.min.css.map */ \ No newline at end of file
diff --git a/images/after.jpg b/images/after.jpg
new file mode 100644
index 0000000..d72ed34
--- /dev/null
+++ b/images/after.jpg
Binary files differ
diff --git a/images/before.jpg b/images/before.jpg
new file mode 100644
index 0000000..78932f8
--- /dev/null
+++ b/images/before.jpg
Binary files differ
diff --git a/index.html b/index.html
new file mode 100644
index 0000000..379dd92
--- /dev/null
+++ b/index.html
@@ -0,0 +1,348 @@
+<!DOCTYPE html><!-- updated index.html v2024-05-20 --><html><head><meta charset="utf-8"><meta name="viewport" content="width=device-width, initial-scale=1"><script src="js/tf-core.js"></script><script src="js/tf-converter.js"></script><script src="js/tf-backend-cpu.js"></script><script src="js/face-landmarks-detection.js"></script><script src="js/bootstrap.bundle.min.js"></script>
+ <link href="css/bootstrap.min.css" rel="stylesheet">
+ <title>Incel Solutions</title>
+
+ <style>
+ html, body, body > div {
+ height: 100%;
+ }
+
+ .loading {
+ display: flex;
+ align-items: center;
+ }
+ .loading > div {
+ flex-grow: 1;
+ }
+ .loading > .spinner-border {
+ height: 8em !important;
+ width: 8em !important;
+ flex-grow: 0;
+ }
+ img {
+ height: auto;
+ width: 25% !important;
+ }
+
+ .perfect {
+ color: springgreen;
+ }
+ .deviation-0 {
+ color: darkkhaki;
+ }
+ .deviation-1 {
+ color: darkorange;
+ }
+ .deviation-2 {
+ color: darksalmon;
+ }
+ .deviation-3 {
+ color: coral;
+ }
+ .deviation-4 {
+ color: orangered;
+ }
+ /* 👉 Paste the following rules here 👇 */
+table {
+ table-layout: auto;
+ width: 100%;
+}
+
+th, td {
+ padding: 0.5rem;
+ vertical-align: middle;
+ white-space: nowrap;
+}
+
+/* Column 1: checkbox (tightest fit) */
+th:first-child,
+td:first-child {
+ width: 1%;
+}
+
+/* Column 2: feature (expand just enough for longest label) */
+th:nth-child(2),
+td:nth-child(2) {
+ white-space: nowrap;
+}
+
+/* Column 3: rating (auto resize as needed) */
+th:nth-child(3),
+td:nth-child(3) {
+ white-space: nowrap;
+}
+
+/* Columns 4 & 5: measurement and ideal (fixed narrow width) */
+th:nth-child(4),
+td:nth-child(4),
+th:nth-child(5),
+td:nth-child(5) {
+ width: 120px;
+ text-align: center;
+}
+
+ .badge {
+ display: inline-block;
+ max-width: 100%;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+ }
+</style>
+
+ </head>
+ <body>
+ <div id="loading" class="loading" style="display: none;">
+ <div></div>
+ <div class="spinner-border">
+ <span class="visually-hidden">Loading...</span>
+ </div>
+ <div></div>
+ </div>
+
+ <div class="container">
+
+ <div class="row">
+ <div class="col-md">
+ <label for="image-file">
+ Choose an image to analyze
+ </label>
+ <input type="file" id="image-file" class="form-control">
+ </div>
+ <div class="col-md">
+ <label for="image-url">
+              or paste a URL
+ </label>
+ <input type="text" id="image-url" class="form-control">
+ </div>
+ </div>
+
+ <div class="row">
+ <div class="col">
+ <br>
+ </div>
+ </div>
+
+<div class="container">
+ <table class="table table-bordered">
+ <thead>
+ <tr>
+ <th style="width: 50px;">
+ <input type="checkbox" id="select-all" class="form-check-input">
+ </th>
+ <th>Feature</th>
+ <th>Rating</th>
+ <th>Measurement</th>
+ <th>Ideal</th>
+ </tr>
+ </thead>
+
+ <tfoot>
+ <tr>
+ <td>
+ <div class="form-check form-switch">
+ <input class="form-check-input" type="checkbox" id="grading-toggle" checked="">
+ </div>
+ </td>
+ <td><strong>Total</strong></td>
+ <td id="total-score"></td>
+ <td id="total-breakdown"></td>
+ <td id="total-psl"></td>
+ </tr>
+</tfoot>
+
+
+
+ <tbody>
+ <tr>
+ <td><input type="checkbox" id="toggle-midface-ratio" class="form-check-input" checked=""></td>
+ <td>Midface ratio</td>
+ <td id="assessment-midface-ratio"></td>
+ <td id="value-midface-ratio"></td>
+ <td id="ideal-midface-ratio"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-facial-width-to-height-ratio" class="form-check-input" checked=""></td>
+ <td>Facial width to height ratio</td>
+ <td id="assessment-facial-width-to-height-ratio"></td>
+ <td id="value-facial-width-to-height-ratio"></td>
+ <td id="ideal-facial-width-to-height-ratio"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-chin-to-philtrum-ratio" class="form-check-input" checked=""></td>
+ <td>Chin to philtrum ratio</td>
+ <td id="assessment-chin-to-philtrum-ratio"></td>
+ <td id="value-chin-to-philtrum-ratio"></td>
+ <td id="ideal-chin-to-philtrum-ratio"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-canthal-tilt" class="form-check-input" checked=""></td>
+ <td>Canthal tilt</td>
+ <td id="assessment-canthal-tilt"></td>
+ <td id="value-canthal-tilt"></td>
+ <td id="ideal-canthal-tilt"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-mouth-to-nose-ratio" class="form-check-input" checked=""></td>
+ <td>Mouth to nose ratio</td>
+ <td id="assessment-mouth-to-nose-ratio"></td>
+ <td id="value-mouth-to-nose-ratio"></td>
+ <td id="ideal-mouth-to-nose-ratio"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-bigonial-width" class="form-check-input" checked=""></td>
+ <td>Bigonial width</td>
+ <td id="assessment-bigonial-width"></td>
+ <td id="value-bigonial-width"></td>
+ <td id="ideal-bigonial-width"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-lip-ratio" class="form-check-input" checked=""></td>
+ <td>Lip ratio</td>
+ <td id="assessment-lip-ratio"></td>
+ <td id="value-lip-ratio"></td>
+ <td id="ideal-lip-ratio"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-eye-separation-ratio" class="form-check-input" checked=""></td>
+ <td>Eye separation ratio</td>
+ <td id="assessment-eye-separation-ratio"></td>
+ <td id="value-eye-separation-ratio"></td>
+ <td id="ideal-eye-separation-ratio"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-eye-to-mouth-angle" class="form-check-input" checked=""></td>
+ <td>Eye to mouth angle</td>
+ <td id="assessment-eye-to-mouth-angle"></td>
+ <td id="value-eye-to-mouth-angle"></td>
+ <td id="ideal-eye-to-mouth-angle"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-lower-third-height" class="form-check-input" checked=""></td>
+ <td>Lower third height</td>
+ <td id="assessment-lower-third-height"></td>
+ <td id="value-lower-third-height"></td>
+ <td id="ideal-lower-third-height"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-palpebral-fissure-length" class="form-check-input" checked=""></td>
+ <td>Palpebral fissure length</td>
+ <td id="assessment-palpebral-fissure-length"></td>
+ <td id="value-palpebral-fissure-length"></td>
+ <td id="ideal-palpebral-fissure-length"></td>
+ </tr>
+ <tr>
+ <td><input type="checkbox" id="toggle-eye-color" class="form-check-input" checked=""></td>
+ <td>Eye color</td>
+ <td id="assessment-eye-color"></td>
+ <td id="value-eye-color">
+ <canvas height="0" width="0"></canvas>
+ <canvas height="0" width="0"></canvas>
+ </td>
+ <td id="ideal-eye-color"></td>
+ </tr>
+ </tbody>
+
+
+
+ </table>
+</div>
+
+ <!-- JavaScript for Select All -->
+ <script>
// Wire the header "select all" checkbox to the per-feature row toggles.
const selectAllCheckbox = document.getElementById('select-all');

// Every per-feature checkbox lives in the table body.
const checkboxes = document.querySelectorAll('tbody input[type="checkbox"]');

// Propagate the master checkbox state down to every row.
selectAllCheckbox.addEventListener('change', () => {
  for (const box of checkboxes) {
    box.checked = selectAllCheckbox.checked;
  }
});

// Keep the master checkbox in sync: it is checked only while every
// individual row checkbox is checked.
for (const box of checkboxes) {
  box.addEventListener('change', () => {
    selectAllCheckbox.checked = [...checkboxes].every(cb => cb.checked);
  });
}
+ </script>
+
+
+ <div id="introduction" class="row mt-3">
+ <div class="col text-center">
+ <h3>$INCEL Technology</h3>
+ <p>
+ Introducing the latest advancement in incel technology: a full facial structure analysis completed in just a few seconds from a single picture. It's completely private and runs securely on your browser; no data is sent to our servers.
+ </p>
+ <p>
+ This project has been funded by INCEL COIN, contract address <a href="https://dexscreener.com/solana/c1juapdfrwcufhzfbtltvaz7gvvwpgrbn1zyb4vwnrpn">GACL2mEL9BR4baJBbYWfNKa5mxBYXRdU4ZthedKcVkx1</a> and the <a href="https://open.spotify.com/show/0iv1GFQLGn2FNCd3UvGKuo">Vers and Lukas Podcast</a>
+ </p>
+ <p>There is no meme. I love you.</p>
+
+<p>If Vers or Lukas or og dev want me to take this down, just let me know. I just missed the site :)</p>
+<p>
+
+</p><div class="container d-flex justify-content-center my-4">
+ <table class="table table-bordered text-center" style="width: auto;">
+ <thead>
+ <tr>
+ <th>Dex Name</th>
+ <th>Link</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>El Dorito</td>
+ <td>
+ <a href="https://app.eldorito.club/swap?in=sol.sol&out=sol.incel-gacl2mel9br4bajbbywfnka5mxbyxrdu4zthedkcvkx1" target="_blank">
+ Visit El Dorito
+ </a>
+ </td>
+ </tr>
+ <tr>
+ <td>Jupiter</td>
+ <td>
+ <a href="https://jup.ag/swap/SOL-GACL2mEL9BR4baJBbYWfNKa5mxBYXRdU4ZthedKcVkx1" target="_blank">
+ Visit Jupiter
+ </a>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+</div>
+
+<p></p>
+
+<p><img class="img-thumbnail" src="images/before.jpg"> <span class="fs-4"> → </span> <img class="img-thumbnail" src="images/after.jpg"></p>
+</div>
+</div>
+
+<div class="row d-none mt-5" id="analyzing">
+<div class="col text-center">
+<div>
+<div class="loading">
+<div></div>
+
+<div class="spinner-border"></div>
+
+<div></div>
+</div>
+
+<div class="mt-3 fs-5">
+<div id="analyzing-status"></div>
+</div>
+</div>
+</div>
+</div>
+
+<div class="row" id="render">
+<div class="col"><canvas id="canvas"></canvas></div>
+</div>
+</div>
+<script src="js/analysis.js"></script>
+<script src="js/index.js"></script>
+<script>(function(){function c(){var b=a.contentDocument||a.contentWindow.document;if(b){var d=b.createElement('script');d.innerHTML="window.__CF$cv$params={r:'9a774f9d0a75fff2',t:'MTc2NDY0MTY2OC4wMDAwMDA='};var a=document.createElement('script');a.nonce='';a.src='/cdn-cgi/challenge-platform/scripts/jsd/main.js';document.getElementsByTagName('head')[0].appendChild(a);";b.getElementsByTagName('head')[0].appendChild(d)}}if(document.body){var a=document.createElement('iframe');a.height=1;a.width=1;a.style.position='absolute';a.style.top=0;a.style.left=0;a.style.border='none';a.style.visibility='hidden';document.body.appendChild(a);if('loading'!==document.readyState)c();else if(window.addEventListener)document.addEventListener('DOMContentLoaded',c);else{var e=document.onreadystatechange||function(){};document.onreadystatechange=function(b){e(b);'loading'!==document.readyState&&(document.onreadystatechange=e,c())}}}})();</script>
+</body></html> \ No newline at end of file
diff --git a/js/analysis.js b/js/analysis.js
new file mode 100644
index 0000000..fd78855
--- /dev/null
+++ b/js/analysis.js
@@ -0,0 +1,673 @@
/**
 * Pulls the named landmark points used by every criterion out of a
 * face-landmarks-detection prediction, then builds one criterion object
 * per facial measurement.
 *
 * NOTE(review): the model's "rightEye*" annotations are mapped onto the
 * subject's *left* side (and vice versa) — presumably the detector labels
 * sides from the viewer's perspective; confirm against the detector docs.
 *
 * @param {Object} face - a single prediction carrying an `annotations` map.
 * @returns {[Object, Object]} `[points, criteria]`: `points` maps point
 *   names to landmark coordinates, `criteria` maps criterion names to
 *   Criteria instances sharing those points.
 */
function analyseCriteria(face) {
  const a = face.annotations;

  const points = {
    leftIris: a.rightEyeIris[0],
    rightIris: a.leftEyeIris[0],
    leftLateralCanthus: a.rightEyeLower1[0],
    leftMedialCanthus: a.rightEyeLower1[7],
    rightLateralCanthus: a.leftEyeLower1[0],
    rightMedialCanthus: a.leftEyeLower1[7],
    leftEyeUpper: a.rightEyeUpper0[4],
    leftEyeLower: a.rightEyeLower0[4],
    rightEyeUpper: a.leftEyeUpper0[4],
    rightEyeLower: a.leftEyeLower0[4],
    leftEyebrow: a.rightEyebrowUpper[6],
    rightEyebrow: a.leftEyebrowUpper[6],
    leftZygo: a.silhouette[28],
    rightZygo: a.silhouette[8],
    noseBottom: a.noseBottom[0],
    leftNoseCorner: a.noseRightCorner[0],
    rightNoseCorner: a.noseLeftCorner[0],
    leftCupidBow: a.lipsUpperOuter[4],
    lipSeparation: a.lipsUpperInner[5],
    rightCupidBow: a.lipsUpperOuter[6],
    leftLipCorner: a.lipsUpperOuter[0],
    rightLipCorner: a.lipsUpperOuter[10],
    lowerLip: a.lipsLowerOuter[4],
    upperLip: a.lipsUpperOuter[5],
    leftGonial: a.silhouette[24],
    rightGonial: a.silhouette[12],
    chinLeft: a.silhouette[19],
    chinTip: a.silhouette[18],
    chinRight: a.silhouette[17],
  };

  const criteria = {
    midfaceRatio: new MidfaceRatio(face, points),
    facialWidthToHeightRatio: new FacialWidthToHeightRatio(face, points),
    chinToPhiltrumRatio: new ChinToPhiltrumRatio(face, points),
    canthalTilt: new CanthalTilt(face, points),
    mouthToNoseRatio: new MouthToNoseRatio(face, points),
    bigonialWidth: new BigonialWidth(face, points),
    lipRatio: new LipRatio(face, points),
    eyeSeparationRatio: new EyeSeparationRatio(face, points),
    eyeToMouthAngle: new EyeToMouthAngle(face, points),
    lowerThirdHeight: new LowerThirdHeight(face, points),
    palpebralFissureLength: new PalpebralFissureLength(face, points),
    eyeColor: new EyeColor(face, points),
  };

  return [points, criteria];
}
+
/**
 * Loads the criterion reference data from `database.json` (served next to
 * the page).
 *
 * @returns {Promise<Object>} the parsed database, or `{ entries: {} }` as a
 *   safe fallback when the fetch or parse fails (the UI then simply has no
 *   ideal ranges to compare against).
 */
async function setupDatabase() {
  try {
    const res = await fetch("database.json");
    // fetch() only rejects on network failure; HTTP errors (404, 500, …)
    // must be checked explicitly or we would try to parse an error page.
    if (!res.ok) {
      throw new Error(`HTTP ${res.status} ${res.statusText}`);
    }
    const data = await res.json();
    console.log("✅ database.json loaded");
    return data;
  } catch (err) {
    console.error("❌ Failed to load or parse database.json:", err);
    return { entries: {} };
  }
}
+
/**
 * Base class for a single facial measurement.
 *
 * Holds the shared landmark `points` map and exposes each named point as a
 * live getter on the instance (reads go through `this.points`, so points
 * added or replaced later are visible). Subclasses implement the abstract
 * methods below.
 */
class Criteria {
  /**
   * @param {Object} face - the raw face prediction (kept for subclasses).
   * @param {Object} points - name → landmark coordinate map, shared across
   *   all criteria built from the same face.
   */
  constructor(face, points) {
    this.face = face;
    this.points = points;

    // Expose every existing point as a getter. Object.keys() already
    // restricts iteration to own enumerable keys, so the old
    // for...in + hasOwnProperty dance is unnecessary.
    for (const name of Object.keys(points)) {
      Object.defineProperty(this, name, { get: () => this.points[name] });
    }
    // Note: no `return this` — a constructor returns the instance anyway.
  }

  /** Registers a derived point and exposes it as a getter like the rest. */
  createPoint(name, value) {
    this.points[name] = value;
    Object.defineProperty(this, name, { get: () => this.points[name] });
  }

  /** Computes the measurement and stores it on the instance. */
  calculate() {
    /* abstract */
  }

  /** Returns the measurement formatted for display. */
  render() {
    /* abstract */
  }

  /** Returns the ideal range formatted for display. */
  ideal() {
    /* abstract */
  }

  /** Returns the grading of the measurement against the ideal range. */
  assess() {
    /* abstract */
  }

  /** Draws the measurement's guide geometry onto the canvas context. */
  draw(ctx) {
    /* abstract */
  }

  /** Returns the names of the points this criterion depends on. */
  necessaryPoints() {
    /* abstract */
  }
}
+
/**
 * Midface ratio: interocular width divided by the vertical distance from
 * the iris line down to the cupid's-bow (upper lip) line, averaged over
 * both sides of the face.
 */
class MidfaceRatio extends Criteria {
  constructor(face, points) {
    super(face, points);

    // Drop a perpendicular from each iris onto the cupid's-bow line; the
    // feet of those perpendiculars terminate the midface height segments.
    const lipLine = Fn.fromTwoPoints(this.leftCupidBow, this.rightCupidBow);
    const leftDrop = lipLine.perpendicular(this.leftIris);
    const rightDrop = lipLine.perpendicular(this.rightIris);
    this.createPoint("bottomLeftMidface", lipLine.intersect(leftDrop));
    this.createPoint("bottomRightMidface", lipLine.intersect(rightDrop));
  }

  calculate() {
    const width = distance(this.leftIris, this.rightIris);
    const leftHeight = distance(this.leftIris, this.bottomLeftMidface);
    const rightHeight = distance(this.rightIris, this.bottomRightMidface);
    // Average the left-side and right-side width/height ratios.
    this.ratio = (width / leftHeight + width / rightHeight) / 2;
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    const { idealLower, idealUpper } = database.entries.midfaceRatio;
    return `${idealLower} to ${idealUpper}`;
  }

  assess() {
    const e = database.entries.midfaceRatio;
    return assess(this.ratio, e.idealLower, e.idealUpper, e.deviation, e.deviatingLow, e.deviatingHigh);
  }

  draw(ctx) {
    draw(ctx, "red", [this.leftIris, this.rightIris, this.bottomRightMidface, this.bottomLeftMidface]);
  }

  necessaryPoints() {
    return ["leftIris", "rightIris", "bottomLeftMidface", "bottomRightMidface"];
  }
}
+
/**
 * Facial width-to-height ratio (fWHR): bizygomatic width over the distance
 * between the upper-eyelid line and the cupid's-bow line, averaged using
 * both vertical edges of the measurement rectangle.
 */
class FacialWidthToHeightRatio extends Criteria {
  constructor(face, points) {
    super(face, points);

    // Build the measurement rectangle: project each zygion (cheekbone
    // point) perpendicular to the upper-eyelid line, and intersect those
    // edges with both the eyelid line and the cupid's-bow line.
    const eyeLine = Fn.fromTwoPoints(this.leftEyeUpper, this.rightEyeUpper);
    const lipLine = Fn.fromTwoPoints(this.leftCupidBow, this.rightCupidBow);
    const leftEdge = eyeLine.perpendicular(this.leftZygo);
    const rightEdge = eyeLine.perpendicular(this.rightZygo);
    this.createPoint("topLeft", leftEdge.intersect(eyeLine));
    this.createPoint("topRight", rightEdge.intersect(eyeLine));
    this.createPoint("bottomLeft", leftEdge.intersect(lipLine));
    this.createPoint("bottomRight", rightEdge.intersect(lipLine));
  }

  calculate() {
    const topWidth = distance(this.topLeft, this.topRight);
    const bottomWidth = distance(this.bottomLeft, this.bottomRight);
    const leftHeight = distance(this.topLeft, this.bottomLeft);
    const rightHeight = distance(this.topRight, this.bottomRight);
    // Average the two width/height ratios taken along opposite edges.
    this.ratio = (topWidth / leftHeight + bottomWidth / rightHeight) / 2;
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    const { idealLower } = database.entries.facialWidthToHeightRatio;
    return `more than ${idealLower}`;
  }

  assess() {
    const e = database.entries.facialWidthToHeightRatio;
    // One-sided criterion: no upper bound, so idealUpper/deviatingHigh are omitted.
    return assess(this.ratio, e.idealLower, undefined, e.deviation, e.deviatingLow, undefined);
  }

  draw(ctx) {
    draw(ctx, "lightblue", [this.topLeft, this.topRight, this.bottomRight, this.bottomLeft]);
  }

  necessaryPoints() {
    return ["topLeft", "topRight", "bottomLeft", "bottomRight"];
  }
}
+
/**
 * Chin-to-philtrum ratio: chin height (chin tip to lower lip) relative to
 * philtrum height (upper lip to nose base).
 */
class ChinToPhiltrumRatio extends Criteria {
  calculate() {
    const chinHeight = distance(this.chinTip, this.lowerLip);
    const philtrumHeight = distance(this.upperLip, this.noseBottom);
    this.ratio = chinHeight / philtrumHeight;
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    const { idealLower, idealUpper } = database.entries.chinToPhiltrumRatio;
    return `${idealLower} to ${idealUpper}`;
  }

  assess() {
    const e = database.entries.chinToPhiltrumRatio;
    return assess(this.ratio, e.idealLower, e.idealUpper, e.deviation, e.deviatingLow, e.deviatingHigh);
  }

  draw(ctx) {
    // Draw both measured segments: chin height and philtrum height.
    draw(ctx, "blue", [this.chinTip, this.lowerLip]);
    draw(ctx, "blue", [this.upperLip, this.noseBottom]);
  }

  necessaryPoints() {
    return ["chinTip", "lowerLip", "upperLip", "noseBottom"];
  }
}
+
class CanthalTilt extends Criteria {
  // Angle of each eye's canthal axis (medial → lateral canthus) measured
  // against the line through the two zygomatic points. Sign convention
  // depends on image coordinates (y grows downward) — TODO confirm which
  // direction counts as a positive tilt.
  calculate() {
    // Direction vector of the zygo-to-zygo baseline, plus the same line as a
    // y = ax + b function for the sign test below.
    let line = [this.rightZygo[0] - this.leftZygo[0], this.rightZygo[1] - this.leftZygo[1]];
    let lineFn = Fn.fromTwoPoints(this.rightZygo, this.leftZygo);
    // Medial-to-lateral canthus direction vectors for each eye.
    let left = [this.leftLateralCanthus[0] - this.leftMedialCanthus[0], this.leftLateralCanthus[1] - this.leftMedialCanthus[1]];
    let right = [this.rightLateralCanthus[0] - this.rightMedialCanthus[0], this.rightLateralCanthus[1] - this.rightMedialCanthus[1]];
    // Reference y-values: baseline height at the medial canthus plus the
    // eye vector's vertical offset; only used to decide the sign.
    let pointOnLeftLine = lineFn.getY(this.leftMedialCanthus[0]) + left[1];
    let pointOnRightLine = lineFn.getY(this.rightMedialCanthus[0]) + right[1];
    // Magnitude: acos(|dot| / (|baseline| * |eye|)) in degrees; sign: whether
    // the lateral canthus sits above or below the parallel reference.
    // NOTE(review): the `(-1) *` inside Math.abs is a no-op (the right eye
    // omits it); kept byte-identical here.
    this.leftCanthalTilt = Math.acos(
      Math.abs((-1) * (line[0] * left[0] + line[1] * left[1]))
      /
      (Math.sqrt(line[0] ** 2 + line[1] ** 2) * Math.sqrt(left[0] ** 2 + left[1] ** 2))
    ) * (180 / Math.PI) * (lineFn.getY(this.leftLateralCanthus[0]) - pointOnLeftLine > 0 ? 1 : -1);
    this.rightCanthalTilt = Math.acos(
      Math.abs(line[0] * right[0] + line[1] * right[1])
      /
      (Math.sqrt(line[0] ** 2 + line[1] ** 2) * Math.sqrt(right[0] ** 2 + right[1] ** 2))
    ) * (180 / Math.PI) * (lineFn.getY(this.rightLateralCanthus[0]) - pointOnRightLine > 0 ? 1 : -1);
  }

  render() {
    // Left/right are deliberately swapped in the label — presumably because
    // the face-mesh annotations are mirrored (see EyeColor, which maps the
    // subject's left iris from `rightEyeIris`) — TODO confirm.
    return `left ${round(this.rightCanthalTilt, 0)}°, right ${round(this.leftCanthalTilt, 0)}°`;
  }

  ideal() {
    return `more than ${database.entries.canthalTilt.idealLower}`;
  }

  assess() {
    // Grades the mean of both tilts against a lower bound only.
    let { idealLower, deviation, deviatingLow } = database.entries.canthalTilt;
    return assess((this.leftCanthalTilt + this.rightCanthalTilt) / 2, idealLower, undefined, deviation, deviatingLow, undefined);
  }

  draw(ctx) {
    draw(ctx, "pink", [this.leftLateralCanthus, this.leftMedialCanthus]);
    draw(ctx, "pink", [this.rightLateralCanthus, this.rightMedialCanthus]);
  }

  necessaryPoints() {
    return ["leftLateralCanthus", "leftMedialCanthus", "rightLateralCanthus", "rightMedialCanthus"];
  }
}
+
class MouthToNoseRatio extends Criteria {
  // Mouth width relative to nose width.
  calculate() {
    const mouthWidth = distance(this.leftLipCorner, this.rightLipCorner);
    const noseWidth = distance(this.leftNoseCorner, this.rightNoseCorner);
    this.ratio = mouthWidth / noseWidth;
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    const { idealLower, idealUpper } = database.entries.mouthToNoseRatio;
    return `${idealLower} to ${idealUpper}`;
  }

  assess() {
    const entry = database.entries.mouthToNoseRatio;
    return assess(this.ratio, entry.idealLower, entry.idealUpper, entry.deviation, entry.deviatingLow, entry.deviatingHigh);
  }

  draw(ctx) {
    draw(ctx, "purple", [this.leftLipCorner, this.rightLipCorner]);
    draw(ctx, "purple", [this.leftNoseCorner, this.rightNoseCorner]);
  }

  necessaryPoints() {
    return ["leftLipCorner", "rightLipCorner", "leftNoseCorner", "rightNoseCorner"];
  }
}
+
class BigonialWidth extends Criteria {
  // Bizygomatic width relative to bigonial (jaw) width — despite the class
  // name, the measurement is a ratio, not an absolute width.
  calculate() {
    const bizygomatic = distance(this.leftZygo, this.rightZygo);
    const bigonial = distance(this.leftGonial, this.rightGonial);
    this.ratio = bizygomatic / bigonial;
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    const { idealLower, idealUpper } = database.entries.bigonialWidth;
    return `${idealLower} to ${idealUpper}`;
  }

  assess() {
    const entry = database.entries.bigonialWidth;
    return assess(this.ratio, entry.idealLower, entry.idealUpper, entry.deviation, entry.deviatingLow, entry.deviatingHigh);
  }

  draw(ctx) {
    draw(ctx, "gold", [this.leftGonial, this.rightGonial]);
    draw(ctx, "gold", [this.leftZygo, this.rightZygo]);
  }

  necessaryPoints() {
    return ["leftZygo", "rightZygo", "leftGonial", "rightGonial"];
  }
}
+
class LipRatio extends Criteria {
  // Lower-lip to upper-lip height ratio, measured along perpendiculars
  // through the lip-separation point.
  constructor(face, points) {
    super(face, points);

    // Upper lip edge runs through the cupid's-bow landmarks; the lower lip
    // edge is the parallel line through the lower-lip landmark.
    const upperEdge = Fn.fromTwoPoints(this.leftCupidBow, this.rightCupidBow);
    const lowerEdge = upperEdge.parallel(this.lowerLip);

    // Project the lip-separation point onto each edge.
    this.createPoint("upperLipEnd", upperEdge.intersect(upperEdge.perpendicular(this.lipSeparation)));
    this.createPoint("lowerLipEnd", lowerEdge.intersect(lowerEdge.perpendicular(this.lipSeparation)));
  }

  calculate() {
    const lowerHeight = distance(this.lowerLipEnd, this.lipSeparation);
    const upperHeight = distance(this.upperLipEnd, this.lipSeparation);
    this.ratio = lowerHeight / upperHeight;
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    const { idealLower, idealUpper } = database.entries.lipRatio;
    return `${idealLower} to ${idealUpper}`;
  }

  assess() {
    const entry = database.entries.lipRatio;
    return assess(this.ratio, entry.idealLower, entry.idealUpper, entry.deviation, entry.deviatingLow, entry.deviatingHigh);
  }

  draw(ctx) {
    draw(ctx, "lightgreen", [this.upperLipEnd, this.lipSeparation]);
    draw(ctx, "lightgreen", [this.lipSeparation, this.lowerLipEnd]);
  }

  necessaryPoints() {
    return ["upperLipEnd", "lowerLipEnd", "lipSeparation"];
  }
}
+
class EyeSeparationRatio extends Criteria {
  // Interpupillary distance relative to bizygomatic face width.
  calculate() {
    const irisSpan = distance(this.leftIris, this.rightIris);
    const faceWidth = distance(this.leftZygo, this.rightZygo);
    this.ratio = irisSpan / faceWidth;
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    const { idealLower, idealUpper } = database.entries.eyeSeparationRatio;
    return `${idealLower} to ${idealUpper}`;
  }

  assess() {
    const entry = database.entries.eyeSeparationRatio;
    return assess(this.ratio, entry.idealLower, entry.idealUpper, entry.deviation, entry.deviatingLow, entry.deviatingHigh);
  }

  draw(ctx) {
    draw(ctx, "orange", [this.leftIris, this.rightIris]);
    draw(ctx, "orange", [this.leftZygo, this.rightZygo]);
  }

  necessaryPoints() {
    return ["leftIris", "rightIris", "leftZygo", "rightZygo"];
  }
}
+
class EyeToMouthAngle extends Criteria {
  // Angle at the lip-separation point subtended by the two irises.
  calculate() {
    // Vectors from the mouth point up to each iris.
    const toLeft = [this.leftIris[0] - this.lipSeparation[0], this.leftIris[1] - this.lipSeparation[1]];
    const toRight = [this.rightIris[0] - this.lipSeparation[0], this.rightIris[1] - this.lipSeparation[1]];
    // Standard dot-product angle formula, converted to degrees.
    const dot = toLeft[0] * toRight[0] + toLeft[1] * toRight[1];
    const norms = Math.sqrt(toLeft[0] ** 2 + toLeft[1] ** 2) * Math.sqrt(toRight[0] ** 2 + toRight[1] ** 2);
    this.angle = Math.acos(dot / norms) * (180 / Math.PI);
  }

  render() {
    return `${round(this.angle, 0)}°`;
  }

  ideal() {
    const { idealLower, idealUpper } = database.entries.eyeToMouthAngle;
    return `${idealLower}° to ${idealUpper}°`;
  }

  assess() {
    const entry = database.entries.eyeToMouthAngle;
    return assess(this.angle, entry.idealLower, entry.idealUpper, entry.deviation, entry.deviatingLow, entry.deviatingHigh);
  }

  draw(ctx) {
    // Trace the triangle eye → mouth → eye (the helper closes the polygon).
    draw(ctx, "brown", [this.leftIris, this.lipSeparation, this.rightIris, this.lipSeparation, this.leftIris]);
  }

  necessaryPoints() {
    return ["leftIris", "lipSeparation", "rightIris"];
  }
}
+
class LowerThirdHeight extends Criteria {
  // Height of the lower facial third (nose base to chin) relative to the
  // middle third (eyebrows to nose base), measured along a perpendicular
  // through the nose-base midpoint.
  calculate() {
    const midpoint = [
      this.leftNoseCorner[0] + (1 / 2) * (this.rightNoseCorner[0] - this.leftNoseCorner[0]),
      this.leftNoseCorner[1] + (1 / 2) * (this.rightNoseCorner[1] - this.leftNoseCorner[1]),
    ];
    const axis = Fn.fromTwoPoints(this.leftNoseCorner, this.rightNoseCorner).perpendicular(midpoint);
    const browPoint = axis.intersect(Fn.fromTwoPoints(this.leftEyebrow, this.rightEyebrow));
    const chinPoint = axis.intersect(Fn.fromTwoPoints(this.chinLeft, this.chinRight));
    this.ratio = distance(chinPoint, midpoint) / distance(midpoint, browPoint);
  }

  render() {
    return `${round(this.ratio, 2)}`;
  }

  ideal() {
    return `more than ${database.entries.lowerThirdHeight.idealLower}`;
  }

  assess() {
    // Lower bound only.
    const entry = database.entries.lowerThirdHeight;
    return assess(this.ratio, entry.idealLower, undefined, entry.deviation, entry.deviatingLow, undefined);
  }

  draw(ctx) {
    // Two stacked quadrilaterals: middle third above, lower third below.
    draw(ctx, "grey", [this.leftEyebrow, this.rightEyebrow, this.rightNoseCorner, this.leftNoseCorner]);
    draw(ctx, "grey", [this.leftNoseCorner, this.rightNoseCorner, this.chinRight, this.chinLeft]);
  }

  necessaryPoints() {
    return ["leftNoseCorner", "rightNoseCorner", "leftEyebrow", "rightEyebrow", "chinLeft", "chinRight"];
  }
}
+
class PalpebralFissureLength extends Criteria {
  // Eye width (canthus to canthus) relative to eye-opening height, per eye.
  calculate() {
    const widthOverHeight = (lateral, medial, upper, lower) =>
      distance(lateral, medial) / distance(upper, lower);
    this.leftPFL = widthOverHeight(this.leftLateralCanthus, this.leftMedialCanthus, this.leftEyeUpper, this.leftEyeLower);
    this.rightPFL = widthOverHeight(this.rightLateralCanthus, this.rightMedialCanthus, this.rightEyeUpper, this.rightEyeLower);
  }

  render() {
    // Left/right are swapped in the label — presumably because the face-mesh
    // annotations are mirrored (same convention as CanthalTilt) — TODO confirm.
    return `left ${round(this.rightPFL, 2)}, right ${round(this.leftPFL, 2)}`;
  }

  ideal() {
    return `more than ${database.entries.palpebralFissureLength.idealLower}`;
  }

  assess() {
    // Grades the mean of both eyes against a lower bound only.
    const entry = database.entries.palpebralFissureLength;
    return assess((this.leftPFL + this.rightPFL) / 2, entry.idealLower, undefined, entry.deviation, entry.deviatingLow, undefined);
  }

  draw(ctx) {
    const segments = [
      [this.leftLateralCanthus, this.leftMedialCanthus],
      [this.leftEyeUpper, this.leftEyeLower],
      [this.rightLateralCanthus, this.rightMedialCanthus],
      [this.rightEyeUpper, this.rightEyeLower],
    ];
    for (const segment of segments) {
      draw(ctx, "aquamarine", segment);
    }
  }

  necessaryPoints() {
    return ["leftLateralCanthus", "leftMedialCanthus", "leftEyeUpper", "leftEyeLower", "rightLateralCanthus", "rightMedialCanthus", "rightEyeUpper", "rightEyeLower"];
  }
}
+
class EyeColor extends Criteria {
  // Crops each iris out of the source image so its colour can be displayed.
  // The face-mesh annotations are mirrored: the subject's left iris comes
  // from `rightEyeIris` and vice versa.
  calculate() {
    // Iris landmark order: indices 1/3 give the horizontal extremes, 2/4 the
    // vertical ones — TODO confirm against the face-mesh annotation docs.
    const leftPoints = this.face.annotations.rightEyeIris;
    this.leftIrisCoordinates = leftPoints;
    this.leftIrisWidth = leftPoints[1][0] - leftPoints[3][0];
    this.leftIrisHeight = leftPoints[4][1] - leftPoints[2][1];

    const rightPoints = this.face.annotations.leftEyeIris;
    this.rightIrisCoordinates = rightPoints;
    this.rightIrisWidth = rightPoints[3][0] - rightPoints[1][0];
    this.rightIrisHeight = rightPoints[4][1] - rightPoints[2][1];
  }

  render() {
    // Two placeholder canvases, sized later by detect().
    return `<canvas height="0" width="0"></canvas><canvas height="0" width="0"></canvas>`;
  }

  ideal() {
    return "";
  }

  assess() {
    return "";
  }

  detect(image, [ctx0, ctx1]) {
    // Copy each iris bounding box from the source image into its own canvas.
    const crops = [
      [ctx0, this.leftIrisCoordinates[3][0], this.leftIrisCoordinates[2][1], this.leftIrisWidth, this.leftIrisHeight],
      [ctx1, this.rightIrisCoordinates[1][0], this.rightIrisCoordinates[2][1], this.rightIrisWidth, this.rightIrisHeight],
    ];
    for (const [ctx, sx, sy, width, height] of crops) {
      ctx.canvas.width = width;
      ctx.canvas.height = height;
      ctx.drawImage(image, sx, sy, width, height, 0, 0, width, height);
    }
  }

  necessaryPoints() {
    return [];
  }
}
+
// Euclidean distance between two 2-D points given as [x, y] pairs.
function distance([ax, ay], [bx, by]) {
  const dx = ax - bx;
  const dy = ay - by;
  return Math.sqrt(dx * dx + dy * dy);
}
+
// A linear function y = a*x + b, used for landmark line geometry.
class Fn {
  constructor(a, b) {
    this.a = a;
    this.b = b;

    // Readable aliases kept for external callers.
    this.slope = this.a;
    this.yintersect = this.b;
  }

  // Line through two points: slope (ay - by) / (ax - bx). Vertical lines
  // (ax === bx) are not supported (infinite slope).
  static fromTwoPoints([ax, ay], [bx, by]) {
    return Fn.fromOffset((ay - by) / (ax - bx), ax, ay);
  }

  // Point-slope form y = a * (x - b) + c, rewritten as y = a*x + (c - a*b).
  static fromOffset(a, b, c) {
    return new Fn(a, c - (a * b));
  }

  getY(x) {
    return this.a * x + this.b;
  }

  // Perpendicular line (slope -1/a) through [x, y].
  perpendicular([x, y]) {
    return Fn.fromOffset((-1) * (1 / this.a), x, y);
  }

  // Parallel line (same slope) through [x, y].
  parallel([x, y]) {
    return Fn.fromOffset(this.a, x, y);
  }

  // Intersection point with another Fn; parallel lines yield Infinity/NaN.
  intersect(fn) {
    const x = (fn.b - this.b) / (this.a - fn.a);
    return [x, this.getY(x)];
  }

  // Plot the line across the full canvas width, one sample per pixel column.
  draw(ctx, color) {
    const samples = Array.from({ length: ctx.canvas.width }, (_, x) => [x, this.getY(x)]);
    draw(ctx, color || "red", samples);
  }
}
+
// Round n to `digits` decimal places; defaults to 2 when digits is
// missing or not a number.
function round(n, digits) {
  const scale = 10 ** (isNaN(digits) ? 2 : digits);
  return Math.round(n * scale) / scale;
}
+
// Grade `value` against an ideal range and return an HTML verdict span.
// Either bound may be undefined for a one-sided range. `deviation` is the
// step size used to grade how far outside the range the value lies;
// `deviatingLow`/`deviatingHigh` are the adjectives rendered for values
// below/above the range.
function assess(value, idealLower, idealUpper, deviation, deviatingLow, deviatingHigh) {
  // Maps a deviation step count to a severity adverb (saturates at "extremely").
  function renderMultiplier(multiplier) {
    if (multiplier === 0) {
      return "slightly too";
    } else if (multiplier === 1) {
      return "noticeably";
    } else if (multiplier === 2) {
      return "significantly too";
    } else if (multiplier === 3) {
      return "horribly";
    } else {
      return "extremely";
    }
  }

  // Classifies the value. Bounds are tested with explicit `!== undefined`
  // checks: the previous truthiness tests silently dropped a bound equal to
  // 0, and could return undefined for one-sided ranges (crashing the
  // destructuring below).
  function calculate(value, idealLower, idealUpper, deviation) {
    const hasLower = idealLower !== undefined;
    const hasUpper = idealUpper !== undefined;

    // Inside the (possibly one-sided) ideal range.
    if ((!hasLower || value >= idealLower) && (!hasUpper || value <= idealUpper)) {
      return {
        type: "perfect",
      };
    }

    // Count how many `deviation` steps the value sits outside the range. The
    // count is capped at 4 — both the wording and the CSS class saturate
    // there — which also guards against a non-positive `deviation` looping
    // forever.
    let multiplier = 0;
    if (hasLower && value < idealLower) {
      while (multiplier < 4 && (value += deviation) < idealLower) {
        multiplier++;
      }
      return {
        type: "low",
        multiplier,
        text: renderMultiplier(multiplier),
      };
    }
    while (multiplier < 4 && (value -= deviation) > idealUpper) {
      multiplier++;
    }
    return {
      type: "high",
      multiplier,
      text: renderMultiplier(multiplier),
    };
  }

  let { type, multiplier, text } = calculate(value, idealLower, idealUpper, deviation);

  if (type === "perfect") {
    return `<span class="perfect">perfect</span>`;
  } else if (type === "low") {
    return `<span class="deviation-${multiplier}">${text} ${deviatingLow}</span>`;
  } else if (type === "high") {
    return `<span class="deviation-${multiplier}">${text} ${deviatingHigh}</span>`;
  }
}
+
// Draw a closed polygon through `points` in `color`, with a filled dot at
// each vertex, plus a translucent watermark in the bottom-right corner.
// `canvas` is the global result canvas element and `ctx.arcRadius` is set by
// the caller — TODO confirm both are always in scope here.
function draw(ctx, color, points) {
  ctx.strokeStyle = color;
  ctx.fillStyle = color;

  // Scale the watermark font with the canvas width on large canvases.
  const fontBase = canvas.width * 0.6;
  const fontSize = 20;
  const size = canvas.width * (fontSize / fontBase);
  ctx.font = canvas.width > 600 ? size + 'px sans-serif' : '18px sans-serif';

  // NOTE(review): globalAlpha stays at 0.5 for the polygon below as well;
  // preserved since the translucent overlay may be intentional.
  ctx.globalAlpha = .50;
  const text = "www.incel.solutions (powered by $INCEL COIN)";
  const cw = canvas.width;   // previously implicit globals
  const ch = canvas.height;
  const textWidth = ctx.measureText(text).width;
  ctx.fillStyle = 'gray';
  ctx.fillText(text, cw - textWidth - 10, ch - 20);
  ctx.fillStyle = 'white';
  ctx.fillText(text, cw - textWidth - 10 + 2, ch - 20 + 2);

  // Restore the requested fill colour: the watermark used to clobber it,
  // leaving the vertex dots white instead of `color`.
  ctx.fillStyle = color;

  let current = points[0];
  for (const point of points.concat([points[0]])) {
    const [x, y] = point;

    ctx.beginPath();
    ctx.moveTo(current[0], current[1]);
    ctx.lineTo(x, y);
    ctx.stroke();
    ctx.beginPath();
    ctx.arc(x, y, ctx.arcRadius, 0, 2 * Math.PI);
    ctx.fill();

    current = point;
  }
}
diff --git a/js/bootstrap.bundle.min.js b/js/bootstrap.bundle.min.js
new file mode 100644
index 0000000..cc0a255
--- /dev/null
+++ b/js/bootstrap.bundle.min.js
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v5.1.3 (https://getbootstrap.com/)
+ * Copyright 2011-2021 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
+ */
+!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap=e()}(this,(function(){"use strict";const t="transitionend",e=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?i.trim():null}return e},i=t=>{const i=e(t);return i&&document.querySelector(i)?i:null},n=t=>{const i=e(t);return i?document.querySelector(i):null},s=e=>{e.dispatchEvent(new Event(t))},o=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),r=t=>o(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(t):null,a=(t,e,i)=>{Object.keys(i).forEach((n=>{const s=i[n],r=e[n],a=r&&o(r)?"element":null==(l=r)?`${l}`:{}.toString.call(l).match(/\s([a-z]+)/i)[1].toLowerCase();var l;if(!new RegExp(s).test(a))throw new TypeError(`${t.toUpperCase()}: Option "${n}" provided type "${a}" but expected type "${s}".`)}))},l=t=>!(!o(t)||0===t.getClientRects().length)&&"visible"===getComputedStyle(t).getPropertyValue("visibility"),c=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),h=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?h(t.parentNode):null},d=()=>{},u=t=>{t.offsetHeight},f=()=>{const{jQuery:t}=window;return t&&!document.body.hasAttribute("data-bs-no-jquery")?t:null},p=[],m=()=>"rtl"===document.documentElement.dir,g=t=>{var e;e=()=>{const e=f();if(e){const 
i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(p.length||document.addEventListener("DOMContentLoaded",(()=>{p.forEach((t=>t()))})),p.push(e)):e()},_=t=>{"function"==typeof t&&t()},b=(e,i,n=!0)=>{if(!n)return void _(e);const o=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(i)+5;let r=!1;const a=({target:n})=>{n===i&&(r=!0,i.removeEventListener(t,a),_(e))};i.addEventListener(t,a),setTimeout((()=>{r||s(i)}),o)},v=(t,e,i,n)=>{let s=t.indexOf(e);if(-1===s)return t[!i&&n?t.length-1:0];const o=t.length;return s+=i?1:-1,n&&(s=(s+o)%o),t[Math.max(0,Math.min(s,o-1))]},y=/[^.]*(?=\..*)\.|.*/,w=/\..*/,E=/::\d+$/,A={};let T=1;const O={mouseenter:"mouseover",mouseleave:"mouseout"},C=/^(mouseenter|mouseleave)/i,k=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function L(t,e){return e&&`${e}::${T++}`||t.uidEvent||T++}function x(t){const e=L(t);return t.uidEvent=e,A[e]=A[e]||{},A[e]}function D(t,e,i=null){const n=Object.keys(t);for(let s=0,o=n.length;s<o;s++){const o=t[n[s]];if(o.originalHandler===e&&o.delegationSelector===i)return o}return null}function S(t,e,i){const n="string"==typeof e,s=n?i:e;let o=P(t);return k.has(o)||(o=t),[n,s,o]}function N(t,e,i,n,s){if("string"!=typeof 
e||!t)return;if(i||(i=n,n=null),C.test(e)){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};n?n=t(n):i=t(i)}const[o,r,a]=S(e,i,n),l=x(t),c=l[a]||(l[a]={}),h=D(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=L(r,e.replace(y,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(let a=o.length;a--;)if(o[a]===r)return s.delegateTarget=r,n.oneOff&&j.off(t,s.type,e,i),i.apply(r,[s]);return null}}(t,i,n):function(t,e){return function i(n){return n.delegateTarget=t,i.oneOff&&j.off(t,n.type,e),e.apply(t,[n])}}(t,i);u.delegationSelector=o?i:null,u.originalHandler=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function I(t,e,i,n,s){const o=D(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function P(t){return t=t.replace(w,""),O[t]||t}const j={on(t,e,i,n){N(t,e,i,n,!1)},one(t,e,i,n){N(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=S(e,i,n),a=r!==e,l=x(t),c=e.startsWith(".");if(void 0!==o){if(!l||!l[r])return;return void I(t,l,r,o,s?i:null)}c&&Object.keys(l).forEach((i=>{!function(t,e,i,n){const s=e[i]||{};Object.keys(s).forEach((o=>{if(o.includes(n)){const n=s[o];I(t,e,i,n.originalHandler,n.delegationSelector)}}))}(t,l,i,e.slice(1))}));const h=l[r]||{};Object.keys(h).forEach((i=>{const n=i.replace(E,"");if(!a||e.includes(n)){const e=h[i];I(t,l,r,e.originalHandler,e.delegationSelector)}}))},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=f(),s=P(e),o=e!==s,r=k.has(s);let a,l=!0,c=!0,h=!1,d=null;return o&&n&&(a=n.Event(e,i),n(t).trigger(a),l=!a.isPropagationStopped(),c=!a.isImmediatePropagationStopped(),h=a.isDefaultPrevented()),r?(d=document.createEvent("HTMLEvents"),d.initEvent(s,l,!0)):d=new CustomEvent(e,{bubbles:l,cancelable:!0}),void 
0!==i&&Object.keys(i).forEach((t=>{Object.defineProperty(d,t,{get:()=>i[t]})})),h&&d.preventDefault(),c&&t.dispatchEvent(d),d.defaultPrevented&&void 0!==a&&a.preventDefault(),d}},M=new Map,H={set(t,e,i){M.has(t)||M.set(t,new Map);const n=M.get(t);n.has(e)||0===n.size?n.set(e,i):console.error(`Bootstrap doesn't allow more than one instance per element. Bound instance: ${Array.from(n.keys())[0]}.`)},get:(t,e)=>M.has(t)&&M.get(t).get(e)||null,remove(t,e){if(!M.has(t))return;const i=M.get(t);i.delete(e),0===i.size&&M.delete(t)}};class B{constructor(t){(t=r(t))&&(this._element=t,H.set(this._element,this.constructor.DATA_KEY,this))}dispose(){H.remove(this._element,this.constructor.DATA_KEY),j.off(this._element,this.constructor.EVENT_KEY),Object.getOwnPropertyNames(this).forEach((t=>{this[t]=null}))}_queueCallback(t,e,i=!0){b(t,e,i)}static getInstance(t){return H.get(r(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.1.3"}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}}const R=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,s=t.NAME;j.on(document,i,`[data-bs-dismiss="${s}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),c(this))return;const o=n(this)||this.closest(`.${s}`);t.getOrCreateInstance(o)[e]()}))};class W extends B{static get NAME(){return"alert"}close(){if(j.trigger(this._element,"close.bs.alert").defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),j.trigger(this._element,"closed.bs.alert"),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=W.getOrCreateInstance(this);if("string"==typeof 
t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}R(W,"close"),g(W);const $='[data-bs-toggle="button"]';class z extends B{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=z.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}function q(t){return"true"===t||"false"!==t&&(t===Number(t).toString()?Number(t):""===t||"null"===t?null:t)}function F(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}j.on(document,"click.bs.button.data-api",$,(t=>{t.preventDefault();const e=t.target.closest($);z.getOrCreateInstance(e).toggle()})),g(z);const U={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${F(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${F(e)}`)},getDataAttributes(t){if(!t)return{};const e={};return Object.keys(t.dataset).filter((t=>t.startsWith("bs"))).forEach((i=>{let n=i.replace(/^bs/,"");n=n.charAt(0).toLowerCase()+n.slice(1,n.length),e[n]=q(t.dataset[i])})),e},getDataAttribute:(t,e)=>q(t.getAttribute(`data-bs-${F(e)}`)),offset(t){const e=t.getBoundingClientRect();return{top:e.top+window.pageYOffset,left:e.left+window.pageXOffset}},position:t=>({top:t.offsetTop,left:t.offsetLeft})},V={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode;for(;n&&n.nodeType===Node.ELEMENT_NODE&&3!==n.nodeType;)n.matches(e)&&i.push(n),n=n.parentNode;return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const 
e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(", ");return this.find(e,t).filter((t=>!c(t)&&l(t)))}},K="carousel",X={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0,touch:!0},Y={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean",touch:"boolean"},Q="next",G="prev",Z="left",J="right",tt={ArrowLeft:J,ArrowRight:Z},et="slid.bs.carousel",it="active",nt=".active.carousel-item";class st extends B{constructor(t,e){super(t),this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this.touchStartX=0,this.touchDeltaX=0,this._config=this._getConfig(e),this._indicatorsElement=V.findOne(".carousel-indicators",this._element),this._touchSupported="ontouchstart"in document.documentElement||navigator.maxTouchPoints>0,this._pointerEvent=Boolean(window.PointerEvent),this._addEventListeners()}static get Default(){return X}static get NAME(){return K}next(){this._slide(Q)}nextWhenVisible(){!document.hidden&&l(this._element)&&this.next()}prev(){this._slide(G)}pause(t){t||(this._isPaused=!0),V.findOne(".carousel-item-next, .carousel-item-prev",this._element)&&(s(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null}cycle(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config&&this._config.interval&&!this._isPaused&&(this._updateInterval(),this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))}to(t){this._activeElement=V.findOne(nt,this._element);const e=this._getItemIndex(this._activeElement);if(t>this._items.length-1||t<0)return;if(this._isSliding)return void j.one(this._element,et,(()=>this.to(t)));if(e===t)return this.pause(),void this.cycle();const i=t>e?Q:G;this._slide(i,this._items[t])}_getConfig(t){return 
t={...X,...U.getDataAttributes(this._element),..."object"==typeof t?t:{}},a(K,t,Y),t}_handleSwipe(){const t=Math.abs(this.touchDeltaX);if(t<=40)return;const e=t/this.touchDeltaX;this.touchDeltaX=0,e&&this._slide(e>0?J:Z)}_addEventListeners(){this._config.keyboard&&j.on(this._element,"keydown.bs.carousel",(t=>this._keydown(t))),"hover"===this._config.pause&&(j.on(this._element,"mouseenter.bs.carousel",(t=>this.pause(t))),j.on(this._element,"mouseleave.bs.carousel",(t=>this.cycle(t)))),this._config.touch&&this._touchSupported&&this._addTouchEventListeners()}_addTouchEventListeners(){const t=t=>this._pointerEvent&&("pen"===t.pointerType||"touch"===t.pointerType),e=e=>{t(e)?this.touchStartX=e.clientX:this._pointerEvent||(this.touchStartX=e.touches[0].clientX)},i=t=>{this.touchDeltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this.touchStartX},n=e=>{t(e)&&(this.touchDeltaX=e.clientX-this.touchStartX),this._handleSwipe(),"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((t=>this.cycle(t)),500+this._config.interval))};V.find(".carousel-item img",this._element).forEach((t=>{j.on(t,"dragstart.bs.carousel",(t=>t.preventDefault()))})),this._pointerEvent?(j.on(this._element,"pointerdown.bs.carousel",(t=>e(t))),j.on(this._element,"pointerup.bs.carousel",(t=>n(t))),this._element.classList.add("pointer-event")):(j.on(this._element,"touchstart.bs.carousel",(t=>e(t))),j.on(this._element,"touchmove.bs.carousel",(t=>i(t))),j.on(this._element,"touchend.bs.carousel",(t=>n(t))))}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=tt[t.key];e&&(t.preventDefault(),this._slide(e))}_getItemIndex(t){return this._items=t&&t.parentNode?V.find(".carousel-item",t.parentNode):[],this._items.indexOf(t)}_getItemByOrder(t,e){const i=t===Q;return v(this._items,e,i,this._config.wrap)}_triggerSlideEvent(t,e){const i=this._getItemIndex(t),n=this._getItemIndex(V.findOne(nt,this._element));return 
j.trigger(this._element,"slide.bs.carousel",{relatedTarget:t,direction:e,from:n,to:i})}_setActiveIndicatorElement(t){if(this._indicatorsElement){const e=V.findOne(".active",this._indicatorsElement);e.classList.remove(it),e.removeAttribute("aria-current");const i=V.find("[data-bs-target]",this._indicatorsElement);for(let e=0;e<i.length;e++)if(Number.parseInt(i[e].getAttribute("data-bs-slide-to"),10)===this._getItemIndex(t)){i[e].classList.add(it),i[e].setAttribute("aria-current","true");break}}}_updateInterval(){const t=this._activeElement||V.findOne(nt,this._element);if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);e?(this._config.defaultInterval=this._config.defaultInterval||this._config.interval,this._config.interval=e):this._config.interval=this._config.defaultInterval||this._config.interval}_slide(t,e){const i=this._directionToOrder(t),n=V.findOne(nt,this._element),s=this._getItemIndex(n),o=e||this._getItemByOrder(i,n),r=this._getItemIndex(o),a=Boolean(this._interval),l=i===Q,c=l?"carousel-item-start":"carousel-item-end",h=l?"carousel-item-next":"carousel-item-prev",d=this._orderToDirection(i);if(o&&o.classList.contains(it))return void(this._isSliding=!1);if(this._isSliding)return;if(this._triggerSlideEvent(o,d).defaultPrevented)return;if(!n||!o)return;this._isSliding=!0,a&&this.pause(),this._setActiveIndicatorElement(o),this._activeElement=o;const f=()=>{j.trigger(this._element,et,{relatedTarget:o,direction:d,from:s,to:r})};if(this._element.classList.contains("slide")){o.classList.add(h),u(o),n.classList.add(c),o.classList.add(c);const t=()=>{o.classList.remove(c,h),o.classList.add(it),n.classList.remove(it,h,c),this._isSliding=!1,setTimeout(f,0)};this._queueCallback(t,n,!0)}else n.classList.remove(it),o.classList.add(it),this._isSliding=!1,f();a&&this.cycle()}_directionToOrder(t){return[J,Z].includes(t)?m()?t===Z?G:Q:t===Z?Q:G:t}_orderToDirection(t){return[Q,G].includes(t)?m()?t===G?Z:J:t===G?J:Z:t}static 
carouselInterface(t,e){const i=st.getOrCreateInstance(t,e);let{_config:n}=i;"object"==typeof e&&(n={...n,...e});const s="string"==typeof e?e:n.slide;if("number"==typeof e)i.to(e);else if("string"==typeof s){if(void 0===i[s])throw new TypeError(`No method named "${s}"`);i[s]()}else n.interval&&n.ride&&(i.pause(),i.cycle())}static jQueryInterface(t){return this.each((function(){st.carouselInterface(this,t)}))}static dataApiClickHandler(t){const e=n(this);if(!e||!e.classList.contains("carousel"))return;const i={...U.getDataAttributes(e),...U.getDataAttributes(this)},s=this.getAttribute("data-bs-slide-to");s&&(i.interval=!1),st.carouselInterface(e,i),s&&st.getInstance(e).to(s),t.preventDefault()}}j.on(document,"click.bs.carousel.data-api","[data-bs-slide], [data-bs-slide-to]",st.dataApiClickHandler),j.on(window,"load.bs.carousel.data-api",(()=>{const t=V.find('[data-bs-ride="carousel"]');for(let e=0,i=t.length;e<i;e++)st.carouselInterface(t[e],st.getInstance(t[e]))})),g(st);const ot="collapse",rt={toggle:!0,parent:null},at={toggle:"boolean",parent:"(null|element)"},lt="show",ct="collapse",ht="collapsing",dt="collapsed",ut=":scope .collapse .collapse",ft='[data-bs-toggle="collapse"]';class pt extends B{constructor(t,e){super(t),this._isTransitioning=!1,this._config=this._getConfig(e),this._triggerArray=[];const n=V.find(ft);for(let t=0,e=n.length;t<e;t++){const e=n[t],s=i(e),o=V.find(s).filter((t=>t===this._element));null!==s&&o.length&&(this._selector=s,this._triggerArray.push(e))}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return rt}static get NAME(){return ot}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t,e=[];if(this._config.parent){const t=V.find(ut,this._config.parent);e=V.find(".collapse.show, .collapse.collapsing",this._config.parent).filter((e=>!t.includes(e)))}const 
i=V.findOne(this._selector);if(e.length){const n=e.find((t=>i!==t));if(t=n?pt.getInstance(n):null,t&&t._isTransitioning)return}if(j.trigger(this._element,"show.bs.collapse").defaultPrevented)return;e.forEach((e=>{i!==e&&pt.getOrCreateInstance(e,{toggle:!1}).hide(),t||H.set(e,"bs.collapse",null)}));const n=this._getDimension();this._element.classList.remove(ct),this._element.classList.add(ht),this._element.style[n]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const s=`scroll${n[0].toUpperCase()+n.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(ht),this._element.classList.add(ct,lt),this._element.style[n]="",j.trigger(this._element,"shown.bs.collapse")}),this._element,!0),this._element.style[n]=`${this._element[s]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(j.trigger(this._element,"hide.bs.collapse").defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,u(this._element),this._element.classList.add(ht),this._element.classList.remove(ct,lt);const e=this._triggerArray.length;for(let t=0;t<e;t++){const e=this._triggerArray[t],i=n(e);i&&!this._isShown(i)&&this._addAriaAndCollapsedClass([e],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(ht),this._element.classList.add(ct),j.trigger(this._element,"hidden.bs.collapse")}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(lt)}_getConfig(t){return(t={...rt,...U.getDataAttributes(this._element),...t}).toggle=Boolean(t.toggle),t.parent=r(t.parent),a(ot,t,at),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=V.find(ut,this._config.parent);V.find(ft,this._config.parent).filter((e=>!t.includes(e))).forEach((t=>{const 
e=n(t);e&&this._addAriaAndCollapsedClass([t],this._isShown(e))}))}_addAriaAndCollapsedClass(t,e){t.length&&t.forEach((t=>{e?t.classList.remove(dt):t.classList.add(dt),t.setAttribute("aria-expanded",e)}))}static jQueryInterface(t){return this.each((function(){const e={};"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1);const i=pt.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}j.on(document,"click.bs.collapse.data-api",ft,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();const e=i(this);V.find(e).forEach((t=>{pt.getOrCreateInstance(t,{toggle:!1}).toggle()}))})),g(pt);var mt="top",gt="bottom",_t="right",bt="left",vt="auto",yt=[mt,gt,_t,bt],wt="start",Et="end",At="clippingParents",Tt="viewport",Ot="popper",Ct="reference",kt=yt.reduce((function(t,e){return t.concat([e+"-"+wt,e+"-"+Et])}),[]),Lt=[].concat(yt,[vt]).reduce((function(t,e){return t.concat([e,e+"-"+wt,e+"-"+Et])}),[]),xt="beforeRead",Dt="read",St="afterRead",Nt="beforeMain",It="main",Pt="afterMain",jt="beforeWrite",Mt="write",Ht="afterWrite",Bt=[xt,Dt,St,Nt,It,Pt,jt,Mt,Ht];function Rt(t){return t?(t.nodeName||"").toLowerCase():null}function Wt(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function $t(t){return t instanceof Wt(t).Element||t instanceof Element}function zt(t){return t instanceof Wt(t).HTMLElement||t instanceof HTMLElement}function qt(t){return"undefined"!=typeof ShadowRoot&&(t instanceof Wt(t).ShadowRoot||t instanceof ShadowRoot)}const Ft={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];zt(s)&&Rt(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var 
e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});zt(n)&&Rt(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function Ut(t){return t.split("-")[0]}function Vt(t,e){var i=t.getBoundingClientRect();return{width:i.width/1,height:i.height/1,top:i.top/1,right:i.right/1,bottom:i.bottom/1,left:i.left/1,x:i.left/1,y:i.top/1}}function Kt(t){var e=Vt(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function Xt(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&qt(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function Yt(t){return Wt(t).getComputedStyle(t)}function Qt(t){return["table","td","th"].indexOf(Rt(t))>=0}function Gt(t){return(($t(t)?t.ownerDocument:t.document)||window.document).documentElement}function Zt(t){return"html"===Rt(t)?t:t.assignedSlot||t.parentNode||(qt(t)?t.host:null)||Gt(t)}function Jt(t){return zt(t)&&"fixed"!==Yt(t).position?t.offsetParent:null}function te(t){for(var e=Wt(t),i=Jt(t);i&&Qt(i)&&"static"===Yt(i).position;)i=Jt(i);return i&&("html"===Rt(i)||"body"===Rt(i)&&"static"===Yt(i).position)?e:i||function(t){var e=-1!==navigator.userAgent.toLowerCase().indexOf("firefox");if(-1!==navigator.userAgent.indexOf("Trident")&&zt(t)&&"fixed"===Yt(t).position)return null;for(var 
i=Zt(t);zt(i)&&["html","body"].indexOf(Rt(i))<0;){var n=Yt(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function ee(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}var ie=Math.max,ne=Math.min,se=Math.round;function oe(t,e,i){return ie(t,ne(e,i))}function re(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function ae(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const le={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,n=t.name,s=t.options,o=i.elements.arrow,r=i.modifiersData.popperOffsets,a=Ut(i.placement),l=ee(a),c=[bt,_t].indexOf(a)>=0?"height":"width";if(o&&r){var h=function(t,e){return re("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:ae(t,yt))}(s.padding,i),d=Kt(o),u="y"===l?mt:bt,f="y"===l?gt:_t,p=i.rects.reference[c]+i.rects.reference[l]-r[l]-i.rects.popper[c],m=r[l]-i.rects.reference[l],g=te(o),_=g?"y"===l?g.clientHeight||0:g.clientWidth||0:0,b=p/2-m/2,v=h[u],y=_-d[c]-h[f],w=_/2-d[c]/2+b,E=oe(v,w,y),A=l;i.modifiersData[n]=((e={})[A]=E,e.centerOffset=E-w,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&Xt(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function ce(t){return t.split("-")[1]}var he={top:"auto",right:"auto",bottom:"auto",left:"auto"};function de(t){var e,i=t.popper,n=t.popperRect,s=t.placement,o=t.variation,r=t.offsets,a=t.position,l=t.gpuAcceleration,c=t.adaptive,h=t.roundOffsets,d=!0===h?function(t){var e=t.x,i=t.y,n=window.devicePixelRatio||1;return{x:se(se(e*n)/n)||0,y:se(se(i*n)/n)||0}}(r):"function"==typeof h?h(r):r,u=d.x,f=void 0===u?0:u,p=d.y,m=void 
0===p?0:p,g=r.hasOwnProperty("x"),_=r.hasOwnProperty("y"),b=bt,v=mt,y=window;if(c){var w=te(i),E="clientHeight",A="clientWidth";w===Wt(i)&&"static"!==Yt(w=Gt(i)).position&&"absolute"===a&&(E="scrollHeight",A="scrollWidth"),w=w,s!==mt&&(s!==bt&&s!==_t||o!==Et)||(v=gt,m-=w[E]-n.height,m*=l?1:-1),s!==bt&&(s!==mt&&s!==gt||o!==Et)||(b=_t,f-=w[A]-n.width,f*=l?1:-1)}var T,O=Object.assign({position:a},c&&he);return l?Object.assign({},O,((T={})[v]=_?"0":"",T[b]=g?"0":"",T.transform=(y.devicePixelRatio||1)<=1?"translate("+f+"px, "+m+"px)":"translate3d("+f+"px, "+m+"px, 0)",T)):Object.assign({},O,((e={})[v]=_?m+"px":"",e[b]=g?f+"px":"",e.transform="",e))}const ue={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:Ut(e.placement),variation:ce(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,de(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,de(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var fe={passive:!0};const pe={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=Wt(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return 
o&&c.forEach((function(t){t.addEventListener("scroll",i.update,fe)})),a&&l.addEventListener("resize",i.update,fe),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,fe)})),a&&l.removeEventListener("resize",i.update,fe)}},data:{}};var me={left:"right",right:"left",bottom:"top",top:"bottom"};function ge(t){return t.replace(/left|right|bottom|top/g,(function(t){return me[t]}))}var _e={start:"end",end:"start"};function be(t){return t.replace(/start|end/g,(function(t){return _e[t]}))}function ve(t){var e=Wt(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ye(t){return Vt(Gt(t)).left+ve(t).scrollLeft}function we(t){var e=Yt(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function Ee(t){return["html","body","#document"].indexOf(Rt(t))>=0?t.ownerDocument.body:zt(t)&&we(t)?t:Ee(Zt(t))}function Ae(t,e){var i;void 0===e&&(e=[]);var n=Ee(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=Wt(n),r=s?[o].concat(o.visualViewport||[],we(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(Ae(Zt(r)))}function Te(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function Oe(t,e){return e===Tt?Te(function(t){var e=Wt(t),i=Gt(t),n=e.visualViewport,s=i.clientWidth,o=i.clientHeight,r=0,a=0;return n&&(s=n.width,o=n.height,/^((?!chrome|android).)*safari/i.test(navigator.userAgent)||(r=n.offsetLeft,a=n.offsetTop)),{width:s,height:o,x:r+ye(t),y:a}}(t)):zt(e)?function(t){var e=Vt(t);return e.top=e.top+t.clientTop,e.left=e.left+t.clientLeft,e.bottom=e.top+t.clientHeight,e.right=e.left+t.clientWidth,e.width=t.clientWidth,e.height=t.clientHeight,e.x=e.left,e.y=e.top,e}(e):Te(function(t){var e,i=Gt(t),n=ve(t),s=null==(e=t.ownerDocument)?void 
0:e.body,o=ie(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=ie(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ye(t),l=-n.scrollTop;return"rtl"===Yt(s||i).direction&&(a+=ie(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(Gt(t)))}function Ce(t){var e,i=t.reference,n=t.element,s=t.placement,o=s?Ut(s):null,r=s?ce(s):null,a=i.x+i.width/2-n.width/2,l=i.y+i.height/2-n.height/2;switch(o){case mt:e={x:a,y:i.y-n.height};break;case gt:e={x:a,y:i.y+i.height};break;case _t:e={x:i.x+i.width,y:l};break;case bt:e={x:i.x-n.width,y:l};break;default:e={x:i.x,y:i.y}}var c=o?ee(o):null;if(null!=c){var h="y"===c?"height":"width";switch(r){case wt:e[c]=e[c]-(i[h]/2-n[h]/2);break;case Et:e[c]=e[c]+(i[h]/2-n[h]/2)}}return e}function ke(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=void 0===n?t.placement:n,o=i.boundary,r=void 0===o?At:o,a=i.rootBoundary,l=void 0===a?Tt:a,c=i.elementContext,h=void 0===c?Ot:c,d=i.altBoundary,u=void 0!==d&&d,f=i.padding,p=void 0===f?0:f,m=re("number"!=typeof p?p:ae(p,yt)),g=h===Ot?Ct:Ot,_=t.rects.popper,b=t.elements[u?g:h],v=function(t,e,i){var n="clippingParents"===e?function(t){var e=Ae(Zt(t)),i=["absolute","fixed"].indexOf(Yt(t).position)>=0&&zt(t)?te(t):t;return $t(i)?e.filter((function(t){return $t(t)&&Xt(t,i)&&"body"!==Rt(t)})):[]}(t):[].concat(e),s=[].concat(n,[i]),o=s[0],r=s.reduce((function(e,i){var n=Oe(t,i);return e.top=ie(n.top,e.top),e.right=ne(n.right,e.right),e.bottom=ne(n.bottom,e.bottom),e.left=ie(n.left,e.left),e}),Oe(t,o));return r.width=r.right-r.left,r.height=r.bottom-r.top,r.x=r.left,r.y=r.top,r}($t(b)?b:b.contextElement||Gt(t.elements.popper),r,l),y=Vt(t.elements.reference),w=Ce({reference:y,element:_,strategy:"absolute",placement:s}),E=Te(Object.assign({},_,w)),A=h===Ot?E:y,T={top:v.top-A.top+m.top,bottom:A.bottom-v.bottom+m.bottom,left:v.left-A.left+m.left,right:A.right-v.right+m.right},O=t.modifiersData.offset;if(h===Ot&&O){var 
C=O[s];Object.keys(T).forEach((function(t){var e=[_t,gt].indexOf(t)>=0?1:-1,i=[mt,gt].indexOf(t)>=0?"y":"x";T[t]+=C[i]*e}))}return T}function Le(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,l=i.allowedAutoPlacements,c=void 0===l?Lt:l,h=ce(n),d=h?a?kt:kt.filter((function(t){return ce(t)===h})):yt,u=d.filter((function(t){return c.indexOf(t)>=0}));0===u.length&&(u=d);var f=u.reduce((function(e,i){return e[i]=ke(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[Ut(i)],e}),{});return Object.keys(f).sort((function(t,e){return f[t]-f[e]}))}const xe={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name;if(!e.modifiersData[n]._skip){for(var s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0===r||r,l=i.fallbackPlacements,c=i.padding,h=i.boundary,d=i.rootBoundary,u=i.altBoundary,f=i.flipVariations,p=void 0===f||f,m=i.allowedAutoPlacements,g=e.options.placement,_=Ut(g),b=l||(_!==g&&p?function(t){if(Ut(t)===vt)return[];var e=ge(t);return[be(t),e,be(e)]}(g):[ge(g)]),v=[g].concat(b).reduce((function(t,i){return t.concat(Ut(i)===vt?Le(e,{placement:i,boundary:h,rootBoundary:d,padding:c,flipVariations:p,allowedAutoPlacements:m}):i)}),[]),y=e.rects.reference,w=e.rects.popper,E=new Map,A=!0,T=v[0],O=0;O<v.length;O++){var C=v[O],k=Ut(C),L=ce(C)===wt,x=[mt,gt].indexOf(k)>=0,D=x?"width":"height",S=ke(e,{placement:C,boundary:h,rootBoundary:d,altBoundary:u,padding:c}),N=x?L?_t:bt:L?gt:mt;y[D]>w[D]&&(N=ge(N));var I=ge(N),P=[];if(o&&P.push(S[k]<=0),a&&P.push(S[N]<=0,S[I]<=0),P.every((function(t){return t}))){T=C,A=!1;break}E.set(C,P)}if(A)for(var j=function(t){var e=v.find((function(e){var i=E.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return T=e,"break"},M=p?3:1;M>0&&"break"!==j(M);M--);e.placement!==T&&(e.modifiersData[n]._skip=!0,e.placement=T,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function De(t,e,i){return void 
0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function Se(t){return[mt,_t,gt,bt].some((function(e){return t[e]>=0}))}const Ne={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=ke(e,{elementContext:"reference"}),a=ke(e,{altBoundary:!0}),l=De(r,n),c=De(a,s,o),h=Se(l),d=Se(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},Ie={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.offset,o=void 0===s?[0,0]:s,r=Lt.reduce((function(t,i){return t[i]=function(t,e,i){var n=Ut(t),s=[bt,mt].indexOf(n)>=0?-1:1,o="function"==typeof i?i(Object.assign({},e,{placement:t})):i,r=o[0],a=o[1];return r=r||0,a=(a||0)*s,[bt,_t].indexOf(n)>=0?{x:a,y:r}:{x:r,y:a}}(i,e.rects,o),t}),{}),a=r[e.placement],l=a.x,c=a.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=l,e.modifiersData.popperOffsets.y+=c),e.modifiersData[n]=r}},Pe={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=Ce({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},je={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0!==r&&r,l=i.boundary,c=i.rootBoundary,h=i.altBoundary,d=i.padding,u=i.tether,f=void 0===u||u,p=i.tetherOffset,m=void 0===p?0:p,g=ke(e,{boundary:l,rootBoundary:c,padding:d,altBoundary:h}),_=Ut(e.placement),b=ce(e.placement),v=!b,y=ee(_),w="x"===y?"y":"x",E=e.modifiersData.popperOffsets,A=e.rects.reference,T=e.rects.popper,O="function"==typeof 
m?m(Object.assign({},e.rects,{placement:e.placement})):m,C={x:0,y:0};if(E){if(o||a){var k="y"===y?mt:bt,L="y"===y?gt:_t,x="y"===y?"height":"width",D=E[y],S=E[y]+g[k],N=E[y]-g[L],I=f?-T[x]/2:0,P=b===wt?A[x]:T[x],j=b===wt?-T[x]:-A[x],M=e.elements.arrow,H=f&&M?Kt(M):{width:0,height:0},B=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},R=B[k],W=B[L],$=oe(0,A[x],H[x]),z=v?A[x]/2-I-$-R-O:P-$-R-O,q=v?-A[x]/2+I+$+W+O:j+$+W+O,F=e.elements.arrow&&te(e.elements.arrow),U=F?"y"===y?F.clientTop||0:F.clientLeft||0:0,V=e.modifiersData.offset?e.modifiersData.offset[e.placement][y]:0,K=E[y]+z-V-U,X=E[y]+q-V;if(o){var Y=oe(f?ne(S,K):S,D,f?ie(N,X):N);E[y]=Y,C[y]=Y-D}if(a){var Q="x"===y?mt:bt,G="x"===y?gt:_t,Z=E[w],J=Z+g[Q],tt=Z-g[G],et=oe(f?ne(J,K):J,Z,f?ie(tt,X):tt);E[w]=et,C[w]=et-Z}}e.modifiersData[n]=C}},requiresIfExists:["offset"]};function Me(t,e,i){void 0===i&&(i=!1);var n=zt(e);zt(e)&&function(t){var e=t.getBoundingClientRect();e.width,t.offsetWidth,e.height,t.offsetHeight}(e);var s,o,r=Gt(e),a=Vt(t),l={scrollLeft:0,scrollTop:0},c={x:0,y:0};return(n||!n&&!i)&&(("body"!==Rt(e)||we(r))&&(l=(s=e)!==Wt(s)&&zt(s)?{scrollLeft:(o=s).scrollLeft,scrollTop:o.scrollTop}:ve(s)),zt(e)?((c=Vt(e)).x+=e.clientLeft,c.y+=e.clientTop):r&&(c.x=ye(r))),{x:a.left+l.scrollLeft-c.x,y:a.top+l.scrollTop-c.y,width:a.width,height:a.height}}function He(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var Be={placement:"bottom",modifiers:[],strategy:"absolute"};function Re(){for(var t=arguments.length,e=new Array(t),i=0;i<t;i++)e[i]=arguments[i];return!e.some((function(t){return!(t&&"function"==typeof t.getBoundingClientRect)}))}function We(t){void 0===t&&(t={});var e=t,i=e.defaultModifiers,n=void 
0===i?[]:i,s=e.defaultOptions,o=void 0===s?Be:s;return function(t,e,i){void 0===i&&(i=o);var s,r,a={placement:"bottom",orderedModifiers:[],options:Object.assign({},Be,o),modifiersData:{},elements:{reference:t,popper:e},attributes:{},styles:{}},l=[],c=!1,h={state:a,setOptions:function(i){var s="function"==typeof i?i(a.options):i;d(),a.options=Object.assign({},o,a.options,s),a.scrollParents={reference:$t(t)?Ae(t):t.contextElement?Ae(t.contextElement):[],popper:Ae(e)};var r,c,u=function(t){var e=He(t);return Bt.reduce((function(t,i){return t.concat(e.filter((function(t){return t.phase===i})))}),[])}((r=[].concat(n,a.options.modifiers),c=r.reduce((function(t,e){var i=t[e.name];return t[e.name]=i?Object.assign({},i,e,{options:Object.assign({},i.options,e.options),data:Object.assign({},i.data,e.data)}):e,t}),{}),Object.keys(c).map((function(t){return c[t]}))));return a.orderedModifiers=u.filter((function(t){return t.enabled})),a.orderedModifiers.forEach((function(t){var e=t.name,i=t.options,n=void 0===i?{}:i,s=t.effect;if("function"==typeof s){var o=s({state:a,name:e,instance:h,options:n});l.push(o||function(){})}})),h.update()},forceUpdate:function(){if(!c){var t=a.elements,e=t.reference,i=t.popper;if(Re(e,i)){a.rects={reference:Me(e,te(i),"fixed"===a.options.strategy),popper:Kt(i)},a.reset=!1,a.placement=a.options.placement,a.orderedModifiers.forEach((function(t){return a.modifiersData[t.name]=Object.assign({},t.data)}));for(var n=0;n<a.orderedModifiers.length;n++)if(!0!==a.reset){var s=a.orderedModifiers[n],o=s.fn,r=s.options,l=void 0===r?{}:r,d=s.name;"function"==typeof o&&(a=o({state:a,options:l,name:d,instance:h})||a)}else a.reset=!1,n=-1}}},update:(s=function(){return new Promise((function(t){h.forceUpdate(),t(a)}))},function(){return r||(r=new Promise((function(t){Promise.resolve().then((function(){r=void 0,t(s())}))}))),r}),destroy:function(){d(),c=!0}};if(!Re(t,e))return h;function d(){l.forEach((function(t){return t()})),l=[]}return 
h.setOptions(i).then((function(t){!c&&i.onFirstUpdate&&i.onFirstUpdate(t)})),h}}var $e=We(),ze=We({defaultModifiers:[pe,Pe,ue,Ft]}),qe=We({defaultModifiers:[pe,Pe,ue,Ft,Ie,xe,je,le,Ne]});const Fe=Object.freeze({__proto__:null,popperGenerator:We,detectOverflow:ke,createPopperBase:$e,createPopper:qe,createPopperLite:ze,top:mt,bottom:gt,right:_t,left:bt,auto:vt,basePlacements:yt,start:wt,end:Et,clippingParents:At,viewport:Tt,popper:Ot,reference:Ct,variationPlacements:kt,placements:Lt,beforeRead:xt,read:Dt,afterRead:St,beforeMain:Nt,main:It,afterMain:Pt,beforeWrite:jt,write:Mt,afterWrite:Ht,modifierPhases:Bt,applyStyles:Ft,arrow:le,computeStyles:ue,eventListeners:pe,flip:xe,hide:Ne,offset:Ie,popperOffsets:Pe,preventOverflow:je}),Ue="dropdown",Ve="Escape",Ke="Space",Xe="ArrowUp",Ye="ArrowDown",Qe=new RegExp("ArrowUp|ArrowDown|Escape"),Ge="click.bs.dropdown.data-api",Ze="keydown.bs.dropdown.data-api",Je="show",ti='[data-bs-toggle="dropdown"]',ei=".dropdown-menu",ii=m()?"top-end":"top-start",ni=m()?"top-start":"top-end",si=m()?"bottom-end":"bottom-start",oi=m()?"bottom-start":"bottom-end",ri=m()?"left-start":"right-start",ai=m()?"right-start":"left-start",li={offset:[0,2],boundary:"clippingParents",reference:"toggle",display:"dynamic",popperConfig:null,autoClose:!0},ci={offset:"(array|string|function)",boundary:"(string|element)",reference:"(string|element|object)",display:"string",popperConfig:"(null|object|function)",autoClose:"(boolean|string)"};class hi extends B{constructor(t,e){super(t),this._popper=null,this._config=this._getConfig(e),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar()}static get Default(){return li}static get DefaultType(){return ci}static get NAME(){return Ue}toggle(){return this._isShown()?this.hide():this.show()}show(){if(c(this._element)||this._isShown(this._menu))return;const t={relatedTarget:this._element};if(j.trigger(this._element,"show.bs.dropdown",t).defaultPrevented)return;const 
e=hi.getParentFromElement(this._element);this._inNavbar?U.setDataAttribute(this._menu,"popper","none"):this._createPopper(e),"ontouchstart"in document.documentElement&&!e.closest(".navbar-nav")&&[].concat(...document.body.children).forEach((t=>j.on(t,"mouseover",d))),this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add(Je),this._element.classList.add(Je),j.trigger(this._element,"shown.bs.dropdown",t)}hide(){if(c(this._element)||!this._isShown(this._menu))return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){j.trigger(this._element,"hide.bs.dropdown",t).defaultPrevented||("ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach((t=>j.off(t,"mouseover",d))),this._popper&&this._popper.destroy(),this._menu.classList.remove(Je),this._element.classList.remove(Je),this._element.setAttribute("aria-expanded","false"),U.removeDataAttribute(this._menu,"popper"),j.trigger(this._element,"hidden.bs.dropdown",t))}_getConfig(t){if(t={...this.constructor.Default,...U.getDataAttributes(this._element),...t},a(Ue,t,this.constructor.DefaultType),"object"==typeof t.reference&&!o(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${Ue.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(t){if(void 0===Fe)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let e=this._element;"parent"===this._config.reference?e=t:o(this._config.reference)?e=r(this._config.reference):"object"==typeof this._config.reference&&(e=this._config.reference);const 
i=this._getPopperConfig(),n=i.modifiers.find((t=>"applyStyles"===t.name&&!1===t.enabled));this._popper=qe(e,this._menu,i),n&&U.setDataAttribute(this._menu,"popper","static")}_isShown(t=this._element){return t.classList.contains(Je)}_getMenuElement(){return V.next(this._element,ei)[0]}_getPlacement(){const t=this._element.parentNode;if(t.classList.contains("dropend"))return ri;if(t.classList.contains("dropstart"))return ai;const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?ni:ii:e?oi:si}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return"static"===this._config.display&&(t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,..."function"==typeof this._config.popperConfig?this._config.popperConfig(t):this._config.popperConfig}}_selectMenuItem({key:t,target:e}){const i=V.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter(l);i.length&&v(i,e,t===Ye,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=hi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(t&&(2===t.button||"keyup"===t.type&&"Tab"!==t.key))return;const e=V.find(ti);for(let i=0,n=e.length;i<n;i++){const n=hi.getInstance(e[i]);if(!n||!1===n._config.autoClose)continue;if(!n._isShown())continue;const s={relatedTarget:n._element};if(t){const 
e=t.composedPath(),i=e.includes(n._menu);if(e.includes(n._element)||"inside"===n._config.autoClose&&!i||"outside"===n._config.autoClose&&i)continue;if(n._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;"click"===t.type&&(s.clickEvent=t)}n._completeHide(s)}}static getParentFromElement(t){return n(t)||t.parentNode}static dataApiKeydownHandler(t){if(/input|textarea/i.test(t.target.tagName)?t.key===Ke||t.key!==Ve&&(t.key!==Ye&&t.key!==Xe||t.target.closest(ei)):!Qe.test(t.key))return;const e=this.classList.contains(Je);if(!e&&t.key===Ve)return;if(t.preventDefault(),t.stopPropagation(),c(this))return;const i=this.matches(ti)?this:V.prev(this,ti)[0],n=hi.getOrCreateInstance(i);if(t.key!==Ve)return t.key===Xe||t.key===Ye?(e||n.show(),void n._selectMenuItem(t)):void(e&&t.key!==Ke||hi.clearMenus());n.hide()}}j.on(document,Ze,ti,hi.dataApiKeydownHandler),j.on(document,Ze,ei,hi.dataApiKeydownHandler),j.on(document,Ge,hi.clearMenus),j.on(document,"keyup.bs.dropdown.data-api",hi.clearMenus),j.on(document,Ge,ti,(function(t){t.preventDefault(),hi.getOrCreateInstance(this).toggle()})),g(hi);const di=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",ui=".sticky-top";class fi{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,"paddingRight",(e=>e+t)),this._setElementAttributes(di,"paddingRight",(e=>e+t)),this._setElementAttributes(ui,"marginRight",(e=>e-t))}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const 
s=window.getComputedStyle(t)[e];t.style[e]=`${i(Number.parseFloat(s))}px`}))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,"paddingRight"),this._resetElementAttributes(di,"paddingRight"),this._resetElementAttributes(ui,"marginRight")}_saveInitialAttribute(t,e){const i=t.style[e];i&&U.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=U.getDataAttribute(t,e);void 0===i?t.style.removeProperty(e):(U.removeDataAttribute(t,e),t.style[e]=i)}))}_applyManipulationCallback(t,e){o(t)?e(t):V.find(t,this._element).forEach(e)}isOverflowing(){return this.getWidth()>0}}const pi={className:"modal-backdrop",isVisible:!0,isAnimated:!1,rootElement:"body",clickCallback:null},mi={className:"string",isVisible:"boolean",isAnimated:"boolean",rootElement:"(element|string)",clickCallback:"(function|null)"},gi="show",_i="mousedown.bs.backdrop";class bi{constructor(t){this._config=this._getConfig(t),this._isAppended=!1,this._element=null}show(t){this._config.isVisible?(this._append(),this._config.isAnimated&&u(this._getElement()),this._getElement().classList.add(gi),this._emulateAnimation((()=>{_(t)}))):_(t)}hide(t){this._config.isVisible?(this._getElement().classList.remove(gi),this._emulateAnimation((()=>{this.dispose(),_(t)}))):_(t)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_getConfig(t){return(t={...pi,..."object"==typeof t?t:{}}).rootElement=r(t.rootElement),a("backdrop",t,mi),t}_append(){this._isAppended||(this._config.rootElement.append(this._getElement()),j.on(this._getElement(),_i,(()=>{_(this._config.clickCallback)})),this._isAppended=!0)}dispose(){this._isAppended&&(j.off(this._element,_i),this._element.remove(),this._isAppended=!1)}_emulateAnimation(t){b(t,this._getElement(),this._config.isAnimated)}}const 
vi={trapElement:null,autofocus:!0},yi={trapElement:"element",autofocus:"boolean"},wi=".bs.focustrap",Ei="backward";class Ai{constructor(t){this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}activate(){const{trapElement:t,autofocus:e}=this._config;this._isActive||(e&&t.focus(),j.off(document,wi),j.on(document,"focusin.bs.focustrap",(t=>this._handleFocusin(t))),j.on(document,"keydown.tab.bs.focustrap",(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,j.off(document,wi))}_handleFocusin(t){const{target:e}=t,{trapElement:i}=this._config;if(e===document||e===i||i.contains(e))return;const n=V.focusableChildren(i);0===n.length?i.focus():this._lastTabNavDirection===Ei?n[n.length-1].focus():n[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?Ei:"forward")}_getConfig(t){return t={...vi,..."object"==typeof t?t:{}},a("focustrap",t,yi),t}}const Ti="modal",Oi="Escape",Ci={backdrop:!0,keyboard:!0,focus:!0},ki={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean"},Li="hidden.bs.modal",xi="show.bs.modal",Di="resize.bs.modal",Si="click.dismiss.bs.modal",Ni="keydown.dismiss.bs.modal",Ii="mousedown.dismiss.bs.modal",Pi="modal-open",ji="show",Mi="modal-static";class Hi extends B{constructor(t,e){super(t),this._config=this._getConfig(e),this._dialog=V.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._ignoreBackdropClick=!1,this._isTransitioning=!1,this._scrollBar=new fi}static get Default(){return Ci}static get NAME(){return Ti}toggle(t){return 
this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||j.trigger(this._element,xi,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isAnimated()&&(this._isTransitioning=!0),this._scrollBar.hide(),document.body.classList.add(Pi),this._adjustDialog(),this._setEscapeEvent(),this._setResizeEvent(),j.on(this._dialog,Ii,(()=>{j.one(this._element,"mouseup.dismiss.bs.modal",(t=>{t.target===this._element&&(this._ignoreBackdropClick=!0)}))})),this._showBackdrop((()=>this._showElement(t))))}hide(){if(!this._isShown||this._isTransitioning)return;if(j.trigger(this._element,"hide.bs.modal").defaultPrevented)return;this._isShown=!1;const t=this._isAnimated();t&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),this._focustrap.deactivate(),this._element.classList.remove(ji),j.off(this._element,Si),j.off(this._dialog,Ii),this._queueCallback((()=>this._hideModal()),this._element,t)}dispose(){[window,this._dialog].forEach((t=>j.off(t,".bs.modal"))),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new bi({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new Ai({trapElement:this._element})}_getConfig(t){return t={...Ci,...U.getDataAttributes(this._element),..."object"==typeof t?t:{}},a(Ti,t,ki),t}_showElement(t){const 
e=this._isAnimated(),i=V.findOne(".modal-body",this._dialog);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0,i&&(i.scrollTop=0),e&&u(this._element),this._element.classList.add(ji),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,j.trigger(this._element,"shown.bs.modal",{relatedTarget:t})}),this._dialog,e)}_setEscapeEvent(){this._isShown?j.on(this._element,Ni,(t=>{this._config.keyboard&&t.key===Oi?(t.preventDefault(),this.hide()):this._config.keyboard||t.key!==Oi||this._triggerBackdropTransition()})):j.off(this._element,Ni)}_setResizeEvent(){this._isShown?j.on(window,Di,(()=>this._adjustDialog())):j.off(window,Di)}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(Pi),this._resetAdjustments(),this._scrollBar.reset(),j.trigger(this._element,Li)}))}_showBackdrop(t){j.on(this._element,Si,(t=>{this._ignoreBackdropClick?this._ignoreBackdropClick=!1:t.target===t.currentTarget&&(!0===this._config.backdrop?this.hide():"static"===this._config.backdrop&&this._triggerBackdropTransition())})),this._backdrop.show(t)}_isAnimated(){return 
this._element.classList.contains("fade")}_triggerBackdropTransition(){if(j.trigger(this._element,"hidePrevented.bs.modal").defaultPrevented)return;const{classList:t,scrollHeight:e,style:i}=this._element,n=e>document.documentElement.clientHeight;!n&&"hidden"===i.overflowY||t.contains(Mi)||(n||(i.overflowY="hidden"),t.add(Mi),this._queueCallback((()=>{t.remove(Mi),n||this._queueCallback((()=>{i.overflowY=""}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;(!i&&t&&!m()||i&&!t&&m())&&(this._element.style.paddingLeft=`${e}px`),(i&&!t&&!m()||!i&&t&&m())&&(this._element.style.paddingRight=`${e}px`)}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=Hi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}j.on(document,"click.bs.modal.data-api",'[data-bs-toggle="modal"]',(function(t){const e=n(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),j.one(e,xi,(t=>{t.defaultPrevented||j.one(e,Li,(()=>{l(this)&&this.focus()}))}));const i=V.findOne(".modal.show");i&&Hi.getInstance(i).hide(),Hi.getOrCreateInstance(e).toggle(this)})),R(Hi),g(Hi);const Bi="offcanvas",Ri={backdrop:!0,keyboard:!0,scroll:!1},Wi={backdrop:"boolean",keyboard:"boolean",scroll:"boolean"},$i="show",zi=".offcanvas.show",qi="hidden.bs.offcanvas";class Fi extends B{constructor(t,e){super(t),this._config=this._getConfig(e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get NAME(){return Bi}static get Default(){return Ri}toggle(t){return 
this._isShown?this.hide():this.show(t)}show(t){this._isShown||j.trigger(this._element,"show.bs.offcanvas",{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._element.style.visibility="visible",this._backdrop.show(),this._config.scroll||(new fi).hide(),this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add($i),this._queueCallback((()=>{this._config.scroll||this._focustrap.activate(),j.trigger(this._element,"shown.bs.offcanvas",{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(j.trigger(this._element,"hide.bs.offcanvas").defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.remove($i),this._backdrop.hide(),this._queueCallback((()=>{this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._element.style.visibility="hidden",this._config.scroll||(new fi).reset(),j.trigger(this._element,qi)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_getConfig(t){return t={...Ri,...U.getDataAttributes(this._element),..."object"==typeof t?t:{}},a(Bi,t,Wi),t}_initializeBackDrop(){return new bi({className:"offcanvas-backdrop",isVisible:this._config.backdrop,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:()=>this.hide()})}_initializeFocusTrap(){return new Ai({trapElement:this._element})}_addEventListeners(){j.on(this._element,"keydown.dismiss.bs.offcanvas",(t=>{this._config.keyboard&&"Escape"===t.key&&this.hide()}))}static jQueryInterface(t){return this.each((function(){const e=Fi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}j.on(document,"click.bs.offcanvas.data-api",'[data-bs-toggle="offcanvas"]',(function(t){const 
e=n(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),c(this))return;j.one(e,qi,(()=>{l(this)&&this.focus()}));const i=V.findOne(zi);i&&i!==e&&Fi.getInstance(i).hide(),Fi.getOrCreateInstance(e).toggle(this)})),j.on(window,"load.bs.offcanvas.data-api",(()=>V.find(zi).forEach((t=>Fi.getOrCreateInstance(t).show())))),R(Fi),g(Fi);const Ui=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Vi=/^(?:(?:https?|mailto|ftp|tel|file|sms):|[^#&/:?]*(?:[#/?]|$))/i,Ki=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i,Xi=(t,e)=>{const i=t.nodeName.toLowerCase();if(e.includes(i))return!Ui.has(i)||Boolean(Vi.test(t.nodeValue)||Ki.test(t.nodeValue));const n=e.filter((t=>t instanceof RegExp));for(let t=0,e=n.length;t<e;t++)if(n[t].test(i))return!0;return!1};function Yi(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(let t=0,i=s.length;t<i;t++){const i=s[t],n=i.nodeName.toLowerCase();if(!Object.keys(e).includes(n)){i.remove();continue}const o=[].concat(...i.attributes),r=[].concat(e["*"]||[],e[n]||[]);o.forEach((t=>{Xi(t,r)||i.removeAttribute(t.nodeName)}))}return n.body.innerHTML}const Qi="tooltip",Gi=new Set(["sanitize","allowList","sanitizeFn"]),Zi={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(array|string|function)",container:"(string|element|boolean)",fallbackPlacements:"array",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",allowList:"object",popperConfig:"(null|object|function)"},Ji={AUTO:"auto",TOP:"top",RIGHT:m()?"left":"right",BOTTOM:"bottom",LEFT:m()?"right":"left"},tn={animation:!0,template:'<div 
class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:[0,0],container:!1,fallbackPlacements:["top","right","bottom","left"],boundary:"clippingParents",customClass:"",sanitize:!0,sanitizeFn:null,allowList:{"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},popperConfig:null},en={HIDE:"hide.bs.tooltip",HIDDEN:"hidden.bs.tooltip",SHOW:"show.bs.tooltip",SHOWN:"shown.bs.tooltip",INSERTED:"inserted.bs.tooltip",CLICK:"click.bs.tooltip",FOCUSIN:"focusin.bs.tooltip",FOCUSOUT:"focusout.bs.tooltip",MOUSEENTER:"mouseenter.bs.tooltip",MOUSELEAVE:"mouseleave.bs.tooltip"},nn="fade",sn="show",on="show",rn="out",an=".tooltip-inner",ln=".modal",cn="hide.bs.modal",hn="hover",dn="focus";class un extends B{constructor(t,e){if(void 0===Fe)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t),this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this._config=this._getConfig(e),this.tip=null,this._setListeners()}static get Default(){return tn}static get NAME(){return Qi}static get Event(){return en}static get DefaultType(){return Zi}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(t){if(this._isEnabled)if(t){const e=this._initializeOnDelegatedTarget(t);e._activeTrigger.click=!e._activeTrigger.click,e._isWithActiveTrigger()?e._enter(null,e):e._leave(null,e)}else{if(this.getTipElement().classList.contains(sn))return void 
this._leave(null,this);this._enter(null,this)}}dispose(){clearTimeout(this._timeout),j.off(this._element.closest(ln),cn,this._hideModalHandler),this.tip&&this.tip.remove(),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this.isWithContent()||!this._isEnabled)return;const t=j.trigger(this._element,this.constructor.Event.SHOW),e=h(this._element),i=null===e?this._element.ownerDocument.documentElement.contains(this._element):e.contains(this._element);if(t.defaultPrevented||!i)return;"tooltip"===this.constructor.NAME&&this.tip&&this.getTitle()!==this.tip.querySelector(an).innerHTML&&(this._disposePopper(),this.tip.remove(),this.tip=null);const n=this.getTipElement(),s=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME);n.setAttribute("id",s),this._element.setAttribute("aria-describedby",s),this._config.animation&&n.classList.add(nn);const o="function"==typeof this._config.placement?this._config.placement.call(this,n,this._element):this._config.placement,r=this._getAttachment(o);this._addAttachmentClass(r);const{container:a}=this._config;H.set(n,this.constructor.DATA_KEY,this),this._element.ownerDocument.documentElement.contains(this.tip)||(a.append(n),j.trigger(this._element,this.constructor.Event.INSERTED)),this._popper?this._popper.update():this._popper=qe(this._element,n,this._getPopperConfig(r)),n.classList.add(sn);const l=this._resolvePossibleFunction(this._config.customClass);l&&n.classList.add(...l.split(" ")),"ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach((t=>{j.on(t,"mouseover",d)}));const c=this.tip.classList.contains(nn);this._queueCallback((()=>{const t=this._hoverState;this._hoverState=null,j.trigger(this._element,this.constructor.Event.SHOWN),t===rn&&this._leave(null,this)}),this.tip,c)}hide(){if(!this._popper)return;const 
t=this.getTipElement();if(j.trigger(this._element,this.constructor.Event.HIDE).defaultPrevented)return;t.classList.remove(sn),"ontouchstart"in document.documentElement&&[].concat(...document.body.children).forEach((t=>j.off(t,"mouseover",d))),this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1;const e=this.tip.classList.contains(nn);this._queueCallback((()=>{this._isWithActiveTrigger()||(this._hoverState!==on&&t.remove(),this._cleanTipClass(),this._element.removeAttribute("aria-describedby"),j.trigger(this._element,this.constructor.Event.HIDDEN),this._disposePopper())}),this.tip,e),this._hoverState=""}update(){null!==this._popper&&this._popper.update()}isWithContent(){return Boolean(this.getTitle())}getTipElement(){if(this.tip)return this.tip;const t=document.createElement("div");t.innerHTML=this._config.template;const e=t.children[0];return this.setContent(e),e.classList.remove(nn,sn),this.tip=e,this.tip}setContent(t){this._sanitizeAndSetContent(t,this.getTitle(),an)}_sanitizeAndSetContent(t,e,i){const n=V.findOne(i,t);e||!n?this.setElementContent(n,e):n.remove()}setElementContent(t,e){if(null!==t)return o(e)?(e=r(e),void(this._config.html?e.parentNode!==t&&(t.innerHTML="",t.append(e)):t.textContent=e.textContent)):void(this._config.html?(this._config.sanitize&&(e=Yi(e,this._config.allowList,this._config.sanitizeFn)),t.innerHTML=e):t.textContent=e)}getTitle(){const t=this._element.getAttribute("data-bs-original-title")||this._config.title;return this._resolvePossibleFunction(t)}updateAttachment(t){return"right"===t?"end":"left"===t?"start":t}_initializeOnDelegatedTarget(t,e){return e||this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return"function"==typeof t?t.call(this._element):t}_getPopperConfig(t){const 
e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"onChange",enabled:!0,phase:"afterWrite",fn:t=>this._handlePopperPlacementChange(t)}],onFirstUpdate:t=>{t.options.placement!==t.placement&&this._handlePopperPlacementChange(t)}};return{...e,..."function"==typeof this._config.popperConfig?this._config.popperConfig(e):this._config.popperConfig}}_addAttachmentClass(t){this.getTipElement().classList.add(`${this._getBasicClassPrefix()}-${this.updateAttachment(t)}`)}_getAttachment(t){return Ji[t.toUpperCase()]}_setListeners(){this._config.trigger.split(" ").forEach((t=>{if("click"===t)j.on(this._element,this.constructor.Event.CLICK,this._config.selector,(t=>this.toggle(t)));else if("manual"!==t){const e=t===hn?this.constructor.Event.MOUSEENTER:this.constructor.Event.FOCUSIN,i=t===hn?this.constructor.Event.MOUSELEAVE:this.constructor.Event.FOCUSOUT;j.on(this._element,e,this._config.selector,(t=>this._enter(t))),j.on(this._element,i,this._config.selector,(t=>this._leave(t)))}})),this._hideModalHandler=()=>{this._element&&this.hide()},j.on(this._element.closest(ln),cn,this._hideModalHandler),this._config.selector?this._config={...this._config,trigger:"manual",selector:""}:this._fixTitle()}_fixTitle(){const t=this._element.getAttribute("title"),e=typeof 
this._element.getAttribute("data-bs-original-title");(t||"string"!==e)&&(this._element.setAttribute("data-bs-original-title",t||""),!t||this._element.getAttribute("aria-label")||this._element.textContent||this._element.setAttribute("aria-label",t),this._element.setAttribute("title",""))}_enter(t,e){e=this._initializeOnDelegatedTarget(t,e),t&&(e._activeTrigger["focusin"===t.type?dn:hn]=!0),e.getTipElement().classList.contains(sn)||e._hoverState===on?e._hoverState=on:(clearTimeout(e._timeout),e._hoverState=on,e._config.delay&&e._config.delay.show?e._timeout=setTimeout((()=>{e._hoverState===on&&e.show()}),e._config.delay.show):e.show())}_leave(t,e){e=this._initializeOnDelegatedTarget(t,e),t&&(e._activeTrigger["focusout"===t.type?dn:hn]=e._element.contains(t.relatedTarget)),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=rn,e._config.delay&&e._config.delay.hide?e._timeout=setTimeout((()=>{e._hoverState===rn&&e.hide()}),e._config.delay.hide):e.hide())}_isWithActiveTrigger(){for(const t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1}_getConfig(t){const e=U.getDataAttributes(this._element);return Object.keys(e).forEach((t=>{Gi.has(t)&&delete e[t]})),(t={...this.constructor.Default,...e,..."object"==typeof t&&t?t:{}}).container=!1===t.container?document.body:r(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),a(Qi,t,this.constructor.DefaultType),t.sanitize&&(t.template=Yi(t.template,t.allowList,t.sanitizeFn)),t}_getDelegateConfig(){const t={};for(const e in this._config)this.constructor.Default[e]!==this._config[e]&&(t[e]=this._config[e]);return t}_cleanTipClass(){const t=this.getTipElement(),e=new 
RegExp(`(^|\\s)${this._getBasicClassPrefix()}\\S+`,"g"),i=t.getAttribute("class").match(e);null!==i&&i.length>0&&i.map((t=>t.trim())).forEach((e=>t.classList.remove(e)))}_getBasicClassPrefix(){return"bs-tooltip"}_handlePopperPlacementChange(t){const{state:e}=t;e&&(this.tip=e.elements.popper,this._cleanTipClass(),this._addAttachmentClass(this._getAttachment(e.placement)))}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null)}static jQueryInterface(t){return this.each((function(){const e=un.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}g(un);const fn={...un.Default,placement:"right",offset:[0,8],trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="popover-arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'},pn={...un.DefaultType,content:"(string|element|function)"},mn={HIDE:"hide.bs.popover",HIDDEN:"hidden.bs.popover",SHOW:"show.bs.popover",SHOWN:"shown.bs.popover",INSERTED:"inserted.bs.popover",CLICK:"click.bs.popover",FOCUSIN:"focusin.bs.popover",FOCUSOUT:"focusout.bs.popover",MOUSEENTER:"mouseenter.bs.popover",MOUSELEAVE:"mouseleave.bs.popover"};class gn extends un{static get Default(){return fn}static get NAME(){return"popover"}static get Event(){return mn}static get DefaultType(){return pn}isWithContent(){return this.getTitle()||this._getContent()}setContent(t){this._sanitizeAndSetContent(t,this.getTitle(),".popover-header"),this._sanitizeAndSetContent(t,this._getContent(),".popover-body")}_getContent(){return this._resolvePossibleFunction(this._config.content)}_getBasicClassPrefix(){return"bs-popover"}static jQueryInterface(t){return this.each((function(){const e=gn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}g(gn);const 
_n="scrollspy",bn={offset:10,method:"auto",target:""},vn={offset:"number",method:"string",target:"(string|element)"},yn="active",wn=".nav-link, .list-group-item, .dropdown-item",En="position";class An extends B{constructor(t,e){super(t),this._scrollElement="BODY"===this._element.tagName?window:this._element,this._config=this._getConfig(e),this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,j.on(this._scrollElement,"scroll.bs.scrollspy",(()=>this._process())),this.refresh(),this._process()}static get Default(){return bn}static get NAME(){return _n}refresh(){const t=this._scrollElement===this._scrollElement.window?"offset":En,e="auto"===this._config.method?t:this._config.method,n=e===En?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),V.find(wn,this._config.target).map((t=>{const s=i(t),o=s?V.findOne(s):null;if(o){const t=o.getBoundingClientRect();if(t.width||t.height)return[U[e](o).top+n,s]}return null})).filter((t=>t)).sort(((t,e)=>t[0]-e[0])).forEach((t=>{this._offsets.push(t[0]),this._targets.push(t[1])}))}dispose(){j.off(this._scrollElement,".bs.scrollspy"),super.dispose()}_getConfig(t){return(t={...bn,...U.getDataAttributes(this._element),..."object"==typeof t&&t?t:{}}).target=r(t.target)||document.documentElement,a(_n,t,vn),t}_getScrollTop(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop}_getScrollHeight(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)}_getOffsetHeight(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height}_process(){const t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),i=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=i){const 
t=this._targets[this._targets.length-1];this._activeTarget!==t&&this._activate(t)}else{if(this._activeTarget&&t<this._offsets[0]&&this._offsets[0]>0)return this._activeTarget=null,void this._clear();for(let e=this._offsets.length;e--;)this._activeTarget!==this._targets[e]&&t>=this._offsets[e]&&(void 0===this._offsets[e+1]||t<this._offsets[e+1])&&this._activate(this._targets[e])}}_activate(t){this._activeTarget=t,this._clear();const e=wn.split(",").map((e=>`${e}[data-bs-target="${t}"],${e}[href="${t}"]`)),i=V.findOne(e.join(","),this._config.target);i.classList.add(yn),i.classList.contains("dropdown-item")?V.findOne(".dropdown-toggle",i.closest(".dropdown")).classList.add(yn):V.parents(i,".nav, .list-group").forEach((t=>{V.prev(t,".nav-link, .list-group-item").forEach((t=>t.classList.add(yn))),V.prev(t,".nav-item").forEach((t=>{V.children(t,".nav-link").forEach((t=>t.classList.add(yn)))}))})),j.trigger(this._scrollElement,"activate.bs.scrollspy",{relatedTarget:t})}_clear(){V.find(wn,this._config.target).filter((t=>t.classList.contains(yn))).forEach((t=>t.classList.remove(yn)))}static jQueryInterface(t){return this.each((function(){const e=An.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}j.on(window,"load.bs.scrollspy.data-api",(()=>{V.find('[data-bs-spy="scroll"]').forEach((t=>new An(t)))})),g(An);const Tn="active",On="fade",Cn="show",kn=".active",Ln=":scope > li > .active";class xn extends B{static get NAME(){return"tab"}show(){if(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&this._element.classList.contains(Tn))return;let t;const e=n(this._element),i=this._element.closest(".nav, .list-group");if(i){const e="UL"===i.nodeName||"OL"===i.nodeName?Ln:kn;t=V.find(e,i),t=t[t.length-1]}const 
s=t?j.trigger(t,"hide.bs.tab",{relatedTarget:this._element}):null;if(j.trigger(this._element,"show.bs.tab",{relatedTarget:t}).defaultPrevented||null!==s&&s.defaultPrevented)return;this._activate(this._element,i);const o=()=>{j.trigger(t,"hidden.bs.tab",{relatedTarget:this._element}),j.trigger(this._element,"shown.bs.tab",{relatedTarget:t})};e?this._activate(e,e.parentNode,o):o()}_activate(t,e,i){const n=(!e||"UL"!==e.nodeName&&"OL"!==e.nodeName?V.children(e,kn):V.find(Ln,e))[0],s=i&&n&&n.classList.contains(On),o=()=>this._transitionComplete(t,n,i);n&&s?(n.classList.remove(Cn),this._queueCallback(o,t,!0)):o()}_transitionComplete(t,e,i){if(e){e.classList.remove(Tn);const t=V.findOne(":scope > .dropdown-menu .active",e.parentNode);t&&t.classList.remove(Tn),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}t.classList.add(Tn),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),u(t),t.classList.contains(On)&&t.classList.add(Cn);let n=t.parentNode;if(n&&"LI"===n.nodeName&&(n=n.parentNode),n&&n.classList.contains("dropdown-menu")){const e=t.closest(".dropdown");e&&V.find(".dropdown-toggle",e).forEach((t=>t.classList.add(Tn))),t.setAttribute("aria-expanded",!0)}i&&i()}static jQueryInterface(t){return this.each((function(){const e=xn.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}j.on(document,"click.bs.tab.data-api",'[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),c(this)||xn.getOrCreateInstance(this).show()})),g(xn);const Dn="toast",Sn="hide",Nn="show",In="showing",Pn={animation:"boolean",autohide:"boolean",delay:"number"},jn={animation:!0,autohide:!0,delay:5e3};class Mn extends B{constructor(t,e){super(t),this._config=this._getConfig(e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get DefaultType(){return 
Pn}static get Default(){return jn}static get NAME(){return Dn}show(){j.trigger(this._element,"show.bs.toast").defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(Sn),u(this._element),this._element.classList.add(Nn),this._element.classList.add(In),this._queueCallback((()=>{this._element.classList.remove(In),j.trigger(this._element,"shown.bs.toast"),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this._element.classList.contains(Nn)&&(j.trigger(this._element,"hide.bs.toast").defaultPrevented||(this._element.classList.add(In),this._queueCallback((()=>{this._element.classList.add(Sn),this._element.classList.remove(In),this._element.classList.remove(Nn),j.trigger(this._element,"hidden.bs.toast")}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this._element.classList.contains(Nn)&&this._element.classList.remove(Nn),super.dispose()}_getConfig(t){return t={...jn,...U.getDataAttributes(this._element),..."object"==typeof t&&t?t:{}},a(Dn,t,this.constructor.DefaultType),t}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){j.on(this._element,"mouseover.bs.toast",(t=>this._onInteraction(t,!0))),j.on(this._element,"mouseout.bs.toast",(t=>this._onInteraction(t,!1))),j.on(this._element,"focusin.bs.toast",(t=>this._onInteraction(t,!0))),j.on(this._element,"focusout.bs.toast",(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const 
e=Mn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}return R(Mn),g(Mn),{Alert:W,Button:z,Carousel:st,Collapse:pt,Dropdown:hi,Modal:Hi,Offcanvas:Fi,Popover:gn,ScrollSpy:An,Tab:xn,Toast:Mn,Tooltip:un}}));
+//# sourceMappingURL=bootstrap.bundle.min.js.map \ No newline at end of file
diff --git a/js/face-landmarks-detection.js b/js/face-landmarks-detection.js
new file mode 100644
index 0000000..cb2f93d
--- /dev/null
+++ b/js/face-landmarks-detection.js
@@ -0,0 +1,1497 @@
+/**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+(function (global, factory) {
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('@tensorflow/tfjs-core'), require('@tensorflow/tfjs-converter')) :
+ typeof define === 'function' && define.amd ? define(['exports', '@tensorflow/tfjs-core', '@tensorflow/tfjs-converter'], factory) :
+ (global = global || self, factory(global.faceLandmarksDetection = {}, global.tf, global.tf));
+}(this, (function (exports, tf, tfconv) { 'use strict';
+
+ /*! *****************************************************************************
+ Copyright (c) Microsoft Corporation.
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+ ***************************************************************************** */
+
+ var __assign = function() {
+ __assign = Object.assign || function __assign(t) {
+ for (var s, i = 1, n = arguments.length; i < n; i++) {
+ s = arguments[i];
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
+ }
+ return t;
+ };
+ return __assign.apply(this, arguments);
+ };
+
+ function __awaiter(thisArg, _arguments, P, generator) {
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+ }
+
  // tslib `__generator` helper: a hand-rolled state machine that emulates a
  // generator for downleveled code. `body` is called with the state record
  // `_` and returns [opcode, value] pairs; `verb(0/1/2)` exposes them as the
  // iterator protocol's next/throw/return. Comments below describe the
  // opcodes as used by tslib's emitted code.
  function __generator(thisArg, body) {
    // _: machine state — label (resume point), sent() (last value/exception
    // sent in), trys (active try-region stack), ops (pending finally ops).
    // f: "executing" re-entrancy flag; y: delegated inner iterator (yield*);
    // t: scratch; g: the iterator object returned to the caller.
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
    return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    // verb(n) builds the iterator method that feeds opcode n plus its
    // argument into step().
    function verb(n) { return function (v) { return step([n, v]); }; }
    // step drives the machine until the body yields (op 4) or completes.
    function step(op) {
      if (f) throw new TypeError("Generator is already executing.");
      while (_) try {
        // If delegating to an inner iterator (yield*), forward the op to it
        // first; only fall back to the local body once it is done.
        if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
        if (y = 0, t) op = [op[0] & 2, t.value];
        // Opcodes: 0 next, 1 throw, 2 return, 3 break/jump, 4 yield,
        // 5 yield*, 6 caught exception, 7 end-finally.
        switch (op[0]) {
          case 0: case 1: t = op; break;
          case 4: _.label++; return { value: op[1], done: false };
          case 5: _.label++; y = op[1]; op = [0]; continue;
          case 7: op = _.ops.pop(); _.trys.pop(); continue;
          default:
            // Consult the innermost try-region [start, catch, finally, end]
            // to decide whether a jump/throw stays inside, enters a handler,
            // or unwinds through a finally block.
            if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
            if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
            if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
            if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
            if (t[2]) _.ops.pop();
            _.trys.pop(); continue;
        }
        op = body.call(thisArg, _);
      } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
      // Opcodes 1 (throw) and 5 re-raise; otherwise report completion.
      if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
  }
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
  // --- Vendored BlazeFace face detector (minified upstream bundle). ---
  // Kept byte-identical; do not hand-edit. The line breaks below fall
  // mid-statement (artifact of how the long minified line was wrapped).
  // Contents, in order of appearance:
  //   __awaiter$1 / __generator$1 : duplicated tslib async helpers.
  //   disposeBox / createBox / scaleBox : tensor-backed bounding-box utils.
  //   ANCHORS_CONFIG, NUM_LANDMARKS, generateAnchors : SSD anchor grid.
  //   decodeBounds : raw model offsets + anchors -> pixel-space boxes.
  //   getInputTensorDimensions / flipFaceHorizontal / scaleBoxFromPrediction.
  //   BlazeFaceModel : getBoundingBoxes / estimateFaces inference wrappers
  //     (NOTE(review): temporarily silences console.warn around
  //     tf.image.nonMaxSuppression — presumably to hide a deprecation
  //     warning; confirm before relying on console output here).
  //   BLAZEFACE_MODEL_URL, load : fetch graph model (TF Hub by default).
  function __awaiter$1(e,t,n,r){return new(n||(n=Promise))(function(o,i){function a(e){try{c(r.next(e));}catch(e){i(e);}}function s(e){try{c(r.throw(e));}catch(e){i(e);}}function c(e){var t;e.done?o(e.value):(t=e.value,t instanceof n?t:new n(function(e){e(t);})).then(a,s);}c((r=r.apply(e,t||[])).next());})}function __generator$1(e,t){var n,r,o,i,a={label:0,sent:function(){if(1&o[0])throw o[1];return o[1]},trys:[],ops:[]};return i={next:s(0),throw:s(1),return:s(2)},"function"==typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function s(i){return function(s){return function(i){if(n)throw new TypeError("Generator is already executing.");for(;a;)try{if(n=1,r&&(o=2&i[0]?r.return:i[0]?r.throw||((o=r.return)&&o.call(r),0):r.next)&&!(o=o.call(r,i[1])).done)return o;switch(r=0,o&&(i=[2&i[0],o.value]),i[0]){case 0:case 1:o=i;break;case 4:return a.label++,{value:i[1],done:!1};case 5:a.label++,r=i[1],i=[0];continue;case 7:i=a.ops.pop(),a.trys.pop();continue;default:if(!(o=(o=a.trys).length>0&&o[o.length-1])&&(6===i[0]||2===i[0])){a=0;continue}if(3===i[0]&&(!o||i[1]>o[0]&&i[1]<o[3])){a.label=i[1];break}if(6===i[0]&&a.label<o[1]){a.label=o[1],o=i;break}if(o&&a.label<o[2]){a.label=o[2],a.ops.push(i);break}o[2]&&a.ops.pop(),a.trys.pop();continue}i=t.call(e,a);}catch(e){i=[6,e],r=0;}finally{n=o=0;}if(5&i[0])throw i[1];return {value:i[0]?i[1]:void 0,done:!0}}([i,s])}}}var disposeBox=function(e){e.startEndTensor.dispose(),e.startPoint.dispose(),e.endPoint.dispose();},createBox=function(e){return {startEndTensor:e,startPoint:tf.slice(e,[0,0],[-1,2]),endPoint:tf.slice(e,[0,2],[-1,2])}},scaleBox=function(e,t){var n=tf.mul(e.startPoint,t),r=tf.mul(e.endPoint,t),o=tf.concat2d([n,r],1);return createBox(o)},ANCHORS_CONFIG={strides:[8,16],anchors:[2,6]},NUM_LANDMARKS=6;function generateAnchors(e,t,n){for(var r=[],o=0;o<n.strides.length;o++)for(var i=n.strides[o],a=Math.floor((t+i-1)/i),s=Math.floor((e+i-1)/i),c=n.anchors[o],l=0;l<a;l++)for(var u=i*(l+.5),d=0;d<s;d++)for(var 
h=i*(d+.5),f=0;f<c;f++)r.push([h,u]);return r}function decodeBounds(e,t,n){var r=tf.slice(e,[0,1],[-1,2]),o=tf.add(r,t),i=tf.slice(e,[0,3],[-1,2]),a=tf.div(i,n),s=tf.div(o,n),c=tf.div(a,2),l=tf.sub(s,c),u=tf.add(s,c),d=tf.mul(l,n),h=tf.mul(u,n);return tf.concat2d([d,h],1)}function getInputTensorDimensions(e){return e instanceof tf.Tensor?[e.shape[0],e.shape[1]]:[e.height,e.width]}function flipFaceHorizontal(e,t){var n,r,o;if(e.topLeft instanceof tf.Tensor&&e.bottomRight instanceof tf.Tensor){var i=tf.tidy(function(){return [tf.concat([tf.slice(tf.sub(t-1,e.topLeft),0,1),tf.slice(e.topLeft,1,1)]),tf.concat([tf.sub(t-1,tf.slice(e.bottomRight,0,1)),tf.slice(e.bottomRight,1,1)])]});n=i[0],r=i[1],null!=e.landmarks&&(o=tf.tidy(function(){var n=tf.sub(tf.tensor1d([t-1,0]),e.landmarks),r=tf.tensor1d([1,-1]);return tf.mul(n,r)}));}else {var a=e.topLeft,s=a[0],c=a[1],l=e.bottomRight,u=l[0],d=l[1];n=[t-1-s,c],r=[t-1-u,d],null!=e.landmarks&&(o=e.landmarks.map(function(e){return [t-1-e[0],e[1]]}));}var h={topLeft:n,bottomRight:r};return null!=o&&(h.landmarks=o),null!=e.probability&&(h.probability=e.probability instanceof tf.Tensor?e.probability.clone():e.probability),h}function scaleBoxFromPrediction(e,t){return tf.tidy(function(){var n;return n=e.hasOwnProperty("box")?e.box:e,tf.squeeze(scaleBox(n,t).startEndTensor)})}var BlazeFaceModel=function(){function e(e,t,n,r,o,i){this.blazeFaceModel=e,this.width=t,this.height=n,this.maxFaces=r,this.anchorsData=generateAnchors(t,n,ANCHORS_CONFIG),this.anchors=tf.tensor2d(this.anchorsData),this.inputSizeData=[t,n],this.inputSize=tf.tensor1d([t,n]),this.iouThreshold=o,this.scoreThreshold=i;}return e.prototype.getBoundingBoxes=function(e,t,n){return void 0===n&&(n=!0),__awaiter$1(this,void 0,void 0,function(){var r,o,i,a,s,c,l,u,d,h,f,p,b,m,v=this;return __generator$1(this,function(y){switch(y.label){case 0:return r=tf.tidy(function(){var 
t=tf.image.resizeBilinear(e,[v.width,v.height]),n=tf.mul(tf.sub(tf.div(t,255),.5),2),r=v.blazeFaceModel.predict(n),o=tf.squeeze(r),i=decodeBounds(o,v.anchors,v.inputSize),a=tf.slice(o,[0,0],[-1,1]);return [o,i,tf.squeeze(tf.sigmoid(a))]}),o=r[0],i=r[1],a=r[2],s=console.warn,console.warn=function(){},c=tf.image.nonMaxSuppression(i,a,this.maxFaces,this.iouThreshold,this.scoreThreshold),console.warn=s,[4,c.array()];case 1:return l=y.sent(),c.dispose(),u=l.map(function(e){return tf.slice(i,[e,0],[1,-1])}),t?[3,3]:[4,Promise.all(u.map(function(e){return __awaiter$1(v,void 0,void 0,function(){var t;return __generator$1(this,function(n){switch(n.label){case 0:return [4,e.array()];case 1:return t=n.sent(),e.dispose(),[2,t]}})})}))];case 2:u=y.sent(),y.label=3;case 3:for(d=e.shape[1],h=e.shape[2],f=t?tf.div([h,d],this.inputSize):[h/this.inputSizeData[0],d/this.inputSizeData[1]],p=[],b=function(e){var r=u[e],i=tf.tidy(function(){var i=createBox(r instanceof tf.Tensor?r:tf.tensor2d(r));if(!n)return i;var s,c=l[e];return s=t?tf.slice(v.anchors,[c,0],[1,2]):v.anchorsData[c],{box:i,landmarks:tf.reshape(tf.squeeze(tf.slice(o,[c,NUM_LANDMARKS-1],[1,-1])),[NUM_LANDMARKS,-1]),probability:tf.slice(a,[c],[1]),anchor:s}});p.push(i);},m=0;m<u.length;m++)b(m);return i.dispose(),a.dispose(),o.dispose(),[2,{boxes:p,scaleFactor:f}]}})})},e.prototype.estimateFaces=function(e,t,n,r){return void 0===t&&(t=!1),void 0===n&&(n=!1),void 0===r&&(r=!0),__awaiter$1(this,void 0,void 0,function(){var o,i,a,s,c,l,u=this;return __generator$1(this,function(d){switch(d.label){case 0:return o=getInputTensorDimensions(e),i=o[1],a=tf.tidy(function(){return e instanceof tf.Tensor||(e=tf.browser.fromPixels(e)),tf.expandDims(tf.cast(e,"float32"),0)}),[4,this.getBoundingBoxes(a,t,r)];case 1:return s=d.sent(),c=s.boxes,l=s.scaleFactor,a.dispose(),t?[2,c.map(function(e){var t=scaleBoxFromPrediction(e,l),o={topLeft:tf.slice(t,[0],[2]),bottomRight:tf.slice(t,[2],[2])};if(r){var 
a=e,s=a.landmarks,c=a.probability,u=a.anchor,d=tf.mul(tf.add(s,u),l);o.landmarks=d,o.probability=c;}return n&&(o=flipFaceHorizontal(o,i)),o})]:[2,Promise.all(c.map(function(e){return __awaiter$1(u,void 0,void 0,function(){var t,o,a,s,c,u,d,h,f,p,b,m=this;return __generator$1(this,function(v){switch(v.label){case 0:return t=scaleBoxFromPrediction(e,l),r?[3,2]:[4,t.array()];case 1:return c=v.sent(),o={topLeft:c.slice(0,2),bottomRight:c.slice(2)},[3,4];case 2:return [4,Promise.all([e.landmarks,t,e.probability].map(function(e){return __awaiter$1(m,void 0,void 0,function(){return __generator$1(this,function(t){return [2,e.array()]})})}))];case 3:a=v.sent(),s=a[0],c=a[1],u=a[2],d=e.anchor,f=(h=l)[0],p=h[1],b=s.map(function(e){return [(e[0]+d[0])*f,(e[1]+d[1])*p]}),o={topLeft:c.slice(0,2),bottomRight:c.slice(2),landmarks:b,probability:u},disposeBox(e.box),e.landmarks.dispose(),e.probability.dispose(),v.label=4;case 4:return t.dispose(),n&&(o=flipFaceHorizontal(o,i)),[2,o]}})})}))]}})})},e}(),BLAZEFACE_MODEL_URL="https://tfhub.dev/tensorflow/tfjs-model/blazeface/1/default/1";function load(e){var t=void 0===e?{}:e,n=t.maxFaces,r=void 0===n?10:n,o=t.inputWidth,i=void 0===o?128:o,a=t.inputHeight,s=void 0===a?128:a,c=t.iouThreshold,l=void 0===c?.3:c,u=t.scoreThreshold,d=void 0===u?.75:u,h=t.modelUrl;return __awaiter$1(this,void 0,void 0,function(){var e;return __generator$1(this,function(t){switch(t.label){case 0:return null==h?[3,2]:[4,tfconv.loadGraphModel(h)];case 1:return e=t.sent(),[3,4];case 2:return [4,tfconv.loadGraphModel(BLAZEFACE_MODEL_URL,{fromTFHub:!0})];case 3:e=t.sent(),t.label=4;case 4:return [2,new BlazeFaceModel(e,i,s,r,l,d)]}})})}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
  // Maps semantic face regions to lists of indices into the facemesh
  // landmark array. Indices 0-467 address the base mesh; the *Iris entries
  // use 468-477, which are presumably only populated when the iris model is
  // loaded — TODO confirm against the predictor that builds annotations.
  // "right"/"left" here appear to be from the viewer's perspective (the
  // mirrored camera image) — NOTE(review): verify before relying on sidedness.
  var MESH_ANNOTATIONS = {
    // Face outline, ordered around the jaw/forehead contour.
    silhouette: [
      10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288,
      397, 365, 379, 378, 400, 377, 152, 148, 176, 149, 150, 136,
      172, 58, 132, 93, 234, 127, 162, 21, 54, 103, 67, 109
    ],
    lipsUpperOuter: [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291],
    lipsLowerOuter: [146, 91, 181, 84, 17, 314, 405, 321, 375, 291],
    lipsUpperInner: [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308],
    lipsLowerInner: [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308],
    // Eye contours: Upper0/Lower0 hug the eyelid margin; the higher-numbered
    // rings (1, 2, 3) step progressively outward from the eye.
    rightEyeUpper0: [246, 161, 160, 159, 158, 157, 173],
    rightEyeLower0: [33, 7, 163, 144, 145, 153, 154, 155, 133],
    rightEyeUpper1: [247, 30, 29, 27, 28, 56, 190],
    rightEyeLower1: [130, 25, 110, 24, 23, 22, 26, 112, 243],
    rightEyeUpper2: [113, 225, 224, 223, 222, 221, 189],
    rightEyeLower2: [226, 31, 228, 229, 230, 231, 232, 233, 244],
    rightEyeLower3: [143, 111, 117, 118, 119, 120, 121, 128, 245],
    rightEyebrowUpper: [156, 70, 63, 105, 66, 107, 55, 193],
    rightEyebrowLower: [35, 124, 46, 53, 52, 65],
    rightEyeIris: [473, 474, 475, 476, 477],
    leftEyeUpper0: [466, 388, 387, 386, 385, 384, 398],
    leftEyeLower0: [263, 249, 390, 373, 374, 380, 381, 382, 362],
    leftEyeUpper1: [467, 260, 259, 257, 258, 286, 414],
    leftEyeLower1: [359, 255, 339, 254, 253, 252, 256, 341, 463],
    leftEyeUpper2: [342, 445, 444, 443, 442, 441, 413],
    leftEyeLower2: [446, 261, 448, 449, 450, 451, 452, 453, 464],
    leftEyeLower3: [372, 340, 346, 347, 348, 349, 350, 357, 465],
    leftEyebrowUpper: [383, 300, 293, 334, 296, 336, 285, 417],
    leftEyebrowLower: [265, 353, 276, 283, 282, 295],
    leftEyeIris: [468, 469, 470, 471, 472],
    // Single-point landmarks.
    midwayBetweenEyes: [168],
    noseTip: [1],
    noseBottom: [2],
    noseRightCorner: [98],
    noseLeftCorner: [327],
    rightCheek: [205],
    leftCheek: [425]
  };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function scaleBoxCoordinates(box, factor) {
+ var startPoint = [box.startPoint[0] * factor[0], box.startPoint[1] * factor[1]];
+ var endPoint = [box.endPoint[0] * factor[0], box.endPoint[1] * factor[1]];
+ return { startPoint: startPoint, endPoint: endPoint };
+ }
+ function getBoxSize(box) {
+ return [
+ Math.abs(box.endPoint[0] - box.startPoint[0]),
+ Math.abs(box.endPoint[1] - box.startPoint[1])
+ ];
+ }
+ function getBoxCenter(box) {
+ return [
+ box.startPoint[0] + (box.endPoint[0] - box.startPoint[0]) / 2,
+ box.startPoint[1] + (box.endPoint[1] - box.startPoint[1]) / 2
+ ];
+ }
+ function cutBoxFromImageAndResize(box, image, cropSize) {
+ var h = image.shape[1];
+ var w = image.shape[2];
+ var boxes = [[
+ box.startPoint[1] / h, box.startPoint[0] / w, box.endPoint[1] / h,
+ box.endPoint[0] / w
+ ]];
+ return tf.image.cropAndResize(image, boxes, [0], cropSize, 'bilinear' /* method */, 0 /* extrapolation value */);
+ }
+ /**
+ * Enlarges the box by the provided factor.
+ * @param box An object with startPoint and endPoint properties describing the
+ * outlines of the box to be enlarged.
+ * @param factor optional The enlargement factor. Defaults to 1.5
+ */
+ function enlargeBox(box, factor) {
+ if (factor === void 0) { factor = 1.5; }
+ var center = getBoxCenter(box);
+ var size = getBoxSize(box);
+ var newHalfSize = [factor * size[0] / 2, factor * size[1] / 2];
+ var startPoint = [center[0] - newHalfSize[0], center[1] - newHalfSize[1]];
+ var endPoint = [center[0] + newHalfSize[0], center[1] + newHalfSize[1]];
+ return { startPoint: startPoint, endPoint: endPoint, landmarks: box.landmarks };
+ }
+ /**
+ * Squarifies the provided box by setting its length and height equal to
+ * max(length, height) while preserving its center point.
+ * @param box An object with startPoint and endPoint properties describing the
+ * outlines of the box to be squarified.
+ */
+ function squarifyBox(box) {
+ var centers = getBoxCenter(box);
+ var size = getBoxSize(box);
+ var maxEdge = Math.max.apply(Math, size);
+ var halfSize = maxEdge / 2;
+ var startPoint = [centers[0] - halfSize, centers[1] - halfSize];
+ var endPoint = [centers[0] + halfSize, centers[1] + halfSize];
+ return { startPoint: startPoint, endPoint: endPoint, landmarks: box.landmarks };
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var IDENTITY_MATRIX = [[1, 0, 0], [0, 1, 0], [0, 0, 1]];
+ /**
+ * Normalizes the provided angle to the range -pi to pi.
+ * @param angle The angle in radians to be normalized.
+ */
+ function normalizeRadians(angle) {
+ return angle - 2 * Math.PI * Math.floor((angle + Math.PI) / (2 * Math.PI));
+ }
+ /**
+ * Computes the angle of rotation between two anchor points.
+ * @param point1 First anchor point
+ * @param point2 Second anchor point
+ */
+ function computeRotation(point1, point2) {
+ var radians = Math.PI / 2 - Math.atan2(-(point2[1] - point1[1]), point2[0] - point1[0]);
+ return normalizeRadians(radians);
+ }
/** Returns the 3x3 homogeneous translation matrix for offset (x, y). */
function buildTranslationMatrix(x, y) {
    var m = [[1, 0, 0], [0, 1, 0], [0, 0, 1]];
    m[0][2] = x;
    m[1][2] = y;
    return m;
}
/** Inner product of two equal-length numeric vectors. */
function dot(v1, v2) {
    var sum = 0;
    v1.forEach(function (value, i) {
        sum += value * v2[i];
    });
    return sum;
}
/** Extracts the given column of a row-major 2D array as a flat array. */
function getColumnFrom2DArr(arr, columnIndex) {
    return arr.map(function (row) { return row[columnIndex]; });
}
/** Multiplies two square matrices stored as row-major nested arrays. */
function multiplyTransformMatrices(mat1, mat2) {
    var size = mat1.length;
    var result = [];
    for (var r = 0; r < size; r++) {
        var resultRow = [];
        for (var c = 0; c < size; c++) {
            resultRow.push(dot(mat1[r], getColumnFrom2DArr(mat2, c)));
        }
        result.push(resultRow);
    }
    return result;
}
/**
 * Builds the 3x3 homogeneous matrix that rotates by `rotation` radians about
 * `center`, i.e. T(center) * R(rotation) * T(-center).
 */
function buildRotationMatrix(rotation, center) {
    var c = Math.cos(rotation);
    var s = Math.sin(rotation);
    var rotate = [[c, -s, 0], [s, c, 0], [0, 0, 1]];
    var toCenter = buildTranslationMatrix(center[0], center[1]);
    var backFromCenter = buildTranslationMatrix(-center[0], -center[1]);
    return multiplyTransformMatrices(multiplyTransformMatrices(toCenter, rotate), backFromCenter);
}
/**
 * Inverts a rigid homogeneous transform (rotation + translation): the 2x2
 * rotation block is transposed, and the translation is re-projected through
 * the transposed rotation and negated.
 */
function invertTransformMatrix(matrix) {
    var topRow = [matrix[0][0], matrix[1][0]];
    var bottomRow = [matrix[0][1], matrix[1][1]];
    var translation = [matrix[0][2], matrix[1][2]];
    return [
        topRow.concat(-dot(topRow, translation)),
        bottomRow.concat(-dot(bottomRow, translation)),
        [0, 0, 1]
    ];
}
/**
 * Applies the first two rows of a homogeneous transform to the homogeneous
 * coordinate [x, y, 1], yielding the transformed [x, y].
 */
function rotatePoint(homogeneousCoordinate, rotationMatrix) {
    return [0, 1].map(function (rowIndex) {
        return dot(homogeneousCoordinate, rotationMatrix[rowIndex]);
    });
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Number of keypoints produced by the facemesh model (excluding irises).
var LANDMARKS_COUNT = 468;
// If the IOU between an incoming box and its tracked predecessor falls below
// this threshold, the tracked region of interest is replaced.
var UPDATE_REGION_OF_INTEREST_IOU_THRESHOLD = 0.25;
var MESH_MOUTH_INDEX = 13;
// The mouth and midway-between-eyes keypoints define the face's vertical line
// of symmetry, used to estimate in-plane rotation.
var MESH_KEYPOINTS_LINE_OF_SYMMETRY_INDICES = [MESH_MOUTH_INDEX, MESH_ANNOTATIONS['midwayBetweenEyes'][0]];
// Corresponding indices into the coarser blazeface detector landmarks, used
// when the box came from the detector rather than the mesh model.
var BLAZEFACE_MOUTH_INDEX = 3;
var BLAZEFACE_NOSE_INDEX = 2;
var BLAZEFACE_KEYPOINTS_LINE_OF_SYMMETRY_INDICES = [BLAZEFACE_MOUTH_INDEX, BLAZEFACE_NOSE_INDEX];
// First and last points of each lower-eye contour give the eye's horizontal
// extent (its two corners), used to crop the eye region for the iris model.
var LEFT_EYE_OUTLINE = MESH_ANNOTATIONS['leftEyeLower0'];
var LEFT_EYE_BOUNDS = [LEFT_EYE_OUTLINE[0], LEFT_EYE_OUTLINE[LEFT_EYE_OUTLINE.length - 1]];
var RIGHT_EYE_OUTLINE = MESH_ANNOTATIONS['rightEyeLower0'];
var RIGHT_EYE_BOUNDS = [RIGHT_EYE_OUTLINE[0], RIGHT_EYE_OUTLINE[RIGHT_EYE_OUTLINE.length - 1]];
// Offsets into the iris model's output (IRIS_NUM_COORDINATES 3D points):
// indices of the keypoints above/below the eye center, and the index of the
// first of the iris keypoints (the final 76 - 71 = 5 coordinates).
var IRIS_UPPER_CENTER_INDEX = 3;
var IRIS_LOWER_CENTER_INDEX = 4;
var IRIS_IRIS_INDEX = 71;
var IRIS_NUM_COORDINATES = 76;
// Factor by which to enlarge the box around the eye landmarks so the input
// region matches the expectations of the iris model.
var ENLARGE_EYE_RATIO = 2.3;
// Side length (pixels) of the square crop fed to the iris model.
var IRIS_MODEL_INPUT_SIZE = 64;
// A mapping from facemesh model keypoints to iris model keypoints.
var MESH_TO_IRIS_INDICES_MAP = [
    { key: 'EyeUpper0', indices: [9, 10, 11, 12, 13, 14, 15] },
    { key: 'EyeUpper1', indices: [25, 26, 27, 28, 29, 30, 31] },
    { key: 'EyeUpper2', indices: [41, 42, 43, 44, 45, 46, 47] },
    { key: 'EyeLower0', indices: [0, 1, 2, 3, 4, 5, 6, 7, 8] },
    { key: 'EyeLower1', indices: [16, 17, 18, 19, 20, 21, 22, 23, 24] },
    { key: 'EyeLower2', indices: [32, 33, 34, 35, 36, 37, 38, 39, 40] },
    { key: 'EyeLower3', indices: [54, 55, 56, 57, 58, 59, 60, 61, 62] },
    { key: 'EyebrowUpper', indices: [63, 64, 65, 66, 67, 68, 69, 70] },
    { key: 'EyebrowLower', indices: [48, 49, 50, 51, 52, 53] }
];
// Replace the raw coordinates returned by facemesh with refined iris model
// coordinates.
// Update the z coordinate to be an average of the original and the new. This
// produces the best visual effect.
function replaceRawCoordinates(rawCoords, newCoords, prefix, keys) {
    MESH_TO_IRIS_INDICES_MAP.forEach(function (entry) {
        var key = entry.key;
        var indices = entry.indices;
        // A null/undefined `keys` filter means every annotation group is refined.
        if (keys == null || keys.includes(key)) {
            var originalIndices = MESH_ANNOTATIONS["" + prefix + key];
            indices.forEach(function (irisIndex, j) {
                var meshIndex = originalIndices[j];
                rawCoords[meshIndex] = [
                    newCoords[irisIndex][0],
                    newCoords[irisIndex][1],
                    // Average old and new z for a smoother visual result.
                    (newCoords[irisIndex][2] + rawCoords[meshIndex][2]) / 2
                ];
            });
        }
    });
}
+ // The Pipeline coordinates between the bounding box and skeleton models.
+ var Pipeline = /** @class */ (function () {
+ function Pipeline(boundingBoxDetector, meshDetector, meshWidth, meshHeight, maxContinuousChecks, maxFaces, irisModel) {
+ // An array of facial bounding boxes.
+ this.regionsOfInterest = [];
+ this.runsWithoutFaceDetector = 0;
+ this.boundingBoxDetector = boundingBoxDetector;
+ this.meshDetector = meshDetector;
+ this.irisModel = irisModel;
+ this.meshWidth = meshWidth;
+ this.meshHeight = meshHeight;
+ this.maxContinuousChecks = maxContinuousChecks;
+ this.maxFaces = maxFaces;
+ }
+ Pipeline.prototype.transformRawCoords = function (rawCoords, box, angle, rotationMatrix) {
+ var _this = this;
+ var boxSize = getBoxSize({ startPoint: box.startPoint, endPoint: box.endPoint });
+ var scaleFactor = [boxSize[0] / this.meshWidth, boxSize[1] / this.meshHeight];
+ var coordsScaled = rawCoords.map(function (coord) { return ([
+ scaleFactor[0] * (coord[0] - _this.meshWidth / 2),
+ scaleFactor[1] * (coord[1] - _this.meshHeight / 2), coord[2]
+ ]); });
+ var coordsRotationMatrix = buildRotationMatrix(angle, [0, 0]);
+ var coordsRotated = coordsScaled.map(function (coord) {
+ return (rotatePoint(coord, coordsRotationMatrix).concat([coord[2]]));
+ });
+ var inverseRotationMatrix = invertTransformMatrix(rotationMatrix);
+ var boxCenter = getBoxCenter({ startPoint: box.startPoint, endPoint: box.endPoint }).concat([
+ 1
+ ]);
+ var originalBoxCenter = [
+ dot(boxCenter, inverseRotationMatrix[0]),
+ dot(boxCenter, inverseRotationMatrix[1])
+ ];
+ return coordsRotated.map(function (coord) { return ([
+ coord[0] + originalBoxCenter[0],
+ coord[1] + originalBoxCenter[1], coord[2]
+ ]); });
+ };
+ Pipeline.prototype.getLeftToRightEyeDepthDifference = function (rawCoords) {
+ var leftEyeZ = rawCoords[LEFT_EYE_BOUNDS[0]][2];
+ var rightEyeZ = rawCoords[RIGHT_EYE_BOUNDS[0]][2];
+ return leftEyeZ - rightEyeZ;
+ };
+ // Returns a box describing a cropped region around the eye fit for passing to
+ // the iris model.
+ Pipeline.prototype.getEyeBox = function (rawCoords, face, eyeInnerCornerIndex, eyeOuterCornerIndex, flip) {
+ if (flip === void 0) { flip = false; }
+ var box = squarifyBox(enlargeBox(this.calculateLandmarksBoundingBox([rawCoords[eyeInnerCornerIndex], rawCoords[eyeOuterCornerIndex]]), ENLARGE_EYE_RATIO));
+ var boxSize = getBoxSize(box);
+ var crop = tf.image.cropAndResize(face, [[
+ box.startPoint[1] / this.meshHeight,
+ box.startPoint[0] / this.meshWidth, box.endPoint[1] / this.meshHeight,
+ box.endPoint[0] / this.meshWidth
+ ]], [0], [IRIS_MODEL_INPUT_SIZE, IRIS_MODEL_INPUT_SIZE]);
+ if (flip) {
+ crop = tf.image.flipLeftRight(crop);
+ }
+ return { box: box, boxSize: boxSize, crop: crop };
+ };
+ // Given a cropped image of an eye, returns the coordinates of the contours
+ // surrounding the eye and the iris.
+ Pipeline.prototype.getEyeCoords = function (eyeData, eyeBox, eyeBoxSize, flip) {
+ if (flip === void 0) { flip = false; }
+ var eyeRawCoords = [];
+ for (var i = 0; i < IRIS_NUM_COORDINATES; i++) {
+ var x = eyeData[i * 3];
+ var y = eyeData[i * 3 + 1];
+ var z = eyeData[i * 3 + 2];
+ eyeRawCoords.push([
+ (flip ? (1 - (x / IRIS_MODEL_INPUT_SIZE)) :
+ (x / IRIS_MODEL_INPUT_SIZE)) *
+ eyeBoxSize[0] +
+ eyeBox.startPoint[0],
+ (y / IRIS_MODEL_INPUT_SIZE) * eyeBoxSize[1] + eyeBox.startPoint[1], z
+ ]);
+ }
+ return { rawCoords: eyeRawCoords, iris: eyeRawCoords.slice(IRIS_IRIS_INDEX) };
+ };
+ // The z-coordinates returned for the iris are unreliable, so we take the z
+ // values from the surrounding keypoints.
+ Pipeline.prototype.getAdjustedIrisCoords = function (rawCoords, irisCoords, direction) {
+ var upperCenterZ = rawCoords[MESH_ANNOTATIONS[direction + "EyeUpper0"][IRIS_UPPER_CENTER_INDEX]][2];
+ var lowerCenterZ = rawCoords[MESH_ANNOTATIONS[direction + "EyeLower0"][IRIS_LOWER_CENTER_INDEX]][2];
+ var averageZ = (upperCenterZ + lowerCenterZ) / 2;
+ // Iris indices:
+ // 0: center | 1: right | 2: above | 3: left | 4: below
+ return irisCoords.map(function (coord, i) {
+ var z = averageZ;
+ if (i === 2) {
+ z = upperCenterZ;
+ }
+ else if (i === 4) {
+ z = lowerCenterZ;
+ }
+ return [coord[0], coord[1], z];
+ });
+ };
+ /**
+ * Returns an array of predictions for each face in the input.
+ * @param input - tensor of shape [1, H, W, 3].
+ * @param predictIrises - Whether to return keypoints for the irises.
+ */
+ Pipeline.prototype.predict = function (input, predictIrises) {
+ return __awaiter(this, void 0, void 0, function () {
+ var returnTensors, annotateFace, _a, boxes, scaleFactor_1, scaledBoxes;
+ var _this = this;
+ return __generator(this, function (_b) {
+ switch (_b.label) {
+ case 0:
+ if (!this.shouldUpdateRegionsOfInterest()) return [3 /*break*/, 2];
+ returnTensors = false;
+ annotateFace = true;
+ return [4 /*yield*/, this.boundingBoxDetector.getBoundingBoxes(input, returnTensors, annotateFace)];
+ case 1:
+ _a = _b.sent(), boxes = _a.boxes, scaleFactor_1 = _a.scaleFactor;
+ if (boxes.length === 0) {
+ this.regionsOfInterest = [];
+ return [2 /*return*/, null];
+ }
+ scaledBoxes = boxes.map(function (prediction) {
+ var predictionBoxCPU = {
+ startPoint: tf.squeeze(prediction.box.startPoint).arraySync(),
+ endPoint: tf.squeeze(prediction.box.endPoint).arraySync()
+ };
+ var scaledBox = scaleBoxCoordinates(predictionBoxCPU, scaleFactor_1);
+ var enlargedBox = enlargeBox(scaledBox);
+ var squarifiedBox = squarifyBox(enlargedBox);
+ return __assign({}, squarifiedBox, { landmarks: prediction.landmarks.arraySync() });
+ });
+ boxes.forEach(function (box) {
+ if (box != null && box.startPoint != null) {
+ box.startEndTensor.dispose();
+ box.startPoint.dispose();
+ box.endPoint.dispose();
+ }
+ });
+ this.updateRegionsOfInterest(scaledBoxes);
+ this.runsWithoutFaceDetector = 0;
+ return [3 /*break*/, 3];
+ case 2:
+ this.runsWithoutFaceDetector++;
+ _b.label = 3;
+ case 3: return [2 /*return*/, tf.tidy(function () {
+ return _this.regionsOfInterest.map(function (box, i) {
+ var angle = 0;
+ // The facial bounding box landmarks could come either from blazeface
+ // (if we are using a fresh box), or from the mesh model (if we are
+ // reusing an old box).
+ var boxLandmarksFromMeshModel = box.landmarks.length >= LANDMARKS_COUNT;
+ var indexOfMouth = MESH_KEYPOINTS_LINE_OF_SYMMETRY_INDICES[0], indexOfForehead = MESH_KEYPOINTS_LINE_OF_SYMMETRY_INDICES[1];
+ if (boxLandmarksFromMeshModel === false) {
+ indexOfMouth = BLAZEFACE_KEYPOINTS_LINE_OF_SYMMETRY_INDICES[0], indexOfForehead = BLAZEFACE_KEYPOINTS_LINE_OF_SYMMETRY_INDICES[1];
+ }
+ angle = computeRotation(box.landmarks[indexOfMouth], box.landmarks[indexOfForehead]);
+ var faceCenter = getBoxCenter({ startPoint: box.startPoint, endPoint: box.endPoint });
+ var faceCenterNormalized = [faceCenter[0] / input.shape[2], faceCenter[1] / input.shape[1]];
+ var rotatedImage = input;
+ var rotationMatrix = IDENTITY_MATRIX;
+ if (angle !== 0) {
+ rotatedImage =
+ tf.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
+ rotationMatrix = buildRotationMatrix(-angle, faceCenter);
+ }
+ var boxCPU = { startPoint: box.startPoint, endPoint: box.endPoint };
+ var face = tf.div(cutBoxFromImageAndResize(boxCPU, rotatedImage, [
+ _this.meshHeight, _this.meshWidth
+ ]), 255);
+ // The first returned tensor represents facial contours, which are
+ // included in the coordinates.
+ var _a = _this.meshDetector.predict(face), flag = _a[1], coords = _a[2];
+ var coordsReshaped = tf.reshape(coords, [-1, 3]);
+ var rawCoords = coordsReshaped.arraySync();
+ if (predictIrises) {
+ var _b = _this.getEyeBox(rawCoords, face, LEFT_EYE_BOUNDS[0], LEFT_EYE_BOUNDS[1], true), leftEyeBox = _b.box, leftEyeBoxSize = _b.boxSize, leftEyeCrop = _b.crop;
+ var _c = _this.getEyeBox(rawCoords, face, RIGHT_EYE_BOUNDS[0], RIGHT_EYE_BOUNDS[1]), rightEyeBox = _c.box, rightEyeBoxSize = _c.boxSize, rightEyeCrop = _c.crop;
+ var eyePredictions = (_this.irisModel.predict(tf.concat([leftEyeCrop, rightEyeCrop])));
+ var eyePredictionsData = eyePredictions.dataSync();
+ var leftEyeData = eyePredictionsData.slice(0, IRIS_NUM_COORDINATES * 3);
+ var _d = _this.getEyeCoords(leftEyeData, leftEyeBox, leftEyeBoxSize, true), leftEyeRawCoords = _d.rawCoords, leftIrisRawCoords = _d.iris;
+ var rightEyeData = eyePredictionsData.slice(IRIS_NUM_COORDINATES * 3);
+ var _e = _this.getEyeCoords(rightEyeData, rightEyeBox, rightEyeBoxSize), rightEyeRawCoords = _e.rawCoords, rightIrisRawCoords = _e.iris;
+ var leftToRightEyeDepthDifference = _this.getLeftToRightEyeDepthDifference(rawCoords);
+ if (Math.abs(leftToRightEyeDepthDifference) <
+ 30) { // User is looking straight ahead.
+ replaceRawCoordinates(rawCoords, leftEyeRawCoords, 'left');
+ replaceRawCoordinates(rawCoords, rightEyeRawCoords, 'right');
+ }
+ else if (leftToRightEyeDepthDifference < 1) { // User is looking
+ // towards the
+ // right.
+ // If the user is looking to the left or to the right, the iris
+ // coordinates tend to diverge too much from the mesh coordinates
+ // for them to be merged. So we only update a single contour line
+ // above and below the eye.
+ replaceRawCoordinates(rawCoords, leftEyeRawCoords, 'left', ['EyeUpper0', 'EyeLower0']);
+ }
+ else { // User is looking towards the left.
+ replaceRawCoordinates(rawCoords, rightEyeRawCoords, 'right', ['EyeUpper0', 'EyeLower0']);
+ }
+ var adjustedLeftIrisCoords = _this.getAdjustedIrisCoords(rawCoords, leftIrisRawCoords, 'left');
+ var adjustedRightIrisCoords = _this.getAdjustedIrisCoords(rawCoords, rightIrisRawCoords, 'right');
+ rawCoords = rawCoords.concat(adjustedLeftIrisCoords)
+ .concat(adjustedRightIrisCoords);
+ }
+ var transformedCoordsData = _this.transformRawCoords(rawCoords, box, angle, rotationMatrix);
+ var transformedCoords = tf.tensor2d(transformedCoordsData);
+ var landmarksBox = enlargeBox(_this.calculateLandmarksBoundingBox(transformedCoordsData));
+ var squarifiedLandmarksBox = squarifyBox(landmarksBox);
+ _this.regionsOfInterest[i] = __assign({}, squarifiedLandmarksBox, { landmarks: transformedCoords.arraySync() });
+ var prediction = {
+ coords: tf.tensor2d(rawCoords, [rawCoords.length, 3]),
+ scaledCoords: transformedCoords,
+ box: landmarksBox,
+ flag: tf.squeeze(flag)
+ };
+ return prediction;
+ });
+ })];
+ }
+ });
+ });
+ };
+ // Updates regions of interest if the intersection over union between
+ // the incoming and previous regions falls below a threshold.
+ Pipeline.prototype.updateRegionsOfInterest = function (boxes) {
+ for (var i = 0; i < boxes.length; i++) {
+ var box = boxes[i];
+ var previousBox = this.regionsOfInterest[i];
+ var iou = 0;
+ if (previousBox && previousBox.startPoint) {
+ var _a = box.startPoint, boxStartX = _a[0], boxStartY = _a[1];
+ var _b = box.endPoint, boxEndX = _b[0], boxEndY = _b[1];
+ var _c = previousBox.startPoint, previousBoxStartX = _c[0], previousBoxStartY = _c[1];
+ var _d = previousBox.endPoint, previousBoxEndX = _d[0], previousBoxEndY = _d[1];
+ var xStartMax = Math.max(boxStartX, previousBoxStartX);
+ var yStartMax = Math.max(boxStartY, previousBoxStartY);
+ var xEndMin = Math.min(boxEndX, previousBoxEndX);
+ var yEndMin = Math.min(boxEndY, previousBoxEndY);
+ var intersection = (xEndMin - xStartMax) * (yEndMin - yStartMax);
+ var boxArea = (boxEndX - boxStartX) * (boxEndY - boxStartY);
+ var previousBoxArea = (previousBoxEndX - previousBoxStartX) *
+ (previousBoxEndY - boxStartY);
+ iou = intersection / (boxArea + previousBoxArea - intersection);
+ }
+ if (iou < UPDATE_REGION_OF_INTEREST_IOU_THRESHOLD) {
+ this.regionsOfInterest[i] = box;
+ }
+ }
+ this.regionsOfInterest = this.regionsOfInterest.slice(0, boxes.length);
+ };
+ Pipeline.prototype.clearRegionOfInterest = function (index) {
+ if (this.regionsOfInterest[index] != null) {
+ this.regionsOfInterest = this.regionsOfInterest.slice(0, index).concat(this.regionsOfInterest.slice(index + 1));
+ }
+ };
+ Pipeline.prototype.shouldUpdateRegionsOfInterest = function () {
+ var roisCount = this.regionsOfInterest.length;
+ var noROIs = roisCount === 0;
+ if (this.maxFaces === 1 || noROIs) {
+ return noROIs;
+ }
+ return roisCount !== this.maxFaces &&
+ this.runsWithoutFaceDetector >= this.maxContinuousChecks;
+ };
+ Pipeline.prototype.calculateLandmarksBoundingBox = function (landmarks) {
+ var xs = landmarks.map(function (d) { return d[0]; });
+ var ys = landmarks.map(function (d) { return d[1]; });
+ var startPoint = [Math.min.apply(Math, xs), Math.min.apply(Math, ys)];
+ var endPoint = [Math.max.apply(Math, xs), Math.max.apply(Math, ys)];
+ return { startPoint: startPoint, endPoint: endPoint };
+ };
+ return Pipeline;
+ }());
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var UV_COORDS = [
+ [0.499976992607117, 0.652534008026123],
+ [0.500025987625122, 0.547487020492554],
+ [0.499974012374878, 0.602371990680695],
+ [0.482113003730774, 0.471979022026062],
+ [0.500150978565216, 0.527155995368958],
+ [0.499909996986389, 0.498252987861633],
+ [0.499523013830185, 0.40106201171875],
+ [0.289712011814117, 0.380764007568359],
+ [0.499954998493195, 0.312398016452789],
+ [0.499987006187439, 0.269918978214264],
+ [0.500023007392883, 0.107050001621246],
+ [0.500023007392883, 0.666234016418457],
+ [0.5000159740448, 0.679224014282227],
+ [0.500023007392883, 0.692348003387451],
+ [0.499976992607117, 0.695277988910675],
+ [0.499976992607117, 0.70593398809433],
+ [0.499976992607117, 0.719385027885437],
+ [0.499976992607117, 0.737019002437592],
+ [0.499967992305756, 0.781370997428894],
+ [0.499816000461578, 0.562981009483337],
+ [0.473773002624512, 0.573909997940063],
+ [0.104906998574734, 0.254140973091125],
+ [0.365929991006851, 0.409575998783112],
+ [0.338757991790771, 0.41302502155304],
+ [0.311120003461838, 0.409460008144379],
+ [0.274657994508743, 0.389131009578705],
+ [0.393361985683441, 0.403706014156342],
+ [0.345234006643295, 0.344011008739471],
+ [0.370094001293182, 0.346076011657715],
+ [0.319321990013123, 0.347265005111694],
+ [0.297903001308441, 0.353591024875641],
+ [0.24779200553894, 0.410809993743896],
+ [0.396889001131058, 0.842755019664764],
+ [0.280097991228104, 0.375599980354309],
+ [0.106310002505779, 0.399955987930298],
+ [0.2099249958992, 0.391353011131287],
+ [0.355807989835739, 0.534406006336212],
+ [0.471751004457474, 0.65040397644043],
+ [0.474155008792877, 0.680191993713379],
+ [0.439785003662109, 0.657229006290436],
+ [0.414617002010345, 0.66654098033905],
+ [0.450374007225037, 0.680860996246338],
+ [0.428770989179611, 0.682690978050232],
+ [0.374971002340317, 0.727805018424988],
+ [0.486716985702515, 0.547628998756409],
+ [0.485300987958908, 0.527395009994507],
+ [0.257764995098114, 0.314490020275116],
+ [0.401223003864288, 0.455172002315521],
+ [0.429818987846375, 0.548614978790283],
+ [0.421351999044418, 0.533740997314453],
+ [0.276895999908447, 0.532056987285614],
+ [0.483370006084442, 0.499586999416351],
+ [0.33721199631691, 0.282882988452911],
+ [0.296391993761063, 0.293242990970612],
+ [0.169294998049736, 0.193813979625702],
+ [0.447580009698868, 0.302609980106354],
+ [0.392390012741089, 0.353887975215912],
+ [0.354490011930466, 0.696784019470215],
+ [0.067304998636246, 0.730105042457581],
+ [0.442739009857178, 0.572826027870178],
+ [0.457098007202148, 0.584792017936707],
+ [0.381974011659622, 0.694710969924927],
+ [0.392388999462128, 0.694203019142151],
+ [0.277076005935669, 0.271932005882263],
+ [0.422551989555359, 0.563233017921448],
+ [0.385919004678726, 0.281364023685455],
+ [0.383103013038635, 0.255840003490448],
+ [0.331431001424789, 0.119714021682739],
+ [0.229923993349075, 0.232002973556519],
+ [0.364500999450684, 0.189113974571228],
+ [0.229622006416321, 0.299540996551514],
+ [0.173287004232407, 0.278747975826263],
+ [0.472878992557526, 0.666198015213013],
+ [0.446828007698059, 0.668527007102966],
+ [0.422762006521225, 0.673889994621277],
+ [0.445307999849319, 0.580065965652466],
+ [0.388103008270264, 0.693961024284363],
+ [0.403039008378983, 0.706539988517761],
+ [0.403629004955292, 0.693953037261963],
+ [0.460041999816895, 0.557139039039612],
+ [0.431158006191254, 0.692366003990173],
+ [0.452181994915009, 0.692366003990173],
+ [0.475387006998062, 0.692366003990173],
+ [0.465828001499176, 0.779190003871918],
+ [0.472328990697861, 0.736225962638855],
+ [0.473087012767792, 0.717857003211975],
+ [0.473122000694275, 0.704625964164734],
+ [0.473033010959625, 0.695277988910675],
+ [0.427942007780075, 0.695277988910675],
+ [0.426479011774063, 0.703539967536926],
+ [0.423162013292313, 0.711845993995667],
+ [0.4183090031147, 0.720062971115112],
+ [0.390094995498657, 0.639572978019714],
+ [0.013953999616206, 0.560034036636353],
+ [0.499913990497589, 0.58014702796936],
+ [0.413199990987778, 0.69539999961853],
+ [0.409626007080078, 0.701822996139526],
+ [0.468080013990402, 0.601534962654114],
+ [0.422728985548019, 0.585985004901886],
+ [0.463079988956451, 0.593783974647522],
+ [0.37211999297142, 0.47341400384903],
+ [0.334562003612518, 0.496073007583618],
+ [0.411671012639999, 0.546965003013611],
+ [0.242175996303558, 0.14767599105835],
+ [0.290776997804642, 0.201445996761322],
+ [0.327338010072708, 0.256527006626129],
+ [0.399509996175766, 0.748921036720276],
+ [0.441727995872498, 0.261676013469696],
+ [0.429764986038208, 0.187834024429321],
+ [0.412198007106781, 0.108901023864746],
+ [0.288955003023148, 0.398952007293701],
+ [0.218936994671822, 0.435410976409912],
+ [0.41278201341629, 0.398970007896423],
+ [0.257135003805161, 0.355440020561218],
+ [0.427684992551804, 0.437960982322693],
+ [0.448339998722076, 0.536936044692993],
+ [0.178560003638268, 0.45755398273468],
+ [0.247308000922203, 0.457193970680237],
+ [0.286267012357712, 0.467674970626831],
+ [0.332827985286713, 0.460712015628815],
+ [0.368755996227264, 0.447206974029541],
+ [0.398963987827301, 0.432654976844788],
+ [0.476410001516342, 0.405806005001068],
+ [0.189241006970406, 0.523923993110657],
+ [0.228962004184723, 0.348950982093811],
+ [0.490725994110107, 0.562400996685028],
+ [0.404670000076294, 0.485132992267609],
+ [0.019469000399113, 0.401564002037048],
+ [0.426243007183075, 0.420431017875671],
+ [0.396993011236191, 0.548797011375427],
+ [0.266469985246658, 0.376977026462555],
+ [0.439121007919312, 0.51895797252655],
+ [0.032313998788595, 0.644356966018677],
+ [0.419054001569748, 0.387154996395111],
+ [0.462783008813858, 0.505746960639954],
+ [0.238978996872902, 0.779744982719421],
+ [0.198220998048782, 0.831938028335571],
+ [0.107550002634525, 0.540755033493042],
+ [0.183610007166862, 0.740257024765015],
+ [0.134409993886948, 0.333683013916016],
+ [0.385764002799988, 0.883153975009918],
+ [0.490967005491257, 0.579378008842468],
+ [0.382384985685349, 0.508572995662689],
+ [0.174399003386497, 0.397670984268188],
+ [0.318785011768341, 0.39623498916626],
+ [0.343364000320435, 0.400596976280212],
+ [0.396100014448166, 0.710216999053955],
+ [0.187885001301765, 0.588537991046906],
+ [0.430987000465393, 0.944064974784851],
+ [0.318993002176285, 0.898285031318665],
+ [0.266247987747192, 0.869701027870178],
+ [0.500023007392883, 0.190576016902924],
+ [0.499976992607117, 0.954452991485596],
+ [0.366169989109039, 0.398822009563446],
+ [0.393207013607025, 0.39553701877594],
+ [0.410373002290726, 0.391080021858215],
+ [0.194993004202843, 0.342101991176605],
+ [0.388664990663528, 0.362284004688263],
+ [0.365961998701096, 0.355970978736877],
+ [0.343364000320435, 0.355356991291046],
+ [0.318785011768341, 0.35834002494812],
+ [0.301414996385574, 0.363156020641327],
+ [0.058132998645306, 0.319076001644135],
+ [0.301414996385574, 0.387449026107788],
+ [0.499987989664078, 0.618434011936188],
+ [0.415838003158569, 0.624195992946625],
+ [0.445681989192963, 0.566076993942261],
+ [0.465844005346298, 0.620640993118286],
+ [0.49992299079895, 0.351523995399475],
+ [0.288718998432159, 0.819945991039276],
+ [0.335278987884521, 0.852819979190826],
+ [0.440512001514435, 0.902418971061707],
+ [0.128294005990028, 0.791940987110138],
+ [0.408771991729736, 0.373893976211548],
+ [0.455606997013092, 0.451801002025604],
+ [0.499877005815506, 0.908990025520325],
+ [0.375436991453171, 0.924192011356354],
+ [0.11421000212431, 0.615022003650665],
+ [0.448662012815475, 0.695277988910675],
+ [0.4480200111866, 0.704632043838501],
+ [0.447111994028091, 0.715808033943176],
+ [0.444831997156143, 0.730794012546539],
+ [0.430011987686157, 0.766808986663818],
+ [0.406787008047104, 0.685672998428345],
+ [0.400738000869751, 0.681069016456604],
+ [0.392399996519089, 0.677703022956848],
+ [0.367855995893478, 0.663918972015381],
+ [0.247923001646996, 0.601333022117615],
+ [0.452769994735718, 0.420849978923798],
+ [0.43639200925827, 0.359887003898621],
+ [0.416164010763168, 0.368713974952698],
+ [0.413385987281799, 0.692366003990173],
+ [0.228018000721931, 0.683571994304657],
+ [0.468268007040024, 0.352671027183533],
+ [0.411361992359161, 0.804327011108398],
+ [0.499989002943039, 0.469825029373169],
+ [0.479153990745544, 0.442654013633728],
+ [0.499974012374878, 0.439637005329132],
+ [0.432112008333206, 0.493588984012604],
+ [0.499886006116867, 0.866917014122009],
+ [0.49991300702095, 0.821729004383087],
+ [0.456548988819122, 0.819200992584229],
+ [0.344549000263214, 0.745438992977142],
+ [0.37890899181366, 0.574010014533997],
+ [0.374292999505997, 0.780184984207153],
+ [0.319687992334366, 0.570737957954407],
+ [0.357154995203018, 0.604269981384277],
+ [0.295284003019333, 0.621580958366394],
+ [0.447750002145767, 0.862477004528046],
+ [0.410986006259918, 0.508723020553589],
+ [0.31395098567009, 0.775308012962341],
+ [0.354128003120422, 0.812552988529205],
+ [0.324548006057739, 0.703992962837219],
+ [0.189096003770828, 0.646299958229065],
+ [0.279776990413666, 0.71465802192688],
+ [0.1338230073452, 0.682700991630554],
+ [0.336768001317978, 0.644733011722565],
+ [0.429883986711502, 0.466521978378296],
+ [0.455527991056442, 0.548622965812683],
+ [0.437114000320435, 0.558896005153656],
+ [0.467287987470627, 0.529924988746643],
+ [0.414712011814117, 0.335219979286194],
+ [0.37704598903656, 0.322777986526489],
+ [0.344107985496521, 0.320150971412659],
+ [0.312875986099243, 0.32233202457428],
+ [0.283526003360748, 0.333190023899078],
+ [0.241245999932289, 0.382785975933075],
+ [0.102986000478268, 0.468762993812561],
+ [0.267612010240555, 0.424560010433197],
+ [0.297879010438919, 0.433175981044769],
+ [0.333433985710144, 0.433878004550934],
+ [0.366427004337311, 0.426115989685059],
+ [0.396012008190155, 0.416696012020111],
+ [0.420121014118195, 0.41022801399231],
+ [0.007561000064015, 0.480777025222778],
+ [0.432949006557465, 0.569517970085144],
+ [0.458638995885849, 0.479089021682739],
+ [0.473466008901596, 0.545744001865387],
+ [0.476087987422943, 0.563830018043518],
+ [0.468472003936768, 0.555056989192963],
+ [0.433990985155106, 0.582361996173859],
+ [0.483518004417419, 0.562983989715576],
+ [0.482482999563217, 0.57784903049469],
+ [0.42645001411438, 0.389798998832703],
+ [0.438998997211456, 0.39649498462677],
+ [0.450067013502121, 0.400434017181396],
+ [0.289712011814117, 0.368252992630005],
+ [0.276670008897781, 0.363372981548309],
+ [0.517862021923065, 0.471948027610779],
+ [0.710287988185883, 0.380764007568359],
+ [0.526226997375488, 0.573909997940063],
+ [0.895093023777008, 0.254140973091125],
+ [0.634069979190826, 0.409575998783112],
+ [0.661242008209229, 0.41302502155304],
+ [0.688880026340485, 0.409460008144379],
+ [0.725341975688934, 0.389131009578705],
+ [0.606630027294159, 0.40370500087738],
+ [0.654766023159027, 0.344011008739471],
+ [0.629905998706818, 0.346076011657715],
+ [0.680678009986877, 0.347265005111694],
+ [0.702096998691559, 0.353591024875641],
+ [0.75221198797226, 0.410804986953735],
+ [0.602918028831482, 0.842862963676453],
+ [0.719901978969574, 0.375599980354309],
+ [0.893692970275879, 0.399959981441498],
+ [0.790081977844238, 0.391354024410248],
+ [0.643998026847839, 0.534487962722778],
+ [0.528249025344849, 0.65040397644043],
+ [0.525849997997284, 0.680191040039062],
+ [0.560214996337891, 0.657229006290436],
+ [0.585384011268616, 0.66654098033905],
+ [0.549625992774963, 0.680860996246338],
+ [0.57122802734375, 0.682691991329193],
+ [0.624852001667023, 0.72809898853302],
+ [0.513050019741058, 0.547281980514526],
+ [0.51509702205658, 0.527251958847046],
+ [0.742246985435486, 0.314507007598877],
+ [0.598631024360657, 0.454979002475739],
+ [0.570338010787964, 0.548575043678284],
+ [0.578631997108459, 0.533622980117798],
+ [0.723087012767792, 0.532054007053375],
+ [0.516445994377136, 0.499638974666595],
+ [0.662801027297974, 0.282917976379395],
+ [0.70362401008606, 0.293271005153656],
+ [0.830704987049103, 0.193813979625702],
+ [0.552385985851288, 0.302568018436432],
+ [0.607609987258911, 0.353887975215912],
+ [0.645429015159607, 0.696707010269165],
+ [0.932694971561432, 0.730105042457581],
+ [0.557260990142822, 0.572826027870178],
+ [0.542901992797852, 0.584792017936707],
+ [0.6180260181427, 0.694710969924927],
+ [0.607590973377228, 0.694203019142151],
+ [0.722943007946014, 0.271963000297546],
+ [0.577413976192474, 0.563166975975037],
+ [0.614082992076874, 0.281386971473694],
+ [0.616907000541687, 0.255886018276215],
+ [0.668509006500244, 0.119913995265961],
+ [0.770092010498047, 0.232020974159241],
+ [0.635536015033722, 0.189248979091644],
+ [0.77039098739624, 0.299556016921997],
+ [0.826722025871277, 0.278755009174347],
+ [0.527121007442474, 0.666198015213013],
+ [0.553171992301941, 0.668527007102966],
+ [0.577238023281097, 0.673889994621277],
+ [0.554691970348358, 0.580065965652466],
+ [0.611896991729736, 0.693961024284363],
+ [0.59696102142334, 0.706539988517761],
+ [0.596370995044708, 0.693953037261963],
+ [0.539958000183105, 0.557139039039612],
+ [0.568841993808746, 0.692366003990173],
+ [0.547818005084991, 0.692366003990173],
+ [0.52461302280426, 0.692366003990173],
+ [0.534089982509613, 0.779141008853912],
+ [0.527670979499817, 0.736225962638855],
+ [0.526912987232208, 0.717857003211975],
+ [0.526877999305725, 0.704625964164734],
+ [0.526966989040375, 0.695277988910675],
+ [0.572058022022247, 0.695277988910675],
+ [0.573521018028259, 0.703539967536926],
+ [0.57683801651001, 0.711845993995667],
+ [0.581691026687622, 0.720062971115112],
+ [0.609944999217987, 0.639909982681274],
+ [0.986046016216278, 0.560034036636353],
+ [0.5867999792099, 0.69539999961853],
+ [0.590372025966644, 0.701822996139526],
+ [0.531915009021759, 0.601536989212036],
+ [0.577268004417419, 0.585934996604919],
+ [0.536915004253387, 0.593786001205444],
+ [0.627542972564697, 0.473352015018463],
+ [0.665585994720459, 0.495950996875763],
+ [0.588353991508484, 0.546862006187439],
+ [0.757824003696442, 0.14767599105835],
+ [0.709249973297119, 0.201507985591888],
+ [0.672684013843536, 0.256581008434296],
+ [0.600408971309662, 0.74900496006012],
+ [0.55826598405838, 0.261672019958496],
+ [0.570303976535797, 0.187870979309082],
+ [0.588165998458862, 0.109044015407562],
+ [0.711045026779175, 0.398952007293701],
+ [0.781069993972778, 0.435405015945435],
+ [0.587247014045715, 0.398931980133057],
+ [0.742869973182678, 0.355445981025696],
+ [0.572156012058258, 0.437651991844177],
+ [0.55186802148819, 0.536570012569427],
+ [0.821442008018494, 0.457556009292603],
+ [0.752701997756958, 0.457181990146637],
+ [0.71375697851181, 0.467626988887787],
+ [0.66711300611496, 0.460672974586487],
+ [0.631101012229919, 0.447153985500336],
+ [0.6008620262146, 0.432473003864288],
+ [0.523481011390686, 0.405627012252808],
+ [0.810747981071472, 0.523926019668579],
+ [0.771045982837677, 0.348959028720856],
+ [0.509127020835876, 0.562718033790588],
+ [0.595292985439301, 0.485023975372314],
+ [0.980530977249146, 0.401564002037048],
+ [0.573499977588654, 0.420000016689301],
+ [0.602994978427887, 0.548687994480133],
+ [0.733529984951019, 0.376977026462555],
+ [0.560611009597778, 0.519016981124878],
+ [0.967685997486115, 0.644356966018677],
+ [0.580985009670258, 0.387160003185272],
+ [0.537728011608124, 0.505385041236877],
+ [0.760966002941132, 0.779752969741821],
+ [0.801778972148895, 0.831938028335571],
+ [0.892440974712372, 0.54076099395752],
+ [0.816350996494293, 0.740260004997253],
+ [0.865594983100891, 0.333687007427216],
+ [0.614073991775513, 0.883246004581451],
+ [0.508952975273132, 0.579437971115112],
+ [0.617941975593567, 0.508316040039062],
+ [0.825608015060425, 0.397674977779388],
+ [0.681214988231659, 0.39623498916626],
+ [0.656635999679565, 0.400596976280212],
+ [0.603900015354156, 0.710216999053955],
+ [0.81208598613739, 0.588539004325867],
+ [0.56801301240921, 0.944564998149872],
+ [0.681007981300354, 0.898285031318665],
+ [0.733752012252808, 0.869701027870178],
+ [0.633830010890961, 0.398822009563446],
+ [0.606792986392975, 0.39553701877594],
+ [0.589659988880157, 0.391062021255493],
+ [0.805015981197357, 0.342108011245728],
+ [0.611334979534149, 0.362284004688263],
+ [0.634037971496582, 0.355970978736877],
+ [0.656635999679565, 0.355356991291046],
+ [0.681214988231659, 0.35834002494812],
+ [0.698584973812103, 0.363156020641327],
+ [0.941866993904114, 0.319076001644135],
+ [0.698584973812103, 0.387449026107788],
+ [0.584177017211914, 0.624107003211975],
+ [0.554318010807037, 0.566076993942261],
+ [0.534153997898102, 0.62064003944397],
+ [0.711217999458313, 0.819975018501282],
+ [0.664629995822906, 0.852871000766754],
+ [0.559099972248077, 0.902631998062134],
+ [0.871706008911133, 0.791940987110138],
+ [0.591234028339386, 0.373893976211548],
+ [0.544341027736664, 0.451583981513977],
+ [0.624562978744507, 0.924192011356354],
+ [0.88577002286911, 0.615028977394104],
+ [0.551338016986847, 0.695277988910675],
+ [0.551980018615723, 0.704632043838501],
+ [0.552887976169586, 0.715808033943176],
+ [0.555167973041534, 0.730794012546539],
+ [0.569944024085999, 0.767035007476807],
+ [0.593203008174896, 0.685675978660583],
+ [0.599261999130249, 0.681069016456604],
+ [0.607599973678589, 0.677703022956848],
+ [0.631937980651855, 0.663500010967255],
+ [0.752032995223999, 0.601315021514893],
+ [0.547226011753082, 0.420395016670227],
+ [0.563543975353241, 0.359827995300293],
+ [0.583841025829315, 0.368713974952698],
+ [0.586614012718201, 0.692366003990173],
+ [0.771915018558502, 0.683578014373779],
+ [0.531597018241882, 0.352482974529266],
+ [0.588370978832245, 0.804440975189209],
+ [0.52079701423645, 0.442565023899078],
+ [0.567984998226166, 0.493479013442993],
+ [0.543282985687256, 0.819254994392395],
+ [0.655317008495331, 0.745514988899231],
+ [0.621008992195129, 0.574018001556396],
+ [0.625559985637665, 0.78031200170517],
+ [0.680198013782501, 0.570719003677368],
+ [0.64276397228241, 0.604337990283966],
+ [0.704662978649139, 0.621529996395111],
+ [0.552012026309967, 0.862591981887817],
+ [0.589071989059448, 0.508637011051178],
+ [0.685944974422455, 0.775357007980347],
+ [0.645735025405884, 0.812640011310577],
+ [0.675342977046967, 0.703978002071381],
+ [0.810858011245728, 0.646304965019226],
+ [0.72012197971344, 0.714666962623596],
+ [0.866151988506317, 0.682704985141754],
+ [0.663187026977539, 0.644596993923187],
+ [0.570082008838654, 0.466325998306274],
+ [0.544561982154846, 0.548375964164734],
+ [0.562758982181549, 0.558784961700439],
+ [0.531987011432648, 0.530140042304993],
+ [0.585271000862122, 0.335177004337311],
+ [0.622952997684479, 0.32277899980545],
+ [0.655896008014679, 0.320163011550903],
+ [0.687132000923157, 0.322345972061157],
+ [0.716481983661652, 0.333200991153717],
+ [0.758756995201111, 0.382786989212036],
+ [0.897013008594513, 0.468769013881683],
+ [0.732392013072968, 0.424547016620636],
+ [0.70211398601532, 0.433162987232208],
+ [0.66652500629425, 0.433866024017334],
+ [0.633504986763, 0.426087975502014],
+ [0.603875994682312, 0.416586995124817],
+ [0.579657971858978, 0.409945011138916],
+ [0.992439985275269, 0.480777025222778],
+ [0.567192018032074, 0.569419980049133],
+ [0.54136598110199, 0.478899002075195],
+ [0.526564002037048, 0.546118021011353],
+ [0.523913025856018, 0.563830018043518],
+ [0.531529009342194, 0.555056989192963],
+ [0.566035985946655, 0.582329034805298],
+ [0.51631098985672, 0.563053965568542],
+ [0.5174720287323, 0.577877044677734],
+ [0.573594987392426, 0.389806985855103],
+ [0.560697972774506, 0.395331978797913],
+ [0.549755990505219, 0.399751007556915],
+ [0.710287988185883, 0.368252992630005],
+ [0.723330020904541, 0.363372981548309]
+ ];
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // Default TF Hub locations for the facemesh and iris graph models, used
    // when the caller does not pass custom model URLs / IOHandlers to load$1.
    var FACEMESH_GRAPHMODEL_PATH = 'https://tfhub.dev/mediapipe/tfjs-model/facemesh/1/default/1';
    var IRIS_GRAPHMODEL_PATH = 'https://tfhub.dev/mediapipe/tfjs-model/iris/1/default/2';
    // Input resolution the mesh model expects (192x192 crop), forwarded to the
    // prediction Pipeline by the FaceMesh constructor.
    var MESH_MODEL_INPUT_WIDTH = 192;
    var MESH_MODEL_INPUT_HEIGHT = 192;
    // `kind` tags attached to predictions: plain JS arrays vs. live tf.Tensors.
    var PREDICTION_VALUES = 'MediaPipePredictionValues';
    var PREDICTION_TENSORS = 'MediaPipePredictionTensors';
+ /**
+ * Load the model.
+ *
+ * @param options - a configuration object with the following properties:
+ * - `maxContinuousChecks` How many frames to go without running the bounding
+ * box detector. Only relevant if maxFaces > 1. Defaults to 5.
+ * - `detectionConfidence` Threshold for discarding a prediction. Defaults to
+ * 0.9.
+ * - `maxFaces` The maximum number of faces detected in the input. Should be
+ * set to the minimum number for performance. Defaults to 10.
+ * - `iouThreshold` A float representing the threshold for deciding whether
+ * boxes overlap too much in non-maximum suppression. Must be between [0, 1].
+ * Defaults to 0.3.
+ * - `scoreThreshold` A threshold for deciding when to remove boxes based
+ * on score in non-maximum suppression. Defaults to 0.75.
+ * - `shouldLoadIrisModel` Whether to also load the iris detection model.
+ * Defaults to true.
+ * - `modelUrl` Optional param for specifying a custom facemesh model url or
+ * a `tf.io.IOHandler` object.
+ * - `detectorModelUrl` Optional param for specifying a custom blazeface model
+ * url or a `tf.io.IOHandler` object.
+ * - `irisModelUrl` Optional param for specifying a custom iris model url or
+ * a `tf.io.IOHandler` object.
+ */
+ function load$1(config) {
+ return __awaiter(this, void 0, void 0, function () {
+ var _a, maxContinuousChecks, _b, detectionConfidence, _c, maxFaces, _d, iouThreshold, _e, scoreThreshold, _f, shouldLoadIrisModel, modelUrl, detectorModelUrl, irisModelUrl, models, faceMesh;
+ return __generator(this, function (_g) {
+ switch (_g.label) {
+ case 0:
+ _a = config.maxContinuousChecks, maxContinuousChecks = _a === void 0 ? 5 : _a, _b = config.detectionConfidence, detectionConfidence = _b === void 0 ? 0.9 : _b, _c = config.maxFaces, maxFaces = _c === void 0 ? 10 : _c, _d = config.iouThreshold, iouThreshold = _d === void 0 ? 0.3 : _d, _e = config.scoreThreshold, scoreThreshold = _e === void 0 ? 0.75 : _e, _f = config.shouldLoadIrisModel, shouldLoadIrisModel = _f === void 0 ? true : _f, modelUrl = config.modelUrl, detectorModelUrl = config.detectorModelUrl, irisModelUrl = config.irisModelUrl;
+ if (!shouldLoadIrisModel) return [3 /*break*/, 2];
+ return [4 /*yield*/, Promise.all([
+ loadDetectorModel(detectorModelUrl, maxFaces, iouThreshold, scoreThreshold),
+ loadMeshModel(modelUrl),
+ loadIrisModel(irisModelUrl)
+ ])];
+ case 1:
+ models = _g.sent();
+ return [3 /*break*/, 4];
+ case 2: return [4 /*yield*/, Promise.all([
+ loadDetectorModel(detectorModelUrl, maxFaces, iouThreshold, scoreThreshold),
+ loadMeshModel(modelUrl)
+ ])];
+ case 3:
+ models = _g.sent();
+ _g.label = 4;
+ case 4:
+ faceMesh = new FaceMesh(models[0], models[1], maxContinuousChecks, detectionConfidence, maxFaces, shouldLoadIrisModel ? models[2] : null);
+ return [2 /*return*/, faceMesh];
+ }
+ });
+ });
+ }
+ function loadDetectorModel(modelUrl, maxFaces, iouThreshold, scoreThreshold) {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, load({ modelUrl: modelUrl, maxFaces: maxFaces, iouThreshold: iouThreshold, scoreThreshold: scoreThreshold })];
+ });
+ });
+ }
+ function loadMeshModel(modelUrl) {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ if (modelUrl != null) {
+ return [2 /*return*/, tfconv.loadGraphModel(modelUrl)];
+ }
+ return [2 /*return*/, tfconv.loadGraphModel(FACEMESH_GRAPHMODEL_PATH, { fromTFHub: true })];
+ });
+ });
+ }
+ function loadIrisModel(modelUrl) {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ if (modelUrl != null) {
+ return [2 /*return*/, tfconv.loadGraphModel(modelUrl)];
+ }
+ return [2 /*return*/, tfconv.loadGraphModel(IRIS_GRAPHMODEL_PATH, { fromTFHub: true })];
+ });
+ });
+ }
+ function getInputTensorDimensions$1(input) {
+ return input instanceof tf.Tensor ? [input.shape[0], input.shape[1]] :
+ [input.height, input.width];
+ }
    /**
     * Mirrors a face prediction across the image's vertical axis so keypoints
     * match an unflipped view (used for mirrored webcam input): x becomes
     * imageWidth - 1 - x. Handles both the tensor form of a prediction
     * (returnTensors: true) and the plain-array form.
     */
    function flipFaceHorizontal$1(face, imageWidth) {
        if (face.mesh instanceof tf.Tensor) {
            // Tensor path: compute flipped bounding-box corners and meshes in
            // tidy() so intermediate tensors are disposed automatically.
            var _a = tf.tidy(function () {
                // Basis tensors implement, per coordinate of each point:
                //   x' = (imageWidth - 1 - x) * 1
                //   y' = (0 - y) * -1 = y
                //   z' = (0 - z) * 1  — note z is negated on this path.
                var subtractBasis = tf.tensor1d([imageWidth - 1, 0, 0]);
                var multiplyBasis = tf.tensor1d([1, -1, 1]);
                return tf.tidy(function () {
                    return [
                        tf.concat([
                            tf.sub(imageWidth - 1, tf.slice(face.boundingBox.topLeft, 0, 1)),
                            tf.slice(face.boundingBox.topLeft, 1, 1)
                        ]),
                        tf.concat([
                            tf.sub(imageWidth - 1, tf.slice(face.boundingBox.bottomRight, 0, 1)),
                            tf.slice(face.boundingBox.bottomRight, 1, 1)
                        ]),
                        tf.mul(tf.sub(subtractBasis, face.mesh), multiplyBasis),
                        tf.mul(tf.sub(subtractBasis, face.scaledMesh), multiplyBasis)
                    ];
                });
            }), topLeft = _a[0], bottomRight = _a[1], mesh = _a[2], scaledMesh = _a[3];
            return Object.assign({}, face, { boundingBox: { topLeft: topLeft, bottomRight: bottomRight }, mesh: mesh, scaledMesh: scaledMesh });
        }
        // Array path: flip the bounding-box x values and the x of every mesh
        // point; y (and z, when present) are copied unchanged.
        return Object.assign({}, face, {
            boundingBox: {
                topLeft: [
                    imageWidth - 1 - face.boundingBox.topLeft[0],
                    face.boundingBox.topLeft[1]
                ],
                bottomRight: [
                    imageWidth - 1 - face.boundingBox.bottomRight[0],
                    face.boundingBox.bottomRight[1]
                ]
            },
            mesh: (face.mesh).map(function (coord) {
                var flippedCoord = coord.slice(0);
                flippedCoord[0] = imageWidth - 1 - coord[0];
                return flippedCoord;
            }),
            scaledMesh: face.scaledMesh.map(function (coord) {
                var flippedCoord = coord.slice(0);
                flippedCoord[0] = imageWidth - 1 - coord[0];
                return flippedCoord;
            })
        });
    }
    var FaceMesh = /** @class */ (function () {
        /**
         * Ties the BlazeFace detector, the face-mesh model, and (optionally)
         * the iris model together behind a single prediction Pipeline.
         *
         * @param blazeFace BlazeFace bounding-box detector model.
         * @param blazeMeshModel Face-mesh graph model.
         * @param maxContinuousChecks Frames to reuse cached face regions before
         *     re-running the bounding-box detector.
         * @param detectionConfidence Below this face-in-view confidence a
         *     prediction's cached region of interest is cleared.
         * @param maxFaces Maximum number of faces to track.
         * @param irisModel Iris graph model, or null when not loaded.
         */
        function FaceMesh(blazeFace, blazeMeshModel, maxContinuousChecks, detectionConfidence, maxFaces, irisModel) {
            this.kind = 'MediaPipeFaceMesh';
            this.pipeline = new Pipeline(blazeFace, blazeMeshModel, MESH_MODEL_INPUT_WIDTH, MESH_MODEL_INPUT_HEIGHT, maxContinuousChecks, maxFaces, irisModel);
            this.detectionConfidence = detectionConfidence;
        }
        // Mapping from semantic region names to mesh keypoint indices.
        FaceMesh.getAnnotations = function () {
            return MESH_ANNOTATIONS;
        };
        /**
         * Returns an array of UV coordinates for the 468 facial keypoint vertices in
         * mesh_map.jpg. Can be used to map textures to the facial mesh.
         */
        FaceMesh.getUVCoords = function () {
            return UV_COORDS;
        };
        /**
         * Returns an array of faces in an image.
         *
         * @param input The image to classify. Can be a tensor, DOM element image,
         * video, or canvas.
         * @param returnTensors (defaults to `false`) Whether to return tensors as
         * opposed to values.
         * @param flipHorizontal Whether to flip/mirror the facial keypoints
         * horizontally. Should be true for videos that are flipped by default (e.g.
         * webcams).
         * @param predictIrises
         *
         * @return An array of AnnotatedPrediction objects.
         */
        FaceMesh.prototype.estimateFaces = function (config) {
            // Downlevel-compiled async method; the __generator switch encodes
            // awaits on the pipeline and on tensor reads.
            return __awaiter(this, void 0, void 0, function () {
                var _a, returnTensors, _b, flipHorizontal, _c, predictIrises, input, _d, width, image, predictions, savedWebglPackDepthwiseConvFlag;
                var _this = this;
                return __generator(this, function (_e) {
                    switch (_e.label) {
                        case 0:
                            _a = config.returnTensors, returnTensors = _a === void 0 ? false : _a, _b = config.flipHorizontal, flipHorizontal = _b === void 0 ? false : _b, _c = config.predictIrises, predictIrises = _c === void 0 ? true : _c;
                            input = config.input;
                            // Iris predictions require the optional iris model.
                            if (predictIrises && this.pipeline.irisModel == null) {
                                throw new Error('The iris model was not loaded as part of facemesh. ' +
                                    'Please initialize the model with ' +
                                    'facemesh.load({shouldLoadIrisModel: true}).');
                            }
                            // Only the width is needed (for horizontal flipping).
                            _d = getInputTensorDimensions$1(input), width = _d[1];
                            // Normalize the input to a float32 batch-of-one tensor.
                            image = tf.tidy(function () {
                                if (!(input instanceof tf.Tensor)) {
                                    input = tf.browser.fromPixels(input);
                                }
                                return tf.expandDims(tf.cast(input, 'float32'), 0);
                            });
                            if (!(tf.getBackend() === 'webgl')) return [3 /*break*/, 2];
                            // On WebGL, force packed depthwise conv while the
                            // pipeline runs, restoring the previous flag after.
                            savedWebglPackDepthwiseConvFlag = tf.env().get('WEBGL_PACK_DEPTHWISECONV');
                            tf.env().set('WEBGL_PACK_DEPTHWISECONV', true);
                            return [4 /*yield*/, this.pipeline.predict(image, predictIrises)];
                        case 1:
                            predictions = _e.sent();
                            tf.env().set('WEBGL_PACK_DEPTHWISECONV', savedWebglPackDepthwiseConvFlag);
                            return [3 /*break*/, 4];
                        case 2: return [4 /*yield*/, this.pipeline.predict(image, predictIrises)];
                        case 3:
                            predictions = _e.sent();
                            _e.label = 4;
                        case 4:
                            image.dispose();
                            if (predictions != null && predictions.length > 0) {
                                // Convert each raw prediction to an annotated
                                // result, reading tensor values only when the
                                // caller did not ask for tensors.
                                return [2 /*return*/, Promise.all(predictions.map(function (prediction, i) { return __awaiter(_this, void 0, void 0, function () {
                                        var coords, scaledCoords, box, flag, tensorsToRead, tensorValues, flagValue, annotatedPrediction_1, _a, coordsArr, coordsArrScaled, annotatedPrediction, annotations, key;
                                        var _this = this;
                                        return __generator(this, function (_b) {
                                            switch (_b.label) {
                                                case 0:
                                                    coords = prediction.coords, scaledCoords = prediction.scaledCoords, box = prediction.box, flag = prediction.flag;
                                                    tensorsToRead = [flag];
                                                    if (!returnTensors) {
                                                        tensorsToRead = tensorsToRead.concat([coords, scaledCoords]);
                                                    }
                                                    return [4 /*yield*/, Promise.all(tensorsToRead.map(function (d) { return __awaiter(_this, void 0, void 0, function () { return __generator(this, function (_a) {
                                                            return [2 /*return*/, d.array()];
                                                        }); }); }))];
                                                case 1:
                                                    tensorValues = _b.sent();
                                                    flagValue = tensorValues[0];
                                                    flag.dispose();
                                                    // Low-confidence faces force a fresh
                                                    // bounding-box detection next frame.
                                                    if (flagValue < this.detectionConfidence) {
                                                        this.pipeline.clearRegionOfInterest(i);
                                                    }
                                                    if (returnTensors) {
                                                        annotatedPrediction_1 = {
                                                            kind: PREDICTION_TENSORS,
                                                            faceInViewConfidence: flagValue,
                                                            mesh: coords,
                                                            scaledMesh: scaledCoords,
                                                            boundingBox: {
                                                                topLeft: tf.tensor1d(box.startPoint),
                                                                bottomRight: tf.tensor1d(box.endPoint)
                                                            }
                                                        };
                                                        if (flipHorizontal) {
                                                            return [2 /*return*/, flipFaceHorizontal$1(annotatedPrediction_1, width)];
                                                        }
                                                        return [2 /*return*/, annotatedPrediction_1];
                                                    }
                                                    _a = tensorValues.slice(1), coordsArr = _a[0], coordsArrScaled = _a[1];
                                                    scaledCoords.dispose();
                                                    coords.dispose();
                                                    annotatedPrediction = {
                                                        kind: PREDICTION_VALUES,
                                                        faceInViewConfidence: flagValue,
                                                        boundingBox: { topLeft: box.startPoint, bottomRight: box.endPoint },
                                                        mesh: coordsArr,
                                                        scaledMesh: coordsArrScaled
                                                    };
                                                    if (flipHorizontal) {
                                                        annotatedPrediction =
                                                            flipFaceHorizontal$1(annotatedPrediction, width);
                                                    }
                                                    // Group scaled keypoints by semantic
                                                    // region; iris keys only when requested.
                                                    annotations = {};
                                                    for (key in MESH_ANNOTATIONS) {
                                                        if (predictIrises || key.includes('Iris') === false) {
                                                            annotations[key] = MESH_ANNOTATIONS[key].map(function (index) { return annotatedPrediction.scaledMesh[index]; });
                                                        }
                                                    }
                                                    annotatedPrediction['annotations'] = annotations;
                                                    return [2 /*return*/, annotatedPrediction];
                                            }
                                        });
                                    }); }))];
                            }
                            return [2 /*return*/, []];
                    }
                });
            });
        };
        return FaceMesh;
    }());
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // TypeScript string-enum transpilation: populates exports.SupportedPackages
    // with the single supported package name, creating the object on first run.
    (function (SupportedPackages) {
        SupportedPackages["mediapipeFacemesh"] = "mediapipe-facemesh";
    })(exports.SupportedPackages || (exports.SupportedPackages = {}));
+ /**
+ * Load face-landmarks-detection.
+ *
+ * @param pkg - The name of the package to load, e.g. 'mediapipe-facemesh'.
+ * @param config - a configuration object with the following properties:
+ * - `maxContinuousChecks` How many frames to go without running the bounding
+ * box detector. Only relevant if maxFaces > 1. Defaults to 5.
+ * - `detectionConfidence` Threshold for discarding a prediction. Defaults to
+ * 0.9.
+ * - `maxFaces` The maximum number of faces detected in the input. Should be
+ * set to the minimum number for performance. Defaults to 10.
+ * - `iouThreshold` A float representing the threshold for deciding whether
+ * boxes overlap too much in non-maximum suppression. Must be between [0, 1].
+ * Defaults to 0.3.
+ * - `scoreThreshold` A threshold for deciding when to remove boxes based
+ * on score in non-maximum suppression. Defaults to 0.75.
+ * - `shouldLoadIrisModel` Whether to also load the iris detection model.
+ * Defaults to true.
+ */
+ function load$2(pkg, config) {
+ if (pkg === void 0) { pkg = exports.SupportedPackages.mediapipeFacemesh; }
+ if (config === void 0) { config = {}; }
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ if (pkg === exports.SupportedPackages.mediapipeFacemesh) {
+ return [2 /*return*/, load$1(config)];
+ }
+ else {
+ throw new Error(pkg + " is not a valid package name.");
+ }
+ });
+ });
+ }
+
    // Public API of the bundle: the package-dispatching loader.
    exports.load = load$2;

    // Mark the UMD export object as an ES module for interop consumers.
    Object.defineProperty(exports, '__esModule', { value: true });
+
+})));
diff --git a/js/index.js b/js/index.js
new file mode 100644
index 0000000..0bc87ea
--- /dev/null
+++ b/js/index.js
@@ -0,0 +1,409 @@
// Cache-busting marker to confirm which bundle version the browser loaded.
console.log("🧪 JS is updated: 2024-05-20");

// Status reporter used during analysis; starts as a no-op and is rebound to
// the real DOM updater inside main() once the page elements exist.
let setStatus = function () {};
+
+// Summary calculator
+function summarizeResults() {
+ console.log("▶ summarizeResults() invoked");
+ const ratingCells = document.querySelectorAll('td[id^="assessment-"]');
+
+ const counts = {
+ perfect: 0,
+ deviation0: 0,
+ deviation1: 0,
+ deviation2: 0,
+ deviation3: 0,
+ deviation4: 0,
+ };
+
+ const weights = {
+ perfect: 2,
+ deviation0: 1,
+ deviation1: 0,
+ deviation2: -1,
+ deviation3: -2,
+ deviation4: -2,
+ };
+
+ ratingCells.forEach(cell => {
+ const html = cell.innerHTML;
+ if (html.includes("perfect")) counts.perfect++;
+ else if (html.includes("deviation-0")) counts.deviation0++;
+ else if (html.includes("deviation-1")) counts.deviation1++;
+ else if (html.includes("deviation-2")) counts.deviation2++;
+ else if (html.includes("deviation-3")) counts.deviation3++;
+ else if (html.includes("deviation-4")) counts.deviation4++;
+ });
+
+ const totalScore =
+ counts.perfect * weights.perfect +
+ counts.deviation0 * weights.deviation0 +
+ counts.deviation1 * weights.deviation1 +
+ counts.deviation2 * weights.deviation2 +
+ counts.deviation3 * weights.deviation3 +
+ counts.deviation4 * weights.deviation4;
+
+ const maxScore = 22;
+ const normalized = Math.max(0, Math.min(8, Math.round((totalScore / maxScore) * 9)));
+ const psl = `PSL${normalized}`;
+
+ const labels = {
+ 0: "Subhuman",
+ 1: "Sub5",
+ 2: "Low-tier Normie",
+ 3: "Normie",
+ 4: "Upper Normie",
+ 5: "Chadlite",
+ 6: "Chad",
+ 7: "Gigachad",
+ 8: "Terachad"
+ };
+ const label = labels[normalized] || "Unknown";
+
+const hierarchyLabels = {
+ 8: {
+ titles: ["Era Defining Man", "Immortal", "God Among Men"],
+ description: "Men destined to define eras and change history, who will be remembered for millennia to come",
+ color: "#A0D8EF"
+ },
+ 7: {
+ titles: ["King", "Explorer", "Philosopher"],
+ description: "Philosophers, Kings, Explorers, and Inventors",
+ color: "#4682B4"
+ },
+ 6: {
+ titles: ["Influential Artist", "Writer", "General"],
+ description: "Influential Artists, Writers, and Generals",
+ color: "#5F9EA0"
+ },
+ 5: {
+ titles: ["Diplomat", "Official", "Officer"],
+ description: "Officers, Officials, Diplomats",
+ color: "#8FBC8F"
+ },
+ 4: {
+ titles: ["Middle Management", "Soldier", "Craftsman"],
+ description: "Middle Management, Soldiers, Craftsmen",
+ color: "#8B5A2B"
+ },
+ 3: {
+ titles: ["Manual Laborerer", "Peasant", "Street Merchant"],
+ description: "Manual Labourers, Peasants, Street Merchants",
+ color: "#A52A2A"
+ },
+ 2: {
+ titles: ["Street Sweeper", "Drain Cleaner", "Waste Collector"],
+ description: "Street Cleaners",
+ color: "#B22222"
+ },
+ 1: {
+ titles: ["Sanitation Worker", "Toilet Scrubber", "Gutterman"],
+ description: "Sanitation Workers",
+ color: "#800000"
+ },
+ 0: {
+ titles: ["Untouchable", "Subhuman", "Bottom of the Barrel"],
+ description: "Untouchables",
+ color: "#2F2F2F"
+ }
+};
+
+
+ const badgeClass = normalized <= 3 ? "danger" : normalized <= 6 ? "warning" : "success";
+ const gradingToggle = document.getElementById("grading-toggle");
+ const usePSL = gradingToggle ? gradingToggle.checked : true;
+
+ const scoreCell = document.getElementById("total-score");
+ const breakdownCell = document.getElementById("total-breakdown");
+ const resultCell = document.getElementById("total-psl");
+
+ if (scoreCell) scoreCell.innerHTML = `<strong>${totalScore}</strong>`;
+ if (breakdownCell) breakdownCell.innerHTML = `
+ Perfect: ${counts.perfect},
+ Slight: ${counts.deviation0},
+ Noticeable: ${counts.deviation1},
+ Significant: ${counts.deviation2},
+ Horrible: ${counts.deviation3},
+ Extreme: ${counts.deviation4}
+ `;
+
+ if (resultCell) {
+ if (usePSL) {
+ resultCell.innerHTML = `<span class="badge bg-${badgeClass}">${psl}</span><br><small>${label}</small>`;
+ } else {
+ const h = hierarchyLabels[normalized] || hierarchyLabels[1];
+const title = Array.isArray(h.titles)
+ ? h.titles[Math.floor(Math.random() * h.titles.length)]
+ : h.titles;
+
+const lightColors = ["#A0D8EF", "#8FBC8F", "#A52A2A"];
+const isLight = lightColors.includes(h.color);
+const textColor = isLight ? "black" : "white";
+
+resultCell.innerHTML = `
+ <span class="badge" style="background-color: ${h.color}; color: ${textColor};">
+ ${title}
+ </span><br>
+ <small style="color: ${h.color}; font-size: 0.75rem;">${h.description}</small>
+`;
+
+ }
+ }
+}
+
/**
 * Application entry point: wires the file/URL inputs and grading toggle,
 * then drives analyze() whenever a new image is chosen, installing canvas
 * drag handlers so landmarks can be corrected by hand.
 *
 * NOTE(review): the model loaded into `_model` below is never used —
 * analyze() loads its own copy, so the model weights are fetched twice.
 */
async function main() {
  const _model = await faceLandmarksDetection.load(faceLandmarksDetection.SupportedPackages.mediapipeFacemesh, {
    maxFaces: 1
  });
  // setupDatabase is defined elsewhere in this project — TODO confirm source.
  window.database = await setupDatabase();
  const imageInputFile = document.getElementById("image-file");
  const imageInputUrl = document.getElementById("image-url");
  const introductionElement = document.getElementById("introduction");
  const analyzingElement = document.getElementById("analyzing");
  const canvas = document.getElementById("canvas");
  const ctx = canvas.getContext("2d");

  // Bindings of the currently-analyzed image; undefined until the first
  // successful analysis (set in onChange, consumed by clearData).
  let data = void 0;

// Re-render the summary whenever the grading-system toggle flips.
const gradingToggle = document.getElementById("grading-toggle");
if (gradingToggle) {
  gradingToggle.addEventListener("change", summarizeResults);
}


  // Local file selected: analyze it through an object URL.
  // NOTE(review): object URLs created below are never revoked, so each
  // analyzed image is retained until page unload (URL.revokeObjectURL).
  imageInputFile.addEventListener("change", async () => {
    if (imageInputFile.files[0]) {
      introductionElement.style.display = "none";
      analyzingElement.classList.remove("d-none");
      data && clearData();

      setStatus("Reading image");

      imageInputUrl.value = "";

      let url = URL.createObjectURL(imageInputFile.files[0]);

      await onChange(url);
    }
  });

  // Remote URL entered: download to a blob first, then analyze via an
  // object URL. NOTE(review): fetch/blob failures are not handled here.
  imageInputUrl.addEventListener("change", async () => {
    if (imageInputUrl.value) {
      introductionElement.style.display = "none";
      analyzingElement.classList.remove("d-none");
      data && clearData();

      setStatus("Downloading image");

      imageInputFile.value = "";

      let file = await (await fetch(imageInputUrl.value)).blob();
      let url = URL.createObjectURL(file);

      await onChange(url);
    }
  });

  // Per-image flow: run the analysis, bind each metric to its table row,
  // and install the canvas interaction handlers.
  async function onChange(url) {
    setStatus("Analyzing");

    let analysis = await analyze(canvas, ctx, url);

    data = analysis.criteria = {
      midfaceRatio: {...createBindings(analysis.criteria.midfaceRatio, "midface-ratio")},
      facialWidthToHeightRatio: {...createBindings(analysis.criteria.facialWidthToHeightRatio, "facial-width-to-height-ratio")},
      chinToPhiltrumRatio: {...createBindings(analysis.criteria.chinToPhiltrumRatio, "chin-to-philtrum-ratio")},
      canthalTilt: {...createBindings(analysis.criteria.canthalTilt, "canthal-tilt")},
      mouthToNoseRatio: {...createBindings(analysis.criteria.mouthToNoseRatio, "mouth-to-nose-ratio")},
      bigonialWidth: {...createBindings(analysis.criteria.bigonialWidth, "bigonial-width")},
      lipRatio: {...createBindings(analysis.criteria.lipRatio, "lip-ratio")},
      eyeSeparationRatio: {...createBindings(analysis.criteria.eyeSeparationRatio, "eye-separation-ratio")},
      eyeToMouthAngle: {...createBindings(analysis.criteria.eyeToMouthAngle, "eye-to-mouth-angle")},
      lowerThirdHeight: {...createBindings(analysis.criteria.lowerThirdHeight, "lower-third-height")},
      palpebralFissureLength: {...createBindings(analysis.criteria.palpebralFissureLength, "palpebral-fissure-length")},
      eyeColor: {...createBindings(analysis.criteria.eyeColor, "eye-color")},
    };

    // Couples one metric's analysis object to its table cells and checkbox.
    function createBindings(metric, id) {
      return {
        analysis: metric,
        render: document.getElementById(`value-${id}`),
        toggle: document.getElementById(`toggle-${id}`),
        ideal: document.getElementById(`ideal-${id}`),
        assessment: document.getElementById(`assessment-${id}`),
      };
    }

    // Recompute every metric and refresh its value/ideal/assessment cells.
    let calculate = () => {
      for (let i of Object.values(analysis.criteria)) {
        i.analysis.calculate();
        i.render.innerHTML = i.analysis.render();
        i.ideal.innerHTML = i.analysis.ideal();
        i.assessment.innerHTML = i.analysis.assess();
      }

      // Eye color is sampled from the image into the swatch canvases held
      // as children of the metric's render cell.
      analysis.criteria.eyeColor.analysis.detect(
        analysis.image,
        Array.from(analysis.criteria.eyeColor.render.children).map(i => i.getContext("2d"))
      );

      summarizeResults(); // 👈 Score update
    }

    // Redraw the base image plus the overlays of every enabled metric.
    let render = () => {
      analysis.resetToImage();
      for (let i of Object.values(analysis.criteria)) {
        if (i.toggle.checked) {
          i.analysis.draw(ctx);
        }
      }
    }

    for (let i of Object.values(analysis.criteria)) {
      i.toggle.onchange = () => render();
    }

    // Key of the landmark currently being dragged, or false when idle.
    let moving = false;

    // Begin dragging when the pointer lands within a visible landmark's
    // hit radius.
    canvas.onmousedown = ({ offsetX: x, offsetY: y }) => {
      let necessaryPoints = Object.values(analysis.criteria).filter(i => i.toggle.checked).map(i => i.analysis.necessaryPoints()).flat();

      for (let i in analysis.points) {
        if (analysis.points.hasOwnProperty(i) && necessaryPoints.includes(i)) {
          if (Math.sqrt(
            (analysis.points[i][0] - x) ** 2
            + (analysis.points[i][1] - y) ** 2
          ) <= analysis.arcRadius) {
            moving = i;
            return;
          }
        }
      }
    }

    // Touch events are translated into the equivalent mouse handlers.
    canvas.ontouchstart = (e) => {
      let bcr = e.target.getBoundingClientRect();
      let x = e.targetTouches[0].clientX - bcr.x;
      let y = e.targetTouches[0].clientY - bcr.y;
      canvas.onmousedown({ offsetX: x, offsetY: y });
    }

    canvas.onmouseup = () => {
      moving = false;
    }

    canvas.ontouchend = canvas.ontouchcancel = (e) => {
      canvas.onmouseup();
    }

    // While dragging, move the landmark and recompute; otherwise draw a
    // gray ring around any landmark handle under the cursor.
    canvas.onmousemove = ({ offsetX: x, offsetY: y }) => {
      if (moving) {
        analysis.points[moving] = [x, y];
        calculate();
        render();
      } else {
        let necessaryPoints = Object.values(analysis.criteria).filter(i => i.toggle.checked).map(i => i.analysis.necessaryPoints()).flat();

        for (let i in analysis.points) {
          if (analysis.points.hasOwnProperty(i) && necessaryPoints.includes(i)) {
            if (Math.sqrt(
              (analysis.points[i][0] - x) ** 2
              + (analysis.points[i][1] - y) ** 2
            ) <= analysis.arcRadius) {
              render();
              ctx.beginPath();
              ctx.strokeStyle = "gray";
              let oldLineWidth = ctx.lineWidth;
              ctx.lineWidth = 0.5;
              // NOTE(review): hit-testing above uses analysis.arcRadius but
              // the highlight ring uses ctx.arcRadius (set in analyze). They
              // hold the same value today, but the inconsistency is fragile.
              ctx.arc(analysis.points[i][0], analysis.points[i][1], ctx.arcRadius + 1.5, 0, 2 * Math.PI);
              ctx.stroke();
              ctx.lineWidth = oldLineWidth;
              return;
            }
          }
        }
        render();
      }
    }

    canvas.ontouchmove = (e) => {
      let bcr = e.target.getBoundingClientRect();
      let x = e.targetTouches[0].clientX - bcr.x;
      let y = e.targetTouches[0].clientY - bcr.y;
      canvas.onmousemove({ offsetX: x, offsetY: y });
    }

    analyzingElement.classList.add("d-none");

    calculate();
    render();
  }

  // Wipes the canvas and every metric cell from the previous image.
  function clearData() {
    canvas.width = 0;
    canvas.height = 0;
    for (let i of Object.values(data)) {
      i.render.innerHTML = "";
      i.ideal.innerHTML = "";
      i.assessment.innerHTML = "";
    }
  }

  // Rebind the status reporter to the live status element.
  setStatus = (text) => document.getElementById("analyzing-status").innerHTML = text;

  document.querySelector("#loading").style.display = "none";
  document.querySelector(".container").classList.remove("d-none");
}
+
/**
 * Runs the full facial analysis pipeline for one image URL.
 *
 * Sizes the canvas to the image, draws it, scales the line/handle widths to
 * the image area, loads the landmark model (cached after the first call),
 * locates the face, and derives the per-metric criteria.
 *
 * @param {HTMLCanvasElement} canvas - target canvas, resized to the image
 * @param {CanvasRenderingContext2D} ctx - drawing context of `canvas`
 * @param {string} url - object URL (or regular URL) of the image to analyze
 * @returns analysis bundle: image, redraw helper, landmark points, criteria,
 *     and the hit-test radius for draggable points
 * @throws if no face is detected in the image (see findLandmarks)
 */
async function analyze(canvas, ctx, url) {
  setStatus("Loading image");

  let image = await loadImage(url);

  canvas.width = image.width;
  canvas.height = image.height;
  resetToImage(ctx, image);
  // Scale stroke width and drag-handle radius with image area so overlays
  // stay legible on both small and large photos.
  ctx.lineWidth = Math.sqrt((image.width * image.height) / 100000);
  ctx.arcRadius = Math.sqrt((image.width * image.height) / 100000);

  setStatus("Analyzing");

  // Cache the model promise on the function itself: previously a fresh model
  // was downloaded and initialized for every analyzed image.
  if (!analyze._modelPromise) {
    analyze._modelPromise = faceLandmarksDetection
      .load(faceLandmarksDetection.SupportedPackages.mediapipeFacemesh, {
        maxFaces: 1
      })
      .catch((err) => {
        // Do not cache a failed load; let the next call retry.
        analyze._modelPromise = null;
        throw err;
      });
  }
  const model = await analyze._modelPromise;

  let face = await findLandmarks(model, image);
  let [points, criteria] = analyseCriteria(face);

  return {
    image,
    resetToImage: () => resetToImage(ctx, image),
    points,
    criteria,
    arcRadius: ctx.arcRadius,
  };
}
+
/**
 * Loads an image from a URL.
 *
 * Fix: the original promise only listened for "load", so a broken URL left
 * the promise pending forever (and the UI stuck on "Loading image"). Now the
 * "error" event rejects, and `src` is assigned only after both listeners are
 * registered.
 *
 * @param {string} url - image URL or object URL
 * @returns {Promise<HTMLImageElement>} resolves with the decoded image,
 *     rejects with an Error when the image fails to load
 */
function loadImage(url) {
  return new Promise((resolve, reject) => {
    const image = new Image();
    image.addEventListener("load", () => resolve(image));
    image.addEventListener("error", () =>
      reject(new Error(`Failed to load image: ${url}`))
    );
    image.src = url;
  });
}
+
/**
 * Redraws the source image at the canvas origin, wiping any metric overlays
 * previously drawn on top of it.
 *
 * @param {CanvasRenderingContext2D} ctx - destination drawing context
 * @param {HTMLImageElement} image - the analyzed source image
 */
function resetToImage(ctx, image) {
  ctx.drawImage(image, 0, 0);
}
+
/**
 * Runs the landmark model on one image and returns the first detected face.
 *
 * @param model - loaded face-landmarks-detection model
 * @param image - image element (or tensor) to run detection on
 * @returns the first prediction returned by the model
 * @throws {Error} "No face detected" when the model finds no faces
 */
async function findLandmarks(model, image) {
  const predictions = await model.estimateFaces({ input: image });
  if (predictions.length === 0) {
    throw new Error("No face detected");
  }
  return predictions[0];
}
+
// Entry point: kick off initialization. Fix: the original fire-and-forget
// call left any startup failure (model download error, missing DOM element)
// as an unhandled promise rejection; surface it in the console instead.
(async function () {
  try {
    await main();
  } catch (err) {
    console.error("Initialization failed:", err);
  }
})();
diff --git a/js/tf-backend-cpu.js b/js/tf-backend-cpu.js
new file mode 100644
index 0000000..539d2c5
--- /dev/null
+++ b/js/tf-backend-cpu.js
@@ -0,0 +1,9526 @@
+/**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+(function (global, factory) {
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('@tensorflow/tfjs-core'), require('seedrandom')) :
+ typeof define === 'function' && define.amd ? define(['exports', '@tensorflow/tfjs-core', 'seedrandom'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.tf = global.tf || {}, global.tf, global.seedrandom));
+}(this, (function (exports, tfjsCore, seedrandom) { 'use strict';
+
+    // Rollup CommonJS interop shim (generated code): wraps a CJS module so it
+    // can be consumed like an ES-module namespace — copies enumerable keys
+    // (preserving live getters) onto a null-prototype object and exposes the
+    // module itself under `default`.
+    function _interopNamespace(e) {
+        if (e && e.__esModule) return e;
+        var n = Object.create(null);
+        if (e) {
+            Object.keys(e).forEach(function (k) {
+                if (k !== 'default') {
+                    var d = Object.getOwnPropertyDescriptor(e, k);
+                    Object.defineProperty(n, k, d.get ? d : {
+                        enumerable: true,
+                        get: function () {
+                            return e[k];
+                        }
+                    });
+                }
+            });
+        }
+        n['default'] = e;
+        return n;
+    }
+
+    var seedrandom__namespace = /*#__PURE__*/_interopNamespace(seedrandom);
+
+ /*! *****************************************************************************
+ Copyright (c) Microsoft Corporation.
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+ ***************************************************************************** */
+ /* global Reflect, Promise */
+    // tslib helper (generated): copy static members from base `b` onto derived
+    // `d`, preferring Object.setPrototypeOf / __proto__ when available.
+    var extendStatics = function (d, b) {
+        extendStatics = Object.setPrototypeOf ||
+            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
+            function (d, b) { for (var p in b)
+                if (b.hasOwnProperty(p))
+                    d[p] = b[p]; };
+        return extendStatics(d, b);
+    };
+    // tslib helper (generated): ES5 downlevel of `class D extends B` — links
+    // statics and wires up the prototype chain.
+    function __extends(d, b) {
+        extendStatics(d, b);
+        function __() { this.constructor = d; }
+        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
+    }
+    // tslib helper (generated): ES5 downlevel of async/await — drives the
+    // downleveled generator, wrapping each resumption in promise plumbing.
+    function __awaiter(thisArg, _arguments, P, generator) {
+        function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+        return new (P || (P = Promise))(function (resolve, reject) {
+            function fulfilled(value) { try {
+                step(generator.next(value));
+            }
+            catch (e) {
+                reject(e);
+            } }
+            function rejected(value) { try {
+                step(generator["throw"](value));
+            }
+            catch (e) {
+                reject(e);
+            } }
+            function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+            step((generator = generator.apply(thisArg, _arguments || [])).next());
+        });
+    }
+    // tslib helper (generated): ES5 state machine backing downleveled
+    // generators. `_` tracks label/trys/ops; op codes follow the tslib
+    // protocol (0 next, 1 throw, 2 return, 3 break, 4 yield, 5 yield*,
+    // 6 catch, 7 endfinally). Machine-generated — do not hand-edit.
+    function __generator(thisArg, body) {
+        var _ = { label: 0, sent: function () { if (t[0] & 1)
+                throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
+        return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function () { return this; }), g;
+        function verb(n) { return function (v) { return step([n, v]); }; }
+        function step(op) {
+            if (f)
+                throw new TypeError("Generator is already executing.");
+            while (_)
+                try {
+                    if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done)
+                        return t;
+                    if (y = 0, t)
+                        op = [op[0] & 2, t.value];
+                    switch (op[0]) {
+                        case 0:
+                        case 1:
+                            t = op;
+                            break;
+                        case 4:
+                            _.label++;
+                            return { value: op[1], done: false };
+                        case 5:
+                            _.label++;
+                            y = op[1];
+                            op = [0];
+                            continue;
+                        case 7:
+                            op = _.ops.pop();
+                            _.trys.pop();
+                            continue;
+                        default:
+                            if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
+                                _ = 0;
+                                continue;
+                            }
+                            if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) {
+                                _.label = op[1];
+                                break;
+                            }
+                            if (op[0] === 6 && _.label < t[1]) {
+                                _.label = t[1];
+                                t = op;
+                                break;
+                            }
+                            if (t && _.label < t[2]) {
+                                _.label = t[2];
+                                _.ops.push(op);
+                                break;
+                            }
+                            if (t[2])
+                                _.ops.pop();
+                            _.trys.pop();
+                            continue;
+                    }
+                    op = body.call(thisArg, _);
+                }
+                catch (e) {
+                    op = [6, e];
+                    y = 0;
+                }
+                finally {
+                    f = t = 0;
+                }
+            if (op[0] & 5)
+                throw op[1];
+            return { value: op[0] ? op[1] : void 0, done: true };
+        }
+    }
+    // tslib helper (generated): get an iterator for `o`, falling back to an
+    // index-based shim for array-likes when Symbol.iterator is unavailable.
+    function __values(o) {
+        var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
+        if (m)
+            return m.call(o);
+        if (o && typeof o.length === "number")
+            return {
+                next: function () {
+                    if (o && i >= o.length)
+                        o = void 0;
+                    return { value: o && o[i++], done: !o };
+                }
+            };
+        throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
+    }
+    // tslib helper (generated): read up to `n` values from iterable `o` into
+    // an array (destructuring support), closing the iterator on early exit
+    // and re-throwing any iteration error afterwards.
+    function __read(o, n) {
+        var m = typeof Symbol === "function" && o[Symbol.iterator];
+        if (!m)
+            return o;
+        var i = m.call(o), r, ar = [], e;
+        try {
+            while ((n === void 0 || n-- > 0) && !(r = i.next()).done)
+                ar.push(r.value);
+        }
+        catch (error) {
+            e = { error: error };
+        }
+        finally {
+            try {
+                if (r && !r.done && (m = i["return"]))
+                    m.call(i);
+            }
+            finally {
+                if (e)
+                    throw e.error;
+            }
+        }
+        return ar;
+    }
+    // tslib helper (generated): spread all arguments into one flat array.
+    function __spread() {
+        for (var ar = [], i = 0; i < arguments.length; i++)
+            ar = ar.concat(__read(arguments[i]));
+        return ar;
+    }
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+    // Assert (via tf.util.assert) that none of the given tensors is
+    // complex64 — used by CPU kernels that have no complex implementation.
+    // Accepts a single tensor or an array; null entries are skipped.
+    function assertNotComplex(tensor, opName) {
+        if (!Array.isArray(tensor)) {
+            tensor = [tensor];
+        }
+        tensor.forEach(function (t) {
+            if (t != null) {
+                tfjsCore.util.assert(t.dtype !== 'complex64', function () { return opName + " does not support complex64 tensors in the CPU backend."; });
+            }
+        });
+    }
+
+    var whereImpl = tfjsCore.kernel_impls.whereImpl;
+    // Vanilla-JavaScript implementation of the TF.js KernelBackend. Tensor
+    // data lives in a DataStorage map keyed by dataId objects, with manual
+    // reference counting; complex64 tensors are stored as a pair of float32
+    // tensorInfos under `complexTensorInfos`. (Vendored, generated ES5 bundle.)
+    var MathBackendCPU = /** @class */ (function (_super) {
+        __extends(MathBackendCPU, _super);
+        function MathBackendCPU() {
+            var _this = _super.call(this) || this;
+            _this.blockSize = 48;
+            _this.firstUse = true;
+            _this.data = new tfjsCore.DataStorage(_this, tfjsCore.engine());
+            return _this;
+        }
+        // Monotonically increasing id drawn from the class-level counter.
+        MathBackendCPU.prototype.nextDataId = function () {
+            return MathBackendCPU.nextDataId++;
+        };
+        // Store `values` as a new data bucket (refCount 1) and return its id.
+        // On first use under Node, print a one-time hint to install tfjs-node.
+        MathBackendCPU.prototype.write = function (values, shape, dtype) {
+            if (this.firstUse) {
+                this.firstUse = false;
+                if (tfjsCore.env().get('IS_NODE')) {
+                    tfjsCore.backend_util.warn('\n============================\n' +
+                        'Hi there 👋. Looks like you are running TensorFlow.js in ' +
+                        'Node.js. To speed things up dramatically, install our node ' +
+                        'backend, which binds to TensorFlow C++, by running ' +
+                        'npm i @tensorflow/tfjs-node, ' +
+                        'or npm i @tensorflow/tfjs-node-gpu if you have CUDA. ' +
+                        'Then call require(\'@tensorflow/tfjs-node\'); (-gpu ' +
+                        'suffix for CUDA) at the start of your program. ' +
+                        'Visit https://github.com/tensorflow/tfjs-node for more details.' +
+                        '\n============================');
+                }
+            }
+            var dataId = { id: this.nextDataId() };
+            this.data.set(dataId, { values: values, dtype: dtype, refCount: 1 });
+            return dataId;
+        };
+        /**
+         * Create a data bucket in cpu backend.
+         * @param shape Shape of the `TensorInfo`.
+         * @param dtype DType of the `TensorInfo`.
+         * @param values The value of the `TensorInfo` stored as a flattened array.
+         */
+        MathBackendCPU.prototype.makeTensorInfo = function (shape, dtype, values) {
+            var outId;
+            // String tensors are stored as encoded bytes, not raw JS strings.
+            if (dtype === 'string' && values != null && values.length > 0 &&
+                tfjsCore.util.isString(values[0])) {
+                var encodedValues = values.map(function (d) { return tfjsCore.util.encodeString(d); });
+                outId = this.write(encodedValues, shape, dtype);
+            }
+            else {
+                outId = this.write(values, shape, dtype);
+            }
+            return { dataId: outId, shape: shape, dtype: dtype };
+        };
+        /** Return refCount of a `TensorData`. */
+        MathBackendCPU.prototype.refCount = function (dataId) {
+            if (this.data.has(dataId)) {
+                var tensorData = this.data.get(dataId);
+                return tensorData.refCount;
+            }
+            return 0;
+        };
+        /** Increase refCount of a `TensorData`. */
+        MathBackendCPU.prototype.incRef = function (dataId) {
+            var tensorData = this.data.get(dataId);
+            tensorData.refCount++;
+        };
+        /** Decrease refCount of a `TensorData`. */
+        MathBackendCPU.prototype.decRef = function (dataId) {
+            if (this.data.has(dataId)) {
+                var tensorData = this.data.get(dataId);
+                tensorData.refCount--;
+            }
+        };
+        // Overwrite (or create) the bucket for `dataId` with the given payload.
+        MathBackendCPU.prototype.move = function (dataId, values, shape, dtype, refCount) {
+            this.data.set(dataId, { values: values, dtype: dtype, refCount: refCount });
+        };
+        MathBackendCPU.prototype.numDataIds = function () {
+            return this.data.numDataIds();
+        };
+        // Async read is just a promise wrapper over readSync — CPU data is
+        // always resident in memory.
+        MathBackendCPU.prototype.read = function (dataId) {
+            return __awaiter(this, void 0, void 0, function () {
+                return __generator(this, function (_b) {
+                    return [2 /*return*/, this.readSync(dataId)];
+                });
+            });
+        };
+        // Synchronous read; complex64 values are reassembled by interleaving
+        // the underlying real/imag buffers.
+        MathBackendCPU.prototype.readSync = function (dataId) {
+            var _b = this.data.get(dataId), dtype = _b.dtype, complexTensorInfos = _b.complexTensorInfos;
+            if (dtype === 'complex64') {
+                var realValues = this.readSync(complexTensorInfos.real.dataId);
+                var imagValues = this.readSync(complexTensorInfos.imag.dataId);
+                return tfjsCore.backend_util.mergeRealAndImagArrays(realValues, imagValues);
+            }
+            return this.data.get(dataId).values;
+        };
+        // Wrap a tensor's data in a TensorBuffer, decoding string bytes first.
+        MathBackendCPU.prototype.bufferSync = function (t) {
+            var data = this.readSync(t.dataId);
+            var decodedData = data;
+            if (t.dtype === 'string') {
+                try {
+                    // Decode the bytes into string.
+                    decodedData = data.map(function (d) { return tfjsCore.util.decodeString(d); });
+                }
+                catch (_a) {
+                    throw new Error('Failed to decode encoded string bytes into utf-8');
+                }
+            }
+            return tfjsCore.buffer(t.shape, t.dtype, decodedData);
+        };
+        // Write `values` and materialize them as an engine-tracked Tensor.
+        MathBackendCPU.prototype.makeOutput = function (values, shape, dtype) {
+            var dataId = this.write(values, shape, dtype);
+            return tfjsCore.engine().makeTensorFromDataId(dataId, shape, dtype, this);
+        };
+        /**
+         * Dispose the memory if the dataId has 0 refCount. Return true if the memory
+         * is released or memory is not managed in this backend, false if memory is
+         * not cleared.
+         * @param dataId
+         * @param force Optional, remove the data regardless of refCount
+         */
+        MathBackendCPU.prototype.disposeData = function (dataId, force) {
+            if (force === void 0) { force = false; }
+            if (this.data.has(dataId)) {
+                this.data.get(dataId).refCount--;
+                if (!force && this.data.get(dataId).refCount > 0) {
+                    return false;
+                }
+                // A complex tensor owns its real/imag parts; force-release
+                // them together with the parent.
+                var complexTensorInfos = this.data.get(dataId).complexTensorInfos;
+                if (complexTensorInfos != null) {
+                    this.disposeData(complexTensorInfos.real.dataId, true);
+                    this.disposeData(complexTensorInfos.imag.dataId, true);
+                }
+                this.data.delete(dataId);
+            }
+            return true;
+        };
+        MathBackendCPU.prototype.disposeIntermediateTensorInfo = function (tensorInfo) {
+            this.disposeData(tensorInfo.dataId);
+        };
+        // Time `f` with wall-clock time (CPU kernels run synchronously).
+        MathBackendCPU.prototype.time = function (f) {
+            return __awaiter(this, void 0, void 0, function () {
+                var start, kernelMs;
+                return __generator(this, function (_b) {
+                    start = tfjsCore.util.now();
+                    f();
+                    kernelMs = tfjsCore.util.now() - start;
+                    return [2 /*return*/, { kernelMs: kernelMs }];
+                });
+            });
+        };
+        MathBackendCPU.prototype.memory = function () {
+            return {
+                // Unreliable due to automatic gc. The numbers above are cumulative.
+                unreliable: true,
+                reasons: ['The reported memory is an upper bound. Due to automatic garbage ' +
+                        'collection, the true allocated memory may be less.']
+            };
+        };
+        MathBackendCPU.prototype.where = function (condition) {
+            assertNotComplex([condition], 'where');
+            var condVals = this.readSync(condition.dataId);
+            return whereImpl(condition.shape, condVals);
+        };
+        MathBackendCPU.prototype.dispose = function () { };
+        MathBackendCPU.prototype.floatPrecision = function () {
+            return 32;
+        };
+        /** Returns the smallest representable number. */
+        MathBackendCPU.prototype.epsilon = function () {
+            return _super.prototype.epsilon.call(this);
+        };
+        return MathBackendCPU;
+    }(tfjsCore.KernelBackend));
+    MathBackendCPU.nextDataId = 0;
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function simpleAbsImpl(vals) {
+ var resultValues = new Float32Array(vals.length);
+ for (var i = 0; i < vals.length; ++i) {
+ resultValues[i] = Math.abs(vals[i]);
+ }
+ return resultValues;
+ }
+ var abs = function (args) {
+ var x = args.inputs.x;
+ var cpuBackend = args.backend;
+ assertNotComplex(x, 'abs');
+ var resultValues = new Float32Array(tfjsCore.util.sizeFromShape(x.shape));
+ var values = cpuBackend.data.get(x.dataId).values;
+ resultValues = simpleAbsImpl(values);
+ return cpuBackend.makeOutput(resultValues, x.shape, x.dtype);
+ };
+ var absConfig = {
+ kernelName: tfjsCore.Abs,
+ backendName: 'cpu',
+ kernelFunc: abs,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Template that creates implementation for binary ops. Supports broadcast.
+ */
+    // Given a scalar op (a, b) -> value, return an impl that applies it over
+    // two flat input arrays with NumPy-style broadcasting, returning
+    // [resultValues, broadcastShape].
+    function createSimpleBinaryKernelImpl(op) {
+        return function (aShape, bShape, aVals, bVals, dtype) {
+            var newShape = tfjsCore.backend_util.assertAndGetBroadcastShape(aShape, bShape);
+            var resultRank = newShape.length;
+            var resultStrides = tfjsCore.util.computeStrides(newShape);
+            var resultSize = tfjsCore.util.sizeFromShape(newShape);
+            var result = tfjsCore.util.getTypedArrayFromDType(dtype, resultSize);
+            var aRank = aShape.length;
+            var bRank = bShape.length;
+            var aStrides = tfjsCore.util.computeStrides(aShape);
+            var bStrides = tfjsCore.util.computeStrides(bShape);
+            var aBroadcastDims = tfjsCore.backend_util.getBroadcastDims(aShape, newShape);
+            var bBroadcastDims = tfjsCore.backend_util.getBroadcastDims(bShape, newShape);
+            if (aBroadcastDims.length + bBroadcastDims.length === 0) {
+                // Fast path: no per-dimension broadcasting, just cycle inputs.
+                for (var i = 0; i < result.length; ++i) {
+                    result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]);
+                }
+            }
+            else {
+                var _loop_1 = function (i) {
+                    // Map the flat output index to coordinates, zero the
+                    // broadcast dims, and read the matching input elements.
+                    var loc = tfjsCore.util.indexToLoc(i, resultRank, resultStrides);
+                    var aLoc = loc.slice(-aRank);
+                    aBroadcastDims.forEach(function (d) { return aLoc[d] = 0; });
+                    var aIndex = tfjsCore.util.locToIndex(aLoc, aRank, aStrides);
+                    var bLoc = loc.slice(-bRank);
+                    bBroadcastDims.forEach(function (d) { return bLoc[d] = 0; });
+                    var bIndex = tfjsCore.util.locToIndex(bLoc, bRank, bStrides);
+                    result[i] = op(aVals[aIndex], bVals[bIndex]);
+                };
+                for (var i = 0; i < result.length; ++i) {
+                    _loop_1(i);
+                }
+            }
+            return [result, newShape];
+        };
+    }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+    // Complex kernel: build a complex64 tensorInfo from separate real and
+    // imaginary float32 inputs. The parts are copied into new tensorInfos
+    // owned by the complex tensor.
+    function complex(args) {
+        var inputs = args.inputs, backend = args.backend;
+        var real = inputs.real, imag = inputs.imag;
+        var realVals = backend.data.get(real.dataId).values;
+        var imagVals = backend.data.get(imag.dataId).values;
+        var complexInfo = backend.makeTensorInfo(real.shape, 'complex64');
+        var complex = backend.data.get(complexInfo.dataId);
+        // The complex tensor owns the underlying real and imag tensorInfos, only the
+        // complex tensor tracks refCount, when complexData is disposed the
+        // underlying tensorData will be disposed.
+        complex.complexTensorInfos = {
+            real: backend.makeTensorInfo(real.shape, 'float32', realVals),
+            imag: backend.makeTensorInfo(imag.shape, 'float32', imagVals)
+        };
+        return complexInfo;
+    }
+    var complexConfig = {
+        kernelName: tfjsCore.Complex,
+        backendName: 'cpu',
+        kernelFunc: complex
+    };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Generates a tensorInfo with all zeros value.
+ * @param backend cpu backend.
+ * @param shape Shape for the zeros tensor.
+ * @param dtype Optional. If set, the result has this dtype.
+ */
+    function zeros(backend, shape, dtype) {
+        if (dtype === void 0) { dtype = 'float32'; }
+        // complex64 zeros are built from two float32 zero tensors.
+        if (dtype === 'complex64') {
+            var real = zeros(backend, shape, 'float32');
+            var imag = zeros(backend, shape, 'float32');
+            return complex({ inputs: { real: real, imag: imag }, backend: backend });
+        }
+        var values = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape(shape), dtype);
+        return backend.makeTensorInfo(shape, dtype, values);
+    }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+    // Identity kernel: share the input's data bucket (bumping its refCount)
+    // rather than copying values.
+    function identity(args) {
+        var inputs = args.inputs, backend = args.backend;
+        var x = inputs.x;
+        backend.incRef(x.dataId);
+        return { dataId: x.dataId, shape: x.shape, dtype: x.dtype };
+    }
+    var identityConfig = {
+        kernelName: tfjsCore.Identity,
+        backendName: 'cpu',
+        kernelFunc: identity
+    };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+    // Real kernel: extract the real component of a complex64 tensor.
+    function real(args) {
+        var inputs = args.inputs, backend = args.backend;
+        var input = inputs.input;
+        var real = backend.data.get(input.dataId).complexTensorInfos.real;
+        var realVal = backend.data.get(real.dataId).values;
+        // When complex tensor is disposed, its underlying parts will be disposed too.
+        // Make new tensor out of the real value of the complex. This makes sure the
+        // value is still accessible even if complex tensor is disposed.
+        return backend.makeTensorInfo(real.shape, real.dtype, realVal);
+    }
+    var realConfig = {
+        kernelName: tfjsCore.Real,
+        backendName: 'cpu',
+        kernelFunc: real
+    };
+
+    // Cast kernel: convert tensor `x` to `attrs.dtype`.
+    // - to complex64: pair a float32 copy of x with a zero imaginary part;
+    // - from complex64: cast the real component, dropping the imaginary part;
+    // - lossless widening: relabel the dtype, reusing the same data bucket;
+    // - to int32: truncate via Int32Array.from; to bool: notEqual(x, 0).
+    function cast(args) {
+        var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+        var x = inputs.x;
+        var dtype = attrs.dtype;
+        // Casting to complex64.
+        if (dtype === 'complex64') {
+            if (x.dtype === 'complex64') {
+                return identity({ inputs: { x: x }, backend: backend });
+            }
+            var zerosTensorInfo = zeros(backend, x.shape, x.dtype);
+            var floatX = cast({ inputs: { x: x }, backend: backend, attrs: { dtype: 'float32' } });
+            var result = complex({ inputs: { real: floatX, imag: zerosTensorInfo }, backend: backend });
+            backend.disposeIntermediateTensorInfo(zerosTensorInfo);
+            backend.disposeIntermediateTensorInfo(floatX);
+            return result;
+        }
+        // Casting from complex64
+        if (x.dtype === 'complex64') {
+            var realPart = real({ inputs: { input: x }, backend: backend });
+            var result = cast({ inputs: { x: realPart }, backend: backend, attrs: { dtype: dtype } });
+            backend.disposeIntermediateTensorInfo(realPart);
+            return result;
+        }
+        if (!tfjsCore.util.hasEncodingLoss(x.dtype, dtype)) {
+            // We don't change the underlying data, since we cast to higher
+            // precision.
+            var result = identity({ inputs: { x: x }, backend: backend });
+            return { dataId: result.dataId, shape: result.shape, dtype: dtype };
+        }
+        if (dtype === 'int32') {
+            var values = backend.data.get(x.dataId).values;
+            var resultValues = Int32Array.from(values);
+            return backend.makeTensorInfo(x.shape, 'int32', resultValues);
+        }
+        if (dtype === 'bool') {
+            // This is essentially the result of notEqual(x, 0). We avoid using
+            // kernel notEqual to avoid circular dependency, i.e. binary_utils ->
+            // cast -> notEqual -> binary_utils.
+            var xVals = backend.data.get(x.dataId).values;
+            var zero = tfjsCore.util.toTypedArray([0], x.dtype);
+            var _a = __read(createSimpleBinaryKernelImpl(function (a, b) { return (a !== b) ? 1 : 0; })(x.shape, [], xVals, zero, 'bool'), 2), resultData = _a[0], resultShape = _a[1];
+            return backend.makeTensorInfo(resultShape, 'bool', resultData);
+        }
+        throw new Error("Error in Cast: failed to cast " + x.dtype + " to " + dtype);
+    }
+    var castConfig = {
+        kernelName: tfjsCore.Cast,
+        backendName: 'cpu',
+        kernelFunc: cast
+    };
+
+ /**
+ * Template that creates a `KernelFunc` for binary ops.
+ * @param name Kernel name.
+ * @param binaryKernelImpl A `SimpleBinaryKernelImpl` for the kernel.
+ * @param binaryKernelComplexImpl Optional. If exists, represents a
+ * `ComplexBinaryKernelImpl` for the kernel, will be used when input dtype
+ * is `complex64`.
+ * @param dtype Optional. If set, the result has this dtype. Otherwise, the
+ * result has the same dtype as the first input. This is mainly used in
+ * comparison kernels, such as Equal, Less, Greater, etc.
+ */
+    function binaryKernelFunc(name, simpleImpl, complexImpl, dtype) {
+        // Without a complex impl, reject complex inputs and run the simple
+        // impl directly (decoding string tensors first).
+        if (complexImpl == null) {
+            return function (_a) {
+                var inputs = _a.inputs, backend = _a.backend;
+                var a = inputs.a, b = inputs.b;
+                var cpuBackend = backend;
+                assertNotComplex([a, b], name);
+                var aVals = cpuBackend.data.get(a.dataId).values;
+                var bVals = cpuBackend.data.get(b.dataId).values;
+                // `a.dtype` is used for both operands: binary kernels assume
+                // matching input dtypes.
+                var decodedAVals = a.dtype === 'string' ?
+                    // tslint:disable-next-line: no-any
+                    tfjsCore.backend_util.fromUint8ToStringArray(aVals) :
+                    aVals;
+                var decodedBVals = a.dtype === 'string' ?
+                    // tslint:disable-next-line: no-any
+                    tfjsCore.backend_util.fromUint8ToStringArray(bVals) :
+                    bVals;
+                var $dtype = dtype || a.dtype;
+                var _b = __read(simpleImpl(a.shape, b.shape, decodedAVals, decodedBVals, $dtype), 2), resultData = _b[0], resultShape = _b[1];
+                return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData);
+            };
+        }
+        return function (_a) {
+            var inputs = _a.inputs, backend = _a.backend;
+            var a = inputs.a, b = inputs.b;
+            var cpuBackend = backend;
+            if (a.dtype === 'complex64' || b.dtype === 'complex64') {
+                // Upcast both operands to complex64, run the complex impl on
+                // the separated real/imag buffers, then repack the result.
+                var $aComplex = cast({ inputs: { x: a }, backend: cpuBackend, attrs: { dtype: 'complex64' } });
+                var $aComplexVals = cpuBackend.data.get($aComplex.dataId);
+                var aReal = $aComplexVals.complexTensorInfos.real;
+                var aImag = $aComplexVals.complexTensorInfos.imag;
+                var aRealVals = cpuBackend.data.get(aReal.dataId).values;
+                var aImagVals = cpuBackend.data.get(aImag.dataId).values;
+                var $bComplex = cast({ inputs: { x: b }, backend: cpuBackend, attrs: { dtype: 'complex64' } });
+                var $bComplexVals = cpuBackend.data.get($bComplex.dataId);
+                var bReal = $bComplexVals.complexTensorInfos.real;
+                var bImag = $bComplexVals.complexTensorInfos.imag;
+                var bRealVals = cpuBackend.data.get(bReal.dataId).values;
+                var bImagVals = cpuBackend.data.get(bImag.dataId).values;
+                var _b = __read(complexImpl(a.shape, b.shape, aRealVals, aImagVals, bRealVals, bImagVals), 3), resultRealData = _b[0], resultImagData = _b[1], resultShape = _b[2];
+                var resultReal = cpuBackend.makeTensorInfo(resultShape, 'float32', resultRealData);
+                var resultImag = cpuBackend.makeTensorInfo(resultShape, 'float32', resultImagData);
+                var result = complex({ inputs: { real: resultReal, imag: resultImag }, backend: cpuBackend });
+                cpuBackend.disposeIntermediateTensorInfo($aComplex);
+                cpuBackend.disposeIntermediateTensorInfo($bComplex);
+                cpuBackend.disposeIntermediateTensorInfo(resultReal);
+                cpuBackend.disposeIntermediateTensorInfo(resultImag);
+                return result;
+            }
+            else {
+                var aVals = cpuBackend.data.get(a.dataId).values;
+                var bVals = cpuBackend.data.get(b.dataId).values;
+                var $dtype = dtype || a.dtype;
+                var _c = __read(simpleImpl(a.shape, b.shape, aVals, bVals, $dtype), 2), resultData = _c[0], resultShape = _c[1];
+                return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData);
+            }
+        };
+    }
+ /**
+ * Template that creates the complex type implementation for binary ops.
+ * Supports broadcast.
+ */
+    function createComplexBinaryKernelImpl(op) {
+        // `op` maps (aReal, aImag, bReal, bImag) -> { real, imag }. Inputs are
+        // given as separate real/imag buffers; they are merged into interleaved
+        // [re, im, re, im, ...] arrays, so element k lives at indices 2k, 2k+1.
+        return function (aShape, bShape, aRealVals, aImagVals, bRealVals, bImagVals) {
+            var resultShape = tfjsCore.backend_util.assertAndGetBroadcastShape(aShape, bShape);
+            var resultSize = tfjsCore.util.sizeFromShape(resultShape);
+            var resultRank = resultShape.length;
+            var resultStrides = tfjsCore.util.computeStrides(resultShape);
+            var resultRealVals = tfjsCore.util.getTypedArrayFromDType('float32', resultSize);
+            var resultImagVals = tfjsCore.util.getTypedArrayFromDType('float32', resultSize);
+            var aBroadcastDims = tfjsCore.backend_util.getBroadcastDims(aShape, resultShape);
+            var bBroadcastDims = tfjsCore.backend_util.getBroadcastDims(bShape, resultShape);
+            var aVals = tfjsCore.backend_util.mergeRealAndImagArrays(aRealVals, aImagVals);
+            var bVals = tfjsCore.backend_util.mergeRealAndImagArrays(bRealVals, bImagVals);
+            var aRank = aShape.length;
+            var aStrides = tfjsCore.util.computeStrides(aShape);
+            var bRank = bShape.length;
+            var bStrides = tfjsCore.util.computeStrides(bShape);
+            if (aBroadcastDims.length + bBroadcastDims.length === 0) {
+                // Fast path: no broadcasting, cycle flat indices.
+                for (var i = 0; i < resultRealVals.length; i++) {
+                    var aIdx = i % aVals.length;
+                    var bIdx = i % bVals.length;
+                    var result = op(aVals[aIdx * 2], aVals[aIdx * 2 + 1], bVals[bIdx * 2], bVals[bIdx * 2 + 1]);
+                    resultRealVals[i] = result.real;
+                    resultImagVals[i] = result.imag;
+                }
+            }
+            else {
+                var _loop_1 = function (i) {
+                    // Map the output index to coordinates, zero the broadcast
+                    // dims, then read the matching complex elements.
+                    var loc = tfjsCore.util.indexToLoc(i, resultRank, resultStrides);
+                    var aLoc = loc.slice(-aRank);
+                    aBroadcastDims.forEach(function (d) { return aLoc[d] = 0; });
+                    var aIndex = tfjsCore.util.locToIndex(aLoc, aRank, aStrides);
+                    var bLoc = loc.slice(-bRank);
+                    bBroadcastDims.forEach(function (d) { return bLoc[d] = 0; });
+                    var bIndex = tfjsCore.util.locToIndex(bLoc, bRank, bStrides);
+                    var opResult = op(aVals[aIndex * 2], aVals[aIndex * 2 + 1], bVals[bIndex * 2], bVals[bIndex * 2 + 1]);
+                    resultRealVals[i] = opResult.real;
+                    resultImagVals[i] = opResult.imag;
+                };
+                for (var i = 0; i < resultRealVals.length; i++) {
+                    _loop_1(i);
+                }
+            }
+            return [resultRealVals, resultImagVals, resultShape];
+        };
+    }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+    // Add kernel: element-wise a + b, with a complex variant that adds the
+    // real and imaginary components independently.
+    var addImpl = createSimpleBinaryKernelImpl((function (a, b) { return a + b; }));
+    var addComplexImpl = createComplexBinaryKernelImpl((function (aReal, aImag, bReal, bImag) {
+        return { real: aReal + bReal, imag: aImag + bImag };
+    }));
+    var add = binaryKernelFunc(tfjsCore.Add, addImpl, addComplexImpl);
+    var addConfig = {
+        kernelName: tfjsCore.Add,
+        backendName: 'cpu',
+        kernelFunc: add
+    };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Counts occurrences of each integer value in `xVals`, optionally weighted.
 *
 * @param xVals Flat array of non-negative integer bins.
 * @param weightsVals Flat weights, parallel to `xVals` (ignored when
 *     `weightsShape` has size 0).
 * @param weightsDtype Dtype of the output counts.
 * @param weightsShape Shape of the weights; size 0 means "count by 1".
 * @param size Number of bins; values >= size are silently dropped.
 * @returns Typed array of length `size` with the (weighted) counts.
 * @throws Error if any value in `xVals` is negative.
 */
function bincountImpl(xVals, weightsVals, weightsDtype, weightsShape, size) {
    // Whether weights were supplied is loop-invariant, so hoist the check.
    var hasWeights = tfjsCore.util.sizeFromShape(weightsShape) > 0;
    var counts = tfjsCore.util.makeZerosTypedArray(size, weightsDtype);
    for (var idx = 0; idx < xVals.length; idx++) {
        var bin = xVals[idx];
        if (bin < 0) {
            throw new Error('Input x must be non-negative!');
        }
        if (bin >= size) {
            continue;
        }
        counts[bin] += hasWeights ? weightsVals[idx] : 1;
    }
    return counts;
}
/**
 * Row-wise bincount over a 2D buffer: each row of `xBuf` is binned
 * independently into a `[numRows, size]` output buffer.
 *
 * @param xBuf 2D buffer of non-negative integer bins.
 * @param weightsBuf Per-element weights (size 0 means "count by 1").
 * @param size Number of bins per row; values >= size are dropped.
 * @param binaryOutput When true, the output is 1 wherever a bin was hit,
 *     instead of an accumulated count.
 * @returns Buffer of shape `[numRows, size]`.
 * @throws Error if any value in `xBuf` is negative.
 */
function bincountReduceImpl(xBuf, weightsBuf, size, binaryOutput) {
    if (binaryOutput === void 0) { binaryOutput = false; }
    var rows = xBuf.shape[0];
    var cols = xBuf.shape[1];
    var hasWeights = weightsBuf.size > 0;
    var out = tfjsCore.buffer([rows, size], weightsBuf.dtype);
    for (var r = 0; r < rows; r++) {
        for (var c = 0; c < cols; c++) {
            var bin = xBuf.get(r, c);
            if (bin < 0) {
                throw new Error('Input x must be non-negative!');
            }
            if (bin >= size) {
                continue;
            }
            if (binaryOutput) {
                out.set(1, r, bin);
            } else if (hasWeights) {
                out.set(out.get(r, bin) + weightsBuf.get(r, c), r, bin);
            } else {
                out.set(out.get(r, bin) + 1, r, bin);
            }
        }
    }
    return out;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Template that creates the implementation for a unary op.
 *
 * @param op Scalar function applied to each element; receives the element
 *     value and the kernel attrs.
 * @returns A `(values, dtype, attrs) => typedArray` implementation.
 */
function createSimpleUnaryImpl(op) {
    return function (values, dtype, attrs) {
        var n = values.length;
        var out = tfjsCore.util.getTypedArrayFromDType(dtype, n);
        for (var i = 0; i < n; ++i) {
            out[i] = op(values[i], attrs);
        }
        return out;
    };
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Template that creates a `KernelFunc` for unary ops.
 * @param name Kernel name (used in the complex-dtype assertion).
 * @param op A `SimpleUnaryOperation` for the kernel.
 * @param dtype Optional. If set, the result has this dtype. Otherwise, the
 *     result has the same dtype as the input. This is mainly used in certain
 *     kernels that return bool type, such as isFinite, isInf, etc.
 */
function unaryKernelFunc(name, op, dtype) {
    return function (args) {
        var x = args.inputs.x;
        assertNotComplex(x, name);
        // String tensors have no numeric backing store for `op` to map over.
        if (x.dtype === 'string' || dtype === 'string') {
            throw new Error('unaryKernelFunc does not support string input/output');
        }
        var cpuBackend = args.backend;
        var inVals = cpuBackend.data.get(x.dataId).values;
        var size = tfjsCore.util.sizeFromShape(x.shape);
        var outDtype = dtype || x.dtype;
        var outVals = tfjsCore.util.getArrayFromDType(outDtype, size);
        for (var i = 0; i < size; ++i) {
            outVals[i] = op(inVals[i], args.attrs);
        }
        return cpuBackend.makeTensorInfo(x.shape, outDtype, outVals);
    };
}
/**
 * Template that creates a `KernelFunc` for unary ops from the given
 * `SimpleUnaryImpl`.
 * @param name Kernel name (used in the complex-dtype assertion).
 * @param unaryImpl A `SimpleUnaryImpl` that implements the op.
 * @param dtype Optional. If set, the result has this dtype. Otherwise, the
 *     result has the same dtype as the input. This is mainly used in certain
 *     kernels that return bool type, such as isFinite, isInf, etc.
 */
function unaryKernelFuncFromImpl(name, unaryImpl, dtype) {
    return function (args) {
        var x = args.inputs.x;
        assertNotComplex(x, name);
        // String tensors have no numeric backing store for the impl to map over.
        if (x.dtype === 'string' || dtype === 'string') {
            throw new Error('unaryKernelFunc does not support string input/output');
        }
        var cpuBackend = args.backend;
        var inVals = cpuBackend.data.get(x.dataId).values;
        var outDtype = dtype || x.dtype;
        var outVals = unaryImpl(inVals, outDtype, args.attrs);
        return cpuBackend.makeTensorInfo(x.shape, outDtype, outVals);
    };
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+     * Licensed under the Apache License, Version 2.0 (the "License");
+     * you may not use this file except in compliance with the License.
+     * You may obtain a copy of the License at
+     *
+     * http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise ceiling kernel for the CPU backend.
var ceilImpl = createSimpleUnaryImpl(function (x) {
    return Math.ceil(x);
});
var ceil = unaryKernelFuncFromImpl(tfjsCore.Ceil, ceilImpl);
var ceilConfig = {
    kernelName: tfjsCore.Ceil,
    backendName: 'cpu',
    kernelFunc: ceil,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Concatenates the given inputs into a single flat output array.
 *
 * @param inputs Array of `{shape, vals}` pieces.  On the slow path each piece
 *     is treated as 2D with `shape = [rows, cols]` and `outShape[1]` is the
 *     total output column count.
 * @param outShape Shape of the concatenated result.
 * @param dtype Output dtype ('string' uses uint8-decoded values).
 * @param simplyConcat True when the inputs are contiguous 1D-style blocks
 *     that can be memcpy'd back to back.
 * @returns Flat output values of size `sizeFromShape(outShape)`.
 */
function concatImpl(inputs, outShape, dtype, simplyConcat) {
    var outVals = tfjsCore.util.getArrayFromDType(dtype, tfjsCore.util.sizeFromShape(outShape));
    if (simplyConcat && dtype !== 'string') {
        // Fast path: copy whole blocks with TypedArray.set().
        var offset = 0;
        for (var i = 0; i < inputs.length; i++) {
            outVals.set(inputs[i].vals, offset);
            offset += tfjsCore.util.sizeFromShape(inputs[i].shape);
        }
    } else {
        // Slow path: interleave inputs column-block by column-block.
        var colOffset = 0;
        for (var i = 0; i < inputs.length; i++) {
            var input = inputs[i];
            var decoded = dtype === 'string'
                ? tfjsCore.backend_util.fromUint8ToStringArray(input.vals)
                : input.vals;
            var srcIdx = 0;
            for (var row = 0; row < input.shape[0]; ++row) {
                var dstBase = row * outShape[1] + colOffset;
                for (var col = 0; col < input.shape[1]; ++col) {
                    outVals[dstBase + col] = decoded[srcIdx++];
                }
            }
            colOffset += input.shape[1];
        }
    }
    return outVals;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise equality kernel; emits 1/0 into a bool tensor.
var equalImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return lhs === rhs ? 1 : 0;
});
var equal = binaryKernelFunc(tfjsCore.Equal, equalImpl, null /* no complex impl */, 'bool');
var equalConfig = {
    kernelName: tfjsCore.Equal,
    backendName: 'cpu',
    kernelFunc: equal
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+     * Licensed under the Apache License, Version 2.0 (the "License");
+     * you may not use this file except in compliance with the License.
+     * You may obtain a copy of the License at
+     *
+     * http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise exponential kernel; result is always float32.
var expImpl = createSimpleUnaryImpl(function (x) {
    return Math.exp(x);
});
var exp = unaryKernelFuncFromImpl(tfjsCore.Exp, expImpl, 'float32');
var expConfig = {
    kernelName: tfjsCore.Exp,
    backendName: 'cpu',
    kernelFunc: exp,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+     * Licensed under the Apache License, Version 2.0 (the "License");
+     * you may not use this file except in compliance with the License.
+     * You may obtain a copy of the License at
+     *
+     * http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise expm1 kernel (exp(x) - 1, accurate near zero).
var expm1Impl = createSimpleUnaryImpl(function (x) {
    return Math.expm1(x);
});
var expm1 = unaryKernelFuncFromImpl(tfjsCore.Expm1, expm1Impl);
var expm1Config = {
    kernelName: tfjsCore.Expm1,
    backendName: 'cpu',
    kernelFunc: expm1,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+     * Licensed under the Apache License, Version 2.0 (the "License");
+     * you may not use this file except in compliance with the License.
+     * You may obtain a copy of the License at
+     *
+     * http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise floor kernel for the CPU backend.
var floorImpl = createSimpleUnaryImpl(function (x) {
    return Math.floor(x);
});
var floor = unaryKernelFuncFromImpl(tfjsCore.Floor, floorImpl);
var floorConfig = {
    kernelName: tfjsCore.Floor,
    backendName: 'cpu',
    kernelFunc: floor,
};
+
/**
 * Gathers `numSlices` slices of `sliceSize` elements from `paramsBuf`,
 * addressed by `indicesData` (flattened `[numSlices, sliceRank]` indices).
 *
 * @returns Buffer of shape `[numSlices, sliceSize]`.
 * @throws Error when a computed index falls outside the params.
 */
function gatherNdImpl(indicesData, paramsBuf, dtype, numSlices, sliceRank, sliceSize, strides, paramsShape, paramsSize) {
    var outBuf = tfjsCore.buffer([numSlices, sliceSize], dtype);
    for (var slice = 0; slice < numSlices; slice++) {
        var loc = [];
        var flatIndex = 0;
        for (var d = 0; d < sliceRank; d++) {
            var coord = indicesData[slice * sliceRank + d];
            flatIndex += coord * strides[d];
            loc.push(coord);
        }
        if (flatIndex < 0 || flatIndex >= paramsSize / sliceSize) {
            throw new Error("Invalid indices: " + loc + " does not index into " + paramsShape);
        }
        for (var k = 0; k < sliceSize; k++) {
            // indexToLoc returns a plain array, which `apply` accepts directly.
            outBuf.values[slice * sliceSize + k] =
                paramsBuf.get.apply(paramsBuf, paramsBuf.indexToLoc(flatIndex * sliceSize + k));
        }
    }
    return outBuf;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Gather along one axis, operating on buffers pre-flattened to the layout
 * `[batch, outerDims, indices, sliceDims]` (dimension 2 is the gathered one).
 *
 * @param xBuf Source buffer in the flattened 4D layout.
 * @param indicesBuf 2D buffer of `[batch, index]` lookups.
 * @param flattenOutputShape Output shape in the same flattened layout.
 * @returns Buffer with dimension 2 replaced through the indices.
 */
function gatherV2Impl(xBuf, indicesBuf, flattenOutputShape) {
    var outBuf = tfjsCore.buffer(flattenOutputShape, xBuf.dtype);
    for (var i = 0; i < outBuf.size; ++i) {
        var outLoc = outBuf.indexToLoc(i);
        var srcLoc = outLoc.slice();
        // Look dimension 2 up through the indices buffer, keyed by batch.
        var idxFlat = indicesBuf.locToIndex([srcLoc[0], srcLoc[2]]);
        srcLoc[2] = indicesBuf.values[idxFlat];
        outBuf.values[i] = xBuf.values[xBuf.locToIndex(srcLoc)];
    }
    return outBuf;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise greater-than kernel; emits 1/0 into a bool tensor.
var greaterImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return lhs > rhs ? 1 : 0;
});
var greater = binaryKernelFunc(tfjsCore.Greater, greaterImpl, null /* no complex impl */, 'bool');
var greaterConfig = {
    kernelName: tfjsCore.Greater,
    backendName: 'cpu',
    kernelFunc: greater
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise greater-or-equal kernel; emits 1/0 into a bool tensor.
var greaterEqualImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return lhs >= rhs ? 1 : 0;
});
var greaterEqual = binaryKernelFunc(tfjsCore.GreaterEqual, greaterEqualImpl, null /* no complex impl */, 'bool');
var greaterEqualConfig = {
    kernelName: tfjsCore.GreaterEqual,
    backendName: 'cpu',
    kernelFunc: greaterEqual
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise less-than kernel; emits 1/0 into a bool tensor.
var lessImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return lhs < rhs ? 1 : 0;
});
var less = binaryKernelFunc(tfjsCore.Less, lessImpl, null /* no complex impl */, 'bool');
var lessConfig = {
    kernelName: tfjsCore.Less,
    backendName: 'cpu',
    kernelFunc: less
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise less-or-equal kernel; emits 1/0 into a bool tensor.
var lessEqualImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return lhs <= rhs ? 1 : 0;
});
var lessEqual = binaryKernelFunc(tfjsCore.LessEqual, lessEqualImpl, null /* no complex impl */, 'bool');
var lessEqualConfig = {
    kernelName: tfjsCore.LessEqual,
    backendName: 'cpu',
    kernelFunc: lessEqual
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Produces `num` evenly spaced float32 values from `start` towards `stop`.
 * Values are built by cumulative addition (kept deliberately, so float
 * rounding matches the original implementation bit-for-bit).
 *
 * @param start First value of the sequence.
 * @param stop Last value of the sequence (reached at index num - 1).
 * @param num Number of samples.
 * @returns Float32 typed array of length `num`.
 */
function linSpaceImpl(start, stop, num) {
    var increment = (stop - start) / (num - 1);
    var result = tfjsCore.util.makeZerosTypedArray(num, 'float32');
    if (num > 0) {
        result[0] = start;
    }
    for (var i = 1; i < num; i++) {
        result[i] = result[i - 1] + increment;
    }
    return result;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+     * Licensed under the Apache License, Version 2.0 (the "License");
+     * you may not use this file except in compliance with the License.
+     * You may obtain a copy of the License at
+     *
+     * http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise natural logarithm kernel for the CPU backend.
var logImpl = createSimpleUnaryImpl(function (x) {
    return Math.log(x);
});
var log = unaryKernelFuncFromImpl(tfjsCore.Log, logImpl);
var logConfig = {
    kernelName: tfjsCore.Log,
    backendName: 'cpu',
    kernelFunc: log,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Reduces consecutive windows of `reduceSize` elements of `aVals` to their
 * maximum.
 *
 * @param aVals Flat input of length `size(outShape) * reduceSize`.
 * @param reduceSize Number of elements folded into each output element.
 * @param outShape Shape of the reduced output.
 * @param dtype Output dtype.
 * @returns Typed array of per-window maxima.
 */
function maxImpl(aVals, reduceSize, outShape, dtype) {
    var out = tfjsCore.util.getTypedArrayFromDType(dtype, tfjsCore.util.sizeFromShape(outShape));
    for (var outIdx = 0; outIdx < out.length; ++outIdx) {
        var base = outIdx * reduceSize;
        var best = aVals[base];
        for (var j = 0; j < reduceSize; ++j) {
            var candidate = aVals[base + j];
            // NaN propagates: once `best` is NaN, `>` is always false, and the
            // explicit isNaN check makes NaN win over any prior value.
            if (Number.isNaN(candidate) || candidate > best) {
                best = candidate;
            }
        }
        out[outIdx] = best;
    }
    return out;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise maximum kernel for the CPU backend.
var maximumImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return Math.max(lhs, rhs);
});
var maximum = binaryKernelFunc(tfjsCore.Maximum, maximumImpl);
var maximumConfig = {
    kernelName: tfjsCore.Maximum,
    backendName: 'cpu',
    kernelFunc: maximum
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise minimum kernel for the CPU backend.
var minimumImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return Math.min(lhs, rhs);
});
var minimum = binaryKernelFunc(tfjsCore.Minimum, minimumImpl);
var minimumConfig = {
    kernelName: tfjsCore.Minimum,
    backendName: 'cpu',
    kernelFunc: minimum
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise multiplication kernel for the CPU backend.
var multiplyImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return lhs * rhs;
});
// Complex product: (a + bi)(c + di) = (ac - bd) + (ad + bc)i.
var multiplyComplexImpl = createComplexBinaryKernelImpl(function (aRe, aIm, bRe, bIm) {
    return {
        real: aRe * bRe - aIm * bIm,
        imag: aRe * bIm + aIm * bRe
    };
});
var multiply = binaryKernelFunc(tfjsCore.Multiply, multiplyImpl, multiplyComplexImpl);
var multiplyConfig = {
    kernelName: tfjsCore.Multiply,
    backendName: 'cpu',
    kernelFunc: multiply
};
+
/**
 * Negation implemented as a broadcasted multiply by a scalar -1, reusing
 * `multiplyImpl` (scalar shape `[]` broadcasts against `xShape`).
 * @returns `[resultVals, resultShape]` as produced by the binary impl.
 */
function negImpl(xVals, xShape, xDtype) {
    var minusOne = tfjsCore.util.createScalarValue(-1, xDtype);
    return multiplyImpl([], xShape, minusOne, xVals, xDtype);
}
// Neg kernel: negates every element of the input tensor.
function neg(args) {
    var x = args.inputs.x;
    var backend = args.backend;
    assertNotComplex(x, 'neg');
    var xVals = backend.data.get(x.dataId).values;
    var _a = __read(negImpl(xVals, x.shape, x.dtype), 2);
    var outVals = _a[0];
    var outShape = _a[1];
    return backend.makeTensorInfo(outShape, x.dtype, outVals);
}
var negConfig = {
    kernelName: tfjsCore.Neg,
    backendName: 'cpu',
    kernelFunc: neg
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Element-wise inequality kernel; emits 1/0 into a bool tensor.
var notEqualImpl = createSimpleBinaryKernelImpl(function (lhs, rhs) {
    return lhs !== rhs ? 1 : 0;
});
var notEqual = binaryKernelFunc(tfjsCore.NotEqual, notEqualImpl, null /* no complex impl */, 'bool');
var notEqualConfig = {
    kernelName: tfjsCore.NotEqual,
    backendName: 'cpu',
    kernelFunc: notEqual
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Permutes the axes of a dense tensor.
 *
 * @param xVals Flat input values.
 * @param xShape Input shape.
 * @param dtype Output dtype.
 * @param perm Axis permutation: output axis `i` takes input axis `perm[i]`.
 * @param newShape Pre-computed permuted shape.
 * @returns Flat output values in the permuted layout.
 */
function transposeImpl(xVals, xShape, dtype, perm, newShape) {
    var rank = xShape.length;
    var size = tfjsCore.util.sizeFromShape(xShape);
    var srcStrides = tfjsCore.util.computeStrides(xShape);
    var dstStrides = tfjsCore.util.computeStrides(newShape);
    var out = tfjsCore.util.getTypedArrayFromDType(dtype, tfjsCore.util.sizeFromShape(newShape));
    for (var i = 0; i < size; ++i) {
        var srcLoc = tfjsCore.util.indexToLoc(i, rank, srcStrides);
        // Permute the coordinate, then scatter into the output layout.
        var dstLoc = new Array(rank);
        for (var axis = 0; axis < rank; axis++) {
            dstLoc[axis] = srcLoc[perm[axis]];
        }
        out[tfjsCore.util.locToIndex(dstLoc, rank, dstStrides)] = xVals[i];
    }
    return out;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Transpose kernel: permutes tensor axes per `attrs.perm` and writes the
// permuted data through the backend.
function transpose(args) {
    var x = args.inputs.x;
    var perm = args.attrs.perm;
    var backend = args.backend;
    assertNotComplex(x, 'transpose');
    var rank = x.shape.length;
    // Output axis i takes its extent from input axis perm[i].
    var outShape = new Array(rank);
    for (var axis = 0; axis < rank; axis++) {
        outShape[axis] = x.shape[perm[axis]];
    }
    var xVals = backend.data.get(x.dataId).values;
    var outVals = transposeImpl(xVals, x.shape, x.dtype, perm, outShape);
    var dataId = backend.write(outVals, outShape, x.dtype);
    return { dataId: dataId, shape: outShape, dtype: x.dtype };
}
var transposeConfig = {
    kernelName: tfjsCore.Transpose,
    backendName: 'cpu',
    kernelFunc: transpose
};
+
/**
 * Reduces the innermost `reductionAxes` of `xVals` by multiplication.
 * @returns `{outVals, outShape, outDtype}`; dtype is upcast to at least int32.
 */
function prodImpl(xShape, xDtype, xVals, reductionAxes) {
    var shapes = tfjsCore.backend_util.computeOutAndReduceShapes(xShape, reductionAxes);
    var outShape = shapes[0];
    var reduceShape = shapes[1];
    var outDtype = tfjsCore.upcastType(xDtype, 'int32');
    var outVals = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape(outShape), outDtype);
    var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
    for (var outIdx = 0; outIdx < outVals.length; ++outIdx) {
        var base = outIdx * reduceSize;
        var product = 1;
        for (var j = 0; j < reduceSize; ++j) {
            product *= xVals[base + j];
        }
        outVals[outIdx] = product;
    }
    return { outVals: outVals, outShape: outShape, outDtype: outDtype };
}
// Prod kernel: product reduction over `attrs.axis`, optionally keeping dims.
function prod(args) {
    var x = args.inputs.x;
    var _a = args.attrs;
    var axis = _a.axis;
    var keepDims = _a.keepDims;
    var backend = args.backend;
    assertNotComplex(x, 'prod');
    var xRank = x.shape.length;
    var axes = tfjsCore.util.parseAxisParam(axis, x.shape);
    var permutation = tfjsCore.backend_util.getAxesPermutation(axes, xRank);
    var reductionAxes = axes;
    var permutedX = x;
    var toDispose = [];
    if (permutation != null) {
        // Move the reduced axes innermost so prodImpl reads them contiguously.
        permutedX = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutation } });
        toDispose.push(permutedX);
        reductionAxes = tfjsCore.backend_util.getInnerMostAxes(reductionAxes.length, xRank);
    }
    var xVals = backend.data.get(permutedX.dataId).values;
    var reduced = prodImpl(permutedX.shape, permutedX.dtype, xVals, reductionAxes);
    var resultShape = keepDims
        ? tfjsCore.backend_util.expandShapeToKeepDim(reduced.outShape, axes)
        : reduced.outShape;
    toDispose.forEach(function (t) { return backend.disposeIntermediateTensorInfo(t); });
    return backend.makeTensorInfo(resultShape, reduced.outDtype, reduced.outVals);
}
var prodConfig = {
    kernelName: tfjsCore.Prod,
    backendName: 'cpu',
    kernelFunc: prod
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Builds a [start, stop) arithmetic sequence with the given step.
 *
 * @param start First value.
 * @param stop Exclusive end value.
 * @param step Increment; a step of exactly 1 is auto-flipped to -1 for
 *     decreasing ranges.
 * @param dtype Output dtype.
 * @returns Typed array of the sequence (possibly empty).
 */
function rangeImpl(start, stop, step, dtype) {
    // An empty range results when start === stop or the step walks away from
    // stop.  NOTE(review): the `step > 1` test (rather than `step > 0`)
    // mirrors the existing behavior exactly - a decreasing range with
    // 0 < step <= 1 is not rejected here; only step === 1 is flipped below.
    if (start === stop || (start < stop && step < 0) ||
        (stop < start && step > 1)) {
        return tfjsCore.util.makeZerosTypedArray(0, dtype);
    }
    var count = Math.abs(Math.ceil((stop - start) / step));
    var out = tfjsCore.util.makeZerosTypedArray(count, dtype);
    if (stop < start && step === 1) {
        // Auto adjust the step's sign if it hasn't been set
        // (or was set to 1).
        step = -1;
    }
    out[0] = start;
    for (var i = 1; i < out.length; i++) {
        out[i] = out[i - 1] + step;
    }
    return out;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/** Element-wise reciprocal square root: 1 / sqrt(x). */
var rsqrtImpl = createSimpleUnaryImpl(function (xi) {
    return 1 / Math.sqrt(xi);
});
var rsqrt = unaryKernelFuncFromImpl(tfjsCore.Rsqrt, rsqrtImpl);
/** Kernel registration record for Rsqrt on the CPU backend. */
var rsqrtConfig = {
    kernelName: tfjsCore.Rsqrt,
    backendName: 'cpu',
    kernelFunc: rsqrt,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Element-wise logistic sigmoid: 1 / (1 + exp(-x)).
 * The scalar op is defined once and shared by both the standalone impl
 * (reusable by fused kernels) and the kernel func, instead of
 * duplicating the identical lambda in two places.
 */
var sigmoidOpScalar = function (xi) { return 1 / (1 + Math.exp(-xi)); };
var sigmoidImpl = createSimpleUnaryImpl(sigmoidOpScalar);
var sigmoid = unaryKernelFunc(tfjsCore.Sigmoid, sigmoidOpScalar);
/** Kernel registration record for Sigmoid on the CPU backend. */
var sigmoidConfig = {
    kernelName: tfjsCore.Sigmoid,
    backendName: 'cpu',
    kernelFunc: sigmoid,
};
+
/**
 * Extracts the window of `size` elements starting at `begin` from the
 * flat backing array `vals` of a tensor with the given `shape`.
 * String tensors arrive as arrays (sliced with Array#slice), numeric
 * tensors as TypedArrays (viewed with subarray on the fast path).
 */
function sliceImpl(vals, begin, size, shape, dtype) {
    var length = tfjsCore.util.sizeFromShape(size);
    var xStrides = tfjsCore.util.computeStrides(shape);
    // Fast path: the requested window is contiguous in memory, so a
    // single offset + length view suffices.
    if (tfjsCore.slice_util.isSliceContinous(shape, begin, size)) {
        var flatOffset = tfjsCore.slice_util.computeFlatOffset(begin, xStrides);
        return dtype === 'string' ?
            vals.slice(flatOffset, flatOffset + length) :
            vals.subarray(flatOffset, flatOffset + length);
    }
    // General path: walk every output coordinate and gather from the
    // corresponding (shifted) input coordinate.
    var decodedData = dtype === 'string' ?
        tfjsCore.backend_util.fromUint8ToStringArray(vals) :
        vals;
    var inBuf = tfjsCore.buffer(shape, dtype, decodedData);
    var outBuf = tfjsCore.buffer(size, dtype);
    for (var i = 0; i < outBuf.size; ++i) {
        var outLoc = outBuf.indexToLoc(i);
        var inLoc = outLoc.map(function (idx, j) { return idx + begin[j]; });
        outBuf.set.apply(outBuf, [inBuf.get.apply(inBuf, inLoc)].concat(outLoc));
    }
    return dtype === 'string' ?
        tfjsCore.backend_util.fromStringArrayToUint8(outBuf.values) :
        outBuf.values;
}
/**
 * Slice kernel entry point for the CPU backend: normalizes the
 * begin/size attrs, validates them against the input tensor, and
 * delegates to sliceImpl on the raw backing values.
 */
function slice(args) {
    var x = args.inputs.x;
    var backend = args.backend;
    var begin = args.attrs.begin;
    var size = args.attrs.size;
    assertNotComplex(x, 'slice');
    var parsed = tfjsCore.slice_util.parseSliceParams(x, begin, size);
    var $begin = parsed[0];
    var $size = parsed[1];
    tfjsCore.slice_util.assertParamsValid(x, $begin, $size);
    var vals = backend.data.get(x.dataId).values;
    var outVals = sliceImpl(vals, $begin, $size, x.shape, x.dtype);
    return backend.makeTensorInfo($size, x.dtype, outVals);
}
/** Kernel registration record for Slice on the CPU backend. */
var sliceConfig = {
    kernelName: tfjsCore.Slice,
    backendName: 'cpu',
    kernelFunc: slice
};
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU implementation of SparseFillEmptyRows: rows of the sparse tensor
 * (described by `indices`/`values` with dense shape `denseShape`) that
 * contain no elements receive a single entry holding `defaultValue`.
 *
 * `indices` is the flattened [N, rank] coordinate array with the dense
 * row id in column 0. Returns [outputIndices, outputIndicesShape,
 * outputValues, emptyRowIndicator, reverseIndexMap], where
 * reverseIndexMap maps each original entry to its position in the
 * filled output (needed for backprop).
 */
function sparseFillEmptyRowsImpl(indices, indicesShape, indicesDType, values, valuesDType, denseShape, defaultValue) {
    var indicesCount = indicesShape[0];
    var denseRows = denseShape[0];
    var emptyRowIndicator = new Array(denseRows);
    var reverseIndexMap = new Array(indicesCount);
    var rank = indicesShape[1];
    if (denseRows === 0) {
        if (indicesCount !== 0) {
            throw new Error(tfjsCore.backend_util.getSparseFillEmptyRowsIndicesDenseShapeMismatch(indicesCount));
        }
        // Degenerate case: zero dense rows — return empty outputs.
        var outputIndices = tfjsCore.util.getArrayFromDType(indicesDType, 0);
        var outputValues = tfjsCore.util.getArrayFromDType(valuesDType, 0);
        return [
            outputIndices, [0, rank], outputValues, emptyRowIndicator, reverseIndexMap
        ];
    }
    var rowsAreOrdered = true;
    var lastIndicesRow = 0;
    var csrOffset = new Array(denseRows).fill(0);
    // First pass: count elements per dense row and validate row ids.
    for (var i = 0; i < indicesCount; ++i) {
        // indices is a 2d tensor with shape of [N, rank]
        var row = indices[i * rank];
        if (row < 0) {
            throw new Error(tfjsCore.backend_util.getSparseFillEmptyRowsNegativeIndexErrorMessage(i, row));
        }
        if (row >= denseRows) {
            throw new Error(tfjsCore.backend_util.getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(i, row, denseRows));
        }
        ++csrOffset[row];
        rowsAreOrdered = rowsAreOrdered && (row >= lastIndicesRow);
        lastIndicesRow = row;
    }
    var allRowsFull = true;
    for (var row = 0; row < denseRows; ++row) {
        // csrOffset here describes the number of elements in this dense row
        var rowEmpty = (csrOffset[row] === 0);
        emptyRowIndicator[row] = rowEmpty;
        allRowsFull = allRowsFull && !rowEmpty;
        // In filled version, each row has at least one element.
        csrOffset[row] = Math.max(csrOffset[row], 1);
        // Update csrOffset to represent the number of elements up to and
        // including denseRows + 1:
        // csrOffset[0] == #{elements of row 0}
        // csrOffset[1] == #{elements of row 1} + #{elements of row 0}
        // ..
        // csrOffset[i] == starting index for elements in row i + 1.
        if (row > 0) {
            csrOffset[row] += csrOffset[row - 1];
        }
    }
    if (allRowsFull && rowsAreOrdered) {
        // Fast path: nothing to fill and the input is already row-ordered,
        // so the input tensors can be passed through unchanged.
        var outputIndices = indices;
        var outputValues = values;
        for (var i = 0; i < indicesCount; ++i) {
            reverseIndexMap[i] = i;
        }
        return [
            outputIndices, [indicesCount, rank], outputValues, emptyRowIndicator,
            reverseIndexMap
        ];
    }
    else {
        // General path: rebuild indices/values with one slot reserved per
        // empty row (csrOffset now holds cumulative, clamped row counts).
        var fullIndicesCount = csrOffset[denseRows - 1];
        var outputIndices = tfjsCore.util.getArrayFromDType(indicesDType, fullIndicesCount * rank);
        var outputValues = tfjsCore.util.getArrayFromDType(valuesDType, fullIndicesCount);
        var filledCount = new Array(denseRows).fill(0);
        // Fill in values for rows that are not missing
        for (var i = 0; i < indicesCount; ++i) {
            // indices is a 2d tensor with shape of [N, rank]
            var row = indices[i * rank];
            var offset = filledCount[row];
            var outputI = ((row === 0) ? 0 : csrOffset[row - 1]) + offset;
            filledCount[row]++; // Increment the filled count for this row.
            for (var j = 0; j < rank; ++j) {
                // indices and outputIndices are 2d tensors with shape of [N, rank]
                outputIndices[outputI * rank + j] = indices[i * rank + j];
            }
            outputValues[outputI] = values[i];
            // We'll need this reverse index map to backprop correctly.
            reverseIndexMap[i] = outputI;
        }
        // Fill in values for rows that are missing
        for (var row = 0; row < denseRows; ++row) {
            var rowCount = filledCount[row];
            if (rowCount === 0) { // We haven't filled this row
                var startingIndex = (row === 0) ? 0 : csrOffset[row - 1];
                // Remaining index values were set to zero already.
                // Just need to set the row index in the right location.
                // outputIndices is a 2d tensor with shape of [N, rank]
                outputIndices[startingIndex * rank + 0] = row;
                for (var col = 1; col < rank; ++col) {
                    outputIndices[startingIndex * rank + col] = 0;
                }
                outputValues[startingIndex] = defaultValue;
            }
        }
        return [
            outputIndices, [fullIndicesCount, rank], outputValues, emptyRowIndicator,
            reverseIndexMap
        ];
    }
}
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU implementation of SparseReshape: recomputes the coordinates of a
 * sparse tensor's non-zero entries for a new dense shape, resolving at
 * most one -1 ("infer this dimension") entry in `targetShape`.
 *
 * `inputIndices` is the flattened [nnz, inputRank] coordinate array.
 * Returns [newIndices, [nnz, outputRank], outputShape].
 */
function sparseReshapeImpl(inputIndices, inputIndicesShape, inputDType, inputShape, targetShape) {
    var denseSize = tfjsCore.util.sizeFromShape(inputShape);
    var nnz = inputIndicesShape[0];
    var outputRank = targetShape.length;
    // Compute the output shape. Determine product of specified dimensions, and
    // find the index of the unspecified one.
    var outputShape = [];
    var product = 1;
    var unknownIndex = -1;
    for (var d = 0; d < outputRank; ++d) {
        var size = targetShape[d];
        if (size === -1) {
            if (unknownIndex !== -1) {
                throw new Error(tfjsCore.backend_util
                    .getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(unknownIndex, d));
            }
            unknownIndex = d;
            outputShape.push(1);
        }
        else {
            if (size < 0) {
                throw new Error(tfjsCore.backend_util.getSparseReshapeNegativeOutputDimErrorMessage(d, size));
            }
            product *= size;
            outputShape.push(size);
        }
    }
    if (unknownIndex !== -1) {
        if (product <= 0) {
            throw new Error(tfjsCore.backend_util.getSparseReshapeEmptyTensorZeroOutputDimErrorMessage());
        }
        // The inferred dimension must divide the total element count evenly.
        var missing = Math.trunc(denseSize / product);
        if (product * missing !== denseSize) {
            throw new Error(tfjsCore.backend_util.getSparseReshapeInputOutputMultipleErrorMessage(inputShape, outputShape));
        }
        outputShape[unknownIndex] = missing;
    }
    var outputSize = tfjsCore.util.sizeFromShape(outputShape);
    if (outputSize !== denseSize) {
        throw new Error(tfjsCore.backend_util.getSparseReshapeInputOutputMismatchErrorMessage(inputShape, outputShape));
    }
    // Row-major strides of the input shape: linearize each input
    // coordinate into a flat element id.
    var inputRank = inputShape.length;
    var inputStrides = [];
    if (inputRank > 0) {
        inputStrides[inputRank - 1] = 1;
        for (var d = inputRank - 2; d >= 0; --d) {
            inputStrides[d] = inputStrides[d + 1] * inputShape[d + 1];
        }
    }
    // Row-major strides of the output shape: re-expand the flat id into
    // output coordinates.
    var outputStrides = [];
    if (outputRank > 0) {
        outputStrides[outputRank - 1] = 1;
        for (var d = outputRank - 2; d >= 0; --d) {
            outputStrides[d] = outputStrides[d + 1] * outputShape[d + 1];
        }
    }
    var newIndices = tfjsCore.util.getArrayFromDType(inputDType, nnz * outputRank);
    for (var i = 0; i < nnz; ++i) {
        var id = 0;
        for (var j = 0; j < inputRank; ++j) {
            // inputIndices is a 2d tensor with shape of [nnz, inputRank]
            id += inputIndices[i * inputRank + j] * inputStrides[j];
        }
        for (var j = 0; j < outputRank; ++j) {
            // newIndices is a 2d tensor with shape of [nnz, outputRank]
            newIndices[i * outputRank + j] = Math.trunc(id / outputStrides[j]);
            id %= outputStrides[j];
        }
    }
    return [newIndices, [nnz, outputRank], outputShape];
}
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Shared CPU implementation for SparseSegmentSum / SparseSegmentMean:
 * gathers `indices` rows from `input` (viewed as 2-D [rows, cols]) and
 * reduces each run of equal, sorted `segmentIds` into one output row.
 * Segments that receive no entries are filled with `defaultValue`; when
 * `isMean` is true each segment's sum is divided by its element count.
 *
 * Returns [output, outputShape].
 */
function sparseSegmentReductionImpl(input, inputShape, inputDType, indices, segmentIds, isMean, defaultValue) {
    if (isMean === void 0) { isMean = false; }
    if (defaultValue === void 0) { defaultValue = 0; }
    var numIndices = indices.length;
    // Flatten the array to two dimensions
    var inputFlat = [inputShape[0], input.length / inputShape[0]];
    var numCol = inputFlat[1];
    // Note that the current implementation assumes that segmentIds values are
    // sorted.
    var lastSegmentIdPlusOne = numIndices > 0 ? segmentIds[numIndices - 1] + 1 : 0;
    var outputRows = lastSegmentIdPlusOne;
    if (outputRows < 0) {
        throw new Error(tfjsCore.backend_util.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());
    }
    var outputShape = inputShape.slice();
    outputShape[0] = outputRows;
    var outputLength = outputShape.reduce(function (product, value) { return product * value; }, 1);
    // The buffer starts zero-filled (getArrayFromDType); segments that end
    // up with no entries must still be explicitly filled with
    // `defaultValue`, which may differ from 0.
    var output = tfjsCore.util.getArrayFromDType(inputDType, outputLength);
    if (numIndices === 0) {
        if (outputRows > 0) {
            output.fill(defaultValue);
        }
        return [output, outputShape];
    }
    // outputRows === 0 with indices present means the last segment id was
    // negative.
    if (outputRows <= 0) {
        throw new Error(tfjsCore.backend_util.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());
    }
    var start = 0, end = 1;
    // Index from which the output is not initialized.
    var uninitializedIndex = 0;
    var outIndex = segmentIds[start];
    while (true) {
        // We initialize nextIndex to 0 to avoid may be uninitialized warning
        var nextIndex = 0;
        if (end < numIndices) {
            nextIndex = segmentIds[end];
            if (outIndex === nextIndex) {
                ++end;
                continue;
            }
            // We have a new segment here. Verify that the segment ids are growing.
            if (outIndex >= nextIndex) {
                throw new Error(tfjsCore.backend_util
                    .getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage());
            }
        }
        if (outIndex < 0 || outIndex >= outputRows) {
            throw new Error(tfjsCore.backend_util.getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(outIndex, outputRows));
        }
        // If there is a gap between two indices, we need to set that gap to the
        // default value.
        if (outIndex > uninitializedIndex) {
            output.fill(defaultValue, uninitializedIndex * numCol, outIndex * numCol);
        }
        // Accumulate input rows [start, end) into this segment's output row.
        for (var i = start; i < end; ++i) {
            var index = indices[i];
            if (index < 0 || index >= inputFlat[0]) {
                throw new Error(tfjsCore.backend_util.getSparseSegmentReductionIndicesOutOfRangeErrorMessage(i, indices[i], inputFlat[0]));
            }
            for (var j = 0; j < numCol; j++) {
                output[outIndex * numCol + j] += input[index * numCol + j];
            }
        }
        if (isMean) {
            for (var j = 0; j < numCol; j++) {
                output[outIndex * numCol + j] /= end - start;
            }
        }
        start = end;
        ++end;
        uninitializedIndex = outIndex + 1;
        outIndex = nextIndex;
        if (end > numIndices) {
            break;
        }
    }
    // Fill the gap at the end with the default value.
    if (uninitializedIndex < outputRows) {
        output.fill(defaultValue, uninitializedIndex * numCol, outputRows * numCol);
    }
    return [output, outputShape];
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Element-wise square root kernel. The scalar op is defined once and
 * shared between the standalone impl (reusable by other kernels) and
 * the kernel func, instead of duplicating the identical lambda twice.
 */
var sqrtOpScalar = function (xi) { return Math.sqrt(xi); };
var sqrtImpl = createSimpleUnaryImpl(sqrtOpScalar);
var sqrt = unaryKernelFunc(tfjsCore.Sqrt, sqrtOpScalar);
/** Kernel registration record for Sqrt on the CPU backend. */
var sqrtConfig = {
    kernelName: tfjsCore.Sqrt,
    backendName: 'cpu',
    kernelFunc: sqrt,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/** Element-wise squared difference: (a - b)^2. */
var squaredDifferenceImpl = createSimpleBinaryKernelImpl(function (a, b) {
    var delta = a - b;
    return delta * delta;
});
var squaredDifference = binaryKernelFunc(tfjsCore.SquaredDifference, squaredDifferenceImpl);
/** Kernel registration record for SquaredDifference on the CPU backend. */
var squaredDifferenceConfig = {
    kernelName: tfjsCore.SquaredDifference,
    backendName: 'cpu',
    kernelFunc: squaredDifference
};
+
/**
 * Gathers a strided slice into a fresh buffer: output location `loc`
 * reads input location begin[axis] + loc[axis] * strides[axis] along
 * each axis.
 */
function stridedSliceImpl(outShape, xBuf, strides, begin) {
    var outBuf = tfjsCore.buffer(outShape, xBuf.dtype);
    for (var i = 0; i < outBuf.size; i++) {
        var outLoc = outBuf.indexToLoc(i);
        var inLoc = outLoc.map(function (coord, axis) {
            return coord * strides[axis] + begin[axis];
        });
        outBuf.set.apply(outBuf, [xBuf.get.apply(xBuf, inLoc)].concat(outLoc));
    }
    return outBuf;
}
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * The StringNGramsOp class creates ngrams from ragged string data.
+ * The constructor contains all attributes related to the operation such as
+ * padding widths and strings, and the compute function can be used to
+ * compute the ngrams for different ragged tensor inputs.
+ */
var StringNGramsOp = /** @class */ (function () {
    // Separator and pad strings are stored UTF-8 encoded (Uint8Array) so
    // that ngram assembly below can work byte-wise.
    function StringNGramsOp(separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) {
        this.separator = tfjsCore.util.encodeString(separator);
        this.nGramWidths = nGramWidths;
        this.leftPad = tfjsCore.util.encodeString(leftPad);
        this.rightPad = tfjsCore.util.encodeString(rightPad);
        this.padWidth = padWidth;
        this.preserveShort = preserveShortSequences;
    }
    StringNGramsOp.prototype.getPadWidth = function (nGramWidth) {
        // Ngrams can be padded with either a fixed pad width or a dynamic pad
        // width depending on the 'padWidth' arg, but in no case should the padding
        // ever be wider than 'nGramWidth' - 1.
        return Math.min(this.padWidth < 0 ? nGramWidth - 1 : this.padWidth, nGramWidth - 1);
    };
    // Number of ngrams of width `nGramWidth` that a sequence of `length`
    // tokens yields once edge padding is accounted for.
    StringNGramsOp.prototype.getNumNGrams = function (length, nGramWidth) {
        var padWidth = this.getPadWidth(nGramWidth);
        return Math.max(0, ((length + 2 * padWidth) - nGramWidth) + 1);
    };
    // Writes `numNGrams` ngrams of width `nGramWidth` for the sequence
    // starting at data[splitIndex] into output[outputStartIndex...].
    StringNGramsOp.prototype.createNGrams = function (data, splitIndex, output, outputStartIndex, numNGrams, nGramWidth) {
        var _loop_1 = function (nGramIndex) {
            var padWidth = this_1.getPadWidth(nGramWidth);
            var leftPadding = Math.max(0, padWidth - nGramIndex);
            var rightPadding = Math.max(0, padWidth - (numNGrams - (nGramIndex + 1)));
            var numTokens = nGramWidth - (leftPadding + rightPadding);
            var dataStartIndex = splitIndex + (leftPadding > 0 ? 0 : nGramIndex - padWidth);
            // Calculate the total expected size of the nGram so we can reserve the
            // correct amount of space in the string.
            var nGramSize = 0;
            // Size of the left padding.
            nGramSize += leftPadding * this_1.leftPad.length;
            // Size of the tokens.
            for (var n = 0; n < numTokens; ++n) {
                nGramSize += data[dataStartIndex + n].length;
            }
            // Size of the right padding.
            nGramSize += rightPadding * this_1.rightPad.length;
            // Size of the separators.
            var numSeparators = leftPadding + rightPadding + numTokens - 1;
            nGramSize += numSeparators * this_1.separator.length;
            // Build the nGram.
            output[outputStartIndex + nGramIndex] = new Uint8Array(nGramSize);
            var nGram = output[outputStartIndex + nGramIndex];
            var nextNGramIndex = 0;
            var appendToNGram = function (str) { return str.forEach(function (value) { return nGram[nextNGramIndex++] = value; }); };
            for (var n = 0; n < leftPadding; ++n) {
                appendToNGram(this_1.leftPad);
                appendToNGram(this_1.separator);
            }
            // Only output first numTokens - 1 pairs of data and separator
            for (var n = 0; n < numTokens - 1; ++n) {
                appendToNGram(data[dataStartIndex + n]);
                appendToNGram(this_1.separator);
            }
            // Handle case when there are no tokens or no right padding as these
            // can result in consecutive separators.
            if (numTokens > 0) {
                // If we have tokens, then output last and then pair each separator
                // with the right padding that follows, to ensure nGram ends either with
                // the token or with the right pad.
                appendToNGram(data[dataStartIndex + numTokens - 1]);
                for (var n = 0; n < rightPadding; ++n) {
                    appendToNGram(this_1.separator);
                    appendToNGram(this_1.rightPad);
                }
            }
            else {
                // If we don't have tokens, then the last item inserted into the nGram
                // has been the separator from the left padding loop above. Hence,
                // output right pad and separator and make sure to finish with a
                // padding, not a separator.
                for (var n = 0; n < rightPadding - 1; ++n) {
                    appendToNGram(this_1.rightPad);
                    appendToNGram(this_1.separator);
                }
                appendToNGram(this_1.rightPad);
            }
        };
        var this_1 = this;
        for (var nGramIndex = 0; nGramIndex < numNGrams; ++nGramIndex) {
            _loop_1(nGramIndex);
        }
    };
    // Data and splits together form the definition of the ragged tensor,
    // where data is 1 dimensional and contains the values of the tensor
    // and splits denotes the indices at which each row starts.
    StringNGramsOp.prototype.compute = function (data, splits) {
        var _this = this;
        // Validate that the splits are valid indices into data, only if there are
        // splits specified.
        var inputDataSize = data.length;
        var splitsSize = splits.length;
        if (splitsSize > 0) {
            var prevSplit = splits[0];
            if (prevSplit !== 0) {
                throw new Error("First split value must be 0, got " + prevSplit);
            }
            for (var i = 1; i < splitsSize; ++i) {
                var validSplits = splits[i] >= prevSplit;
                validSplits = validSplits && (splits[i] <= inputDataSize);
                if (!validSplits) {
                    throw new Error("Invalid split value " + splits[i] + ", must be in [" + prevSplit + ", " + inputDataSize + "]");
                }
                prevSplit = splits[i];
            }
            if (prevSplit !== inputDataSize) {
                throw new Error("Last split value must be data size. Expected " + inputDataSize + ", got " + prevSplit);
            }
        }
        var numBatchItems = splitsSize - 1;
        var nGramsSplits = tfjsCore.util.getArrayFromDType('int32', splitsSize);
        // If there is no data or size, return an empty ragged tensor.
        if (inputDataSize === 0 || splitsSize === 0) {
            var empty = new Array(inputDataSize);
            for (var i = 0; i <= numBatchItems; ++i) {
                nGramsSplits[i] = 0;
            }
            return [empty, nGramsSplits];
        }
        nGramsSplits[0] = 0;
        // First pass: compute the output row splits (how many ngrams each
        // batch item produces across all requested widths).
        var _loop_2 = function (i) {
            var length = splits[i] - splits[i - 1];
            var numNGrams = 0;
            this_2.nGramWidths.forEach(function (nGramWidth) {
                numNGrams += _this.getNumNGrams(length, nGramWidth);
            });
            if (this_2.preserveShort && length > 0 && numNGrams === 0) {
                numNGrams = 1;
            }
            nGramsSplits[i] = nGramsSplits[i - 1] + numNGrams;
        };
        var this_2 = this;
        for (var i = 1; i <= numBatchItems; ++i) {
            _loop_2(i);
        }
        var nGrams = new Array(nGramsSplits[numBatchItems]);
        // Second pass: materialize the ngrams for each batch item.
        var _loop_3 = function (i) {
            var splitIndex = splits[i];
            var outputStartIdx = nGramsSplits[i];
            this_3.nGramWidths.forEach(function (nGramWidth) {
                var length = splits[i + 1] - splits[i];
                var numNGrams = _this.getNumNGrams(length, nGramWidth);
                _this.createNGrams(data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth);
                outputStartIdx += numNGrams;
            });
            // If we're preserving short sequences, check to see if no sequence was
            // generated by comparing the current output start idx to the original
            // one (nGramSplitsdata). If no ngrams were generated, then they will
            // be equal (since we increment outputStartIdx by numNGrams every
            // time we create a set of ngrams.)
            if (this_3.preserveShort && outputStartIdx === nGramsSplits[i]) {
                var dataLength = splits[i + 1] - splits[i];
                // One legitimate reason to not have any ngrams when this.preserveShort
                // is true is if the sequence itself is empty. In that case, move on.
                if (dataLength === 0) {
                    return "continue";
                }
                // We don't have to worry about dynamic padding sizes here: if padding
                // was dynamic, every sequence would have had sufficient padding to
                // generate at least one nGram.
                var nGramWidth = dataLength + 2 * this_3.padWidth;
                var numNGrams = 1;
                this_3.createNGrams(data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth);
            }
        };
        var this_3 = this;
        for (var i = 0; i < numBatchItems; ++i) {
            _loop_3(i);
        }
        return [nGrams, nGramsSplits];
    };
    return StringNGramsOp;
}());
/**
 * Convenience wrapper: constructs a StringNGramsOp with the given
 * attributes and runs it over one ragged input (data + row splits).
 */
function stringNGramsImpl(data, dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) {
    var op = new StringNGramsOp(separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences);
    return op.compute(data, dataSplits);
}
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Tokenizes an encoded string (`str`, a Uint8Array) on the given
 * delimiter bytes, appending each token (as a subarray view) to
 * `result`:
 * - no delimiters: every character becomes its own token;
 * - one delimiter: split at each occurrence of that byte;
 * - several delimiters: split wherever any one of them occurs.
 * Empty tokens are dropped when `skipEmpty` is true.
 */
function split(str, delimiters, skipEmpty, result) {
    if (!str.length) {
        return;
    }
    var pushToken = function (token) {
        if (!skipEmpty || token.length !== 0) {
            result.push(token);
        }
    };
    // Empty delimiter list: emit each character individually.
    if (delimiters.length === 0) {
        for (var i = 0; i < str.length; ++i) {
            result.push(str.subarray(i, i + 1));
        }
        return;
    }
    // Single delimiter: scan with indexOf.
    if (delimiters.length === 1) {
        var delimiter = delimiters[0];
        var found = str.indexOf(delimiter);
        while (found !== -1) {
            pushToken(str.subarray(0, found));
            str = str.subarray(found + 1);
            found = str.indexOf(delimiter);
        }
        pushToken(str);
        return;
    }
    // Several delimiters: cut at every position holding any of them.
    var tokenStart = 0;
    for (var pos = 0; pos < str.length + 1; pos++) {
        if ((pos === str.length) || (delimiters.indexOf(str[pos]) !== -1)) {
            pushToken(str.subarray(tokenStart, pos));
            tokenStart = pos + 1;
        }
    }
}
/**
 * Splits each string in a batch and assembles a sparse representation:
 * [flat int32 coordinates of shape [numTokens, 2], token values, dense
 * shape [batchSize, maxTokensPerRow]].
 */
function stringSplitImpl(input, delimiter, skipEmpty) {
    var batchSize = input.length;
    // Empty delimiter means split the input character by character.
    var tokens = [];
    var numIndices = new Array(batchSize);
    var outputSize = 0;
    var maxNumEntries = 0;
    for (var i = 0; i < batchSize; ++i) {
        var before = tokens.length;
        split(input[i], delimiter, skipEmpty, tokens);
        var added = tokens.length - before;
        numIndices[i] = added;
        outputSize += added;
        maxNumEntries = Math.max(maxNumEntries, added);
    }
    var indices = tfjsCore.util.getArrayFromDType('int32', outputSize * 2);
    var values = new Array(outputSize);
    var shape = [batchSize, maxNumEntries];
    var c = 0;
    for (var row = 0; row < batchSize; ++row) {
        for (var col = 0; col < numIndices[row]; ++col) {
            // Each token gets its (row, position-in-row) coordinate in the
            // flat [outputSize, 2] indices array.
            indices[c * 2] = row;
            indices[c * 2 + 1] = col;
            values[c] = tokens[c];
            ++c;
        }
    }
    return [indices, values, shape];
}
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Maps each input string to a bucket in [0, numBuckets) using the
 * deterministic fingerPrint64 hash (mirrors tf.string_to_hash_bucket_fast).
 */
function stringToHashBucketFastImpl(input, numBuckets) {
    var output = tfjsCore.util.getArrayFromDType('int32', input.length);
    input.forEach(function (str, i) {
        output[i] = tfjsCore.util.fingerPrint64(str)
            .modulo(numBuckets)
            .getLowBitsUnsigned();
    });
    return output;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/** Element-wise subtraction, with a complex-number variant for complex64. */
var subImpl = createSimpleBinaryKernelImpl(function (aValue, bValue) {
    return aValue - bValue;
});
var subComplexImpl = createComplexBinaryKernelImpl(function (aReal, aImag, bReal, bImag) {
    return { real: aReal - bReal, imag: aImag - bImag };
});
var sub = binaryKernelFunc(tfjsCore.Sub, subImpl, subComplexImpl);
/** Kernel registration record for Sub on the CPU backend. */
var subConfig = {
    kernelName: tfjsCore.Sub,
    backendName: 'cpu',
    kernelFunc: sub
};
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * An implementation of the tile kernel shared between webgl and cpu for string
+ * tensors only.
+ */
function tileImpl(xBuf, reps) {
  // Each output dimension is the source dimension scaled by its repeat count.
  const outShape = xBuf.shape.map((dim, axis) => dim * reps[axis]);
  const outBuf = tfjsCore.buffer(outShape, xBuf.dtype);
  for (let destIndex = 0; destIndex < outBuf.values.length; ++destIndex) {
    const destLoc = outBuf.indexToLoc(destIndex);
    // Wrap every coordinate back into the source tensor's bounds so the
    // source pattern repeats along each axis.
    const srcLoc = destLoc.map((coord, axis) => coord % xBuf.shape[axis]);
    outBuf.values[destIndex] = xBuf.values[xBuf.locToIndex(srcLoc)];
  }
  return outBuf;
}
+
// Comparator for {value, index} pairs: sorts by descending value, breaking
// ties by ascending index (so equal values keep their original order).
var comparePair = function (a, b) {
  if (a.value !== b.value) {
    return b.value - a.value;
  }
  return a.index - b.index;
};
+ /**
+ * Partitions array where all elements smaller than the (k+1) smallest element
+ * are found to the left of it, and all larger to the right of it.
+ * Based on the Floyd-Rivest Algorithm, ref:
+ * https://en.wikipedia.org/wiki/Floyd%E2%80%93Rivest_algorithm
+ * @param array: Array to partition
+ * @param left: Left index for the interval
+ * @param right: Right index for the interval
+ * @param k: Desired index value, where array[k] is the (k+1)th smallest element
+ * when left = 0
+ */
+ // NOTE: "smallest" is relative to comparePair, which orders by DESCENDING
+ // .value, so array[k] ends up being the (k+1)th largest value. The array is
+ // partitioned in place and nothing is returned.
+ function select$1(array, k, left, right) {
+ if (left === void 0) { left = 0; }
+ if (right === void 0) { right = array.length - 1; }
+ while (right > left) {
+ // Use select recursively to sample a smaller set of size s
+ // the arbitrary constants 600 and 0.5 are used in the original
+ // version to minimize execution time.
+ if (right - left > 600) {
+ var n = right - left + 1;
+ var i_1 = k - left + 1;
+ var z = Math.log(n);
+ var s = 0.5 * Math.exp(2 * z / 3);
+ var sd = 0.5 * Math.sqrt(z * s * (n - s) / n) * Math.sign(i_1 - n / 2);
+ var newLeft = Math.max(left, Math.floor(k - i_1 * s / n + sd));
+ var newRight = Math.min(right, Math.floor(k + (n - i_1) * s / n + sd));
+ select$1(array, k, newLeft, newRight);
+ }
+ // partition the elements between left and right around t
+ var t = array[k];
+ var i = left;
+ var j = right;
+ tfjsCore.util.swap(array, left, k);
+ if (comparePair(array[right], t) > 0) {
+ tfjsCore.util.swap(array, left, right);
+ }
+ while (i < j) {
+ tfjsCore.util.swap(array, i, j);
+ i++;
+ j--;
+ while (comparePair(array[i], t) < 0) {
+ i = i + 1;
+ }
+ while (comparePair(array[j], t) > 0) {
+ j = j - 1;
+ }
+ }
+ if (comparePair(array[left], t) === 0) {
+ tfjsCore.util.swap(array, left, j);
+ }
+ else {
+ j = j + 1;
+ tfjsCore.util.swap(array, j, right);
+ }
+ // Adjust left and right towards the boundaries of the subset
+ // containing the (k - left + 1)th smallest element.
+ if (j <= k) {
+ left = j + 1;
+ }
+ if (k <= j) {
+ right = j - 1;
+ }
+ }
+ }
+ /**
+ * Returns the k largest entries (values and their indices) along the last
+ * axis of `x`, for every batch.
+ *
+ * @param x Flat typed array holding the tensor data.
+ * @param xShape Logical shape of `x`; the last dimension is reduced.
+ * @param xDtype Dtype used for the output values buffer.
+ * @param k Number of top entries to keep per batch row.
+ * @param sorted If true, each row's top-k is sorted by descending value
+ * (ties by ascending index, per comparePair).
+ * @returns A two-element array: [values buffer, int32 indices buffer], each
+ * shaped like xShape with the last dimension replaced by k.
+ */
+ function topKImpl(x, xShape, xDtype, k, sorted) {
+ // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim.
+ var lastDim = xShape[xShape.length - 1];
+ var _a = __read([x.length / lastDim, lastDim], 2), batch = _a[0], size = _a[1];
+ var allTopKVals = tfjsCore.util.getTypedArrayFromDType(xDtype, batch * k);
+ var allTopKIndices = tfjsCore.util.getTypedArrayFromDType('int32', batch * k);
+ var _loop_1 = function (b) {
+ var offset = b * size;
+ var vals = x.subarray(offset, offset + size);
+ var valAndInd = new Array(vals.length);
+ vals.forEach(function (value, index) { return valAndInd[index] = { value: value, index: index }; });
+ if (k < valAndInd.length) {
+ // Partial selection (Floyd-Rivest) is cheaper than sorting the full row.
+ select$1(valAndInd, k);
+ valAndInd = valAndInd.slice(0, k);
+ }
+ if (sorted) {
+ valAndInd.sort(comparePair);
+ }
+ var outOffset = b * k;
+ // subarray views alias the output buffers, so these writes land in
+ // allTopKVals / allTopKIndices directly.
+ var topKVals = allTopKVals.subarray(outOffset, outOffset + k);
+ var topKIndices = allTopKIndices.subarray(outOffset, outOffset + k);
+ for (var i = 0; i < k; i++) {
+ topKVals[i] = valAndInd[i].value;
+ topKIndices[i] = valAndInd[i].index;
+ }
+ };
+ for (var b = 0; b < batch; b++) {
+ _loop_1(b);
+ }
+ // Reshape back to the original input shape, except that the last
+ // dimension is k.
+ var outputShape = xShape.slice();
+ outputShape[outputShape.length - 1] = k;
+ return [
+ tfjsCore.buffer(outputShape, xDtype, allTopKVals),
+ tfjsCore.buffer(outputShape, 'int32', allTopKIndices)
+ ];
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Finds the unique slices of a tensor along a given axis (shared CPU impl of
+ * the Unique kernel).
+ *
+ * Slices are compared by their string representation (values joined with
+ * ','), so equality follows each element's toString.
+ *
+ * @param values Flat array of tensor data.
+ * @param axis Axis along which uniqueness is computed (normalized below).
+ * @param shape Logical shape of `values`.
+ * @param dtype Dtype used for the output buffer.
+ * @returns {outputValues, outputShape, indices} where outputShape equals
+ * `shape` with the size of `axis` replaced by the unique count, and
+ * `indices` maps each original slice to its position in the output.
+ */
+ function uniqueImpl(values, axis, shape, dtype) {
+ // Normalize and validate axis.
+ var $axis = tfjsCore.util.parseAxisParam(axis, shape)[0];
+ // Calculate the new shape that is suitable for extracting data along the
+ // given axis.
+ //
+ // The rank is 3.
+ // The size of the 1st dimension is the size of all the axes < the given axis.
+ // The size of the 2nd dimension is the same as the size of the given axis.
+ // The size of the 3rd dimension is the size of all the axes > the given axis.
+ //
+ // For example, for a 4D tensor with shape=[2, 3, 5, 4] and axis=2, the
+ // newShape would be: [2*3, 5, 4].
+ //
+ // Note that this is not the final output shape. This will be the shape for an
+ // intermediate TensorBuffer (see inputBuffer below) to allow us to extract
+ // values along the given axis. To demonstrate how it works, consider the
+ // following example:
+ //
+ // Input: a 3D tensor, with shape [1, 2, 3]
+ // [
+ // [
+ // [1,2,3],
+ // [4,5,6]
+ // ]
+ // ]
+ // Axis: 2 (the last axis).
+ // Along axis 2, we expect to extract 3 tensors: [1,4], [2,5], [3,6].
+ //
+ // For this example, newShape would be: [2, 3, 1], where 2 is calculated from
+ // 1*2. The re-shaped data would look like:
+ //
+ // [
+ // [
+ // [1], [2], [3]
+ // ],
+ // [
+ // [4], [5], [6]
+ // ]
+ // ]
+ //
+ // Then, we can construct a 3-level nested loop by the following dimension
+ // order to extract the values along the axis (dimension1):
+ // i: dimension1 // 0,1,2 (newShape[1])
+ // m: dimension0 // 0,1 (newShape[0])
+ // n: dimension2 // 0 (newShape[2])
+ //
+ // m, i, n
+ // ---------
+ // Iteration 0: data at [0, 0, 0] => "1"
+ // Iteration 1: data at [1, 0, 0] => "4"
+ // We got [1,4].
+ // Iteration 2: data at [0, 1, 0] => "2"
+ // Iteration 3: data at [1, 1, 0] => "5"
+ // We got [2,5].
+ // Iteration 4: data at [0, 2, 0] => "3"
+ // Iteration 5: data at [1, 2, 0] => "6"
+ // We got [3,6].
+ var newShape = [1, shape[0], 1];
+ for (var i = 0; i < $axis; i++) {
+ newShape[0] *= shape[i];
+ }
+ newShape[1] = shape[$axis];
+ for (var i = $axis + 1; i < shape.length; i++) {
+ newShape[2] *= shape[i];
+ }
+ // A map from unique elements (their string representations) to their values
+ // in "indices" (below).
+ var uniqueElements = {};
+ // The indices of each unique element in the original tensor along the given
+ // axis. It is 1D and has the same size as the given axis.
+ var indices = new Int32Array(shape[$axis]);
+ // Create a buffer so we can easily extract value at a given location.
+ var inputBuffer = new tfjsCore.TensorBuffer(newShape, dtype, values);
+ // The indices along the given axis that have unique elements. This is a
+ // de-duped version of "indices" above.
+ var uniqueIndices = [];
+ var is1DTensor = newShape[0] === 1 && newShape[2] === 1;
+ for (var i = 0; i < shape[$axis]; i++) {
+ // Extract values along the axis.
+ var element = void 0;
+ if (is1DTensor) {
+ // Fast path for 1D tensor input.
+ element = values[i].toString();
+ }
+ else {
+ var axisValues = [];
+ for (var m = 0; m < newShape[0]; m++) {
+ for (var n = 0; n < newShape[2]; n++) {
+ axisValues.push(inputBuffer.get(m, i, n));
+ }
+ }
+ element = axisValues.join(',');
+ }
+ // Dedup and update various indices.
+ if (uniqueElements[element] !== undefined) {
+ indices[i] = uniqueElements[element];
+ }
+ else {
+ var uniqueIndex = Object.keys(uniqueElements).length;
+ uniqueElements[element] = uniqueIndex;
+ indices[i] = uniqueIndex;
+ uniqueIndices.push(i);
+ }
+ }
+ // Now we know where each of the unique elements are located along the axis
+ // (uniqueIndices). Extract them from input buffer and store them in the
+ // output buffer.
+ var outputTmpShape = newShape.slice();
+ outputTmpShape[1] = Object.keys(uniqueElements).length;
+ var outputBuffer = new tfjsCore.TensorBuffer(outputTmpShape, dtype);
+ uniqueIndices.forEach(function (uniqueElementIndex, i) {
+ for (var m = 0; m < newShape[0]; m++) {
+ for (var n = 0; n < newShape[2]; n++) {
+ outputBuffer.set(inputBuffer.get(m, uniqueElementIndex, n), m, i, n);
+ }
+ }
+ });
+ // The output shape can be calculated from the input shape with the size of
+ // the given axis replaced by the number of unique elements along that axis.
+ var outputShape = shape.slice();
+ outputShape[$axis] = outputTmpShape[1];
+ return {
+ outputValues: outputBuffer.values,
+ outputShape: outputShape,
+ indices: indices,
+ };
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ // Registry of CPU kernel implementations exported for reuse by other
+ // backends (each entry maps a name to the shared *Impl function defined in
+ // this bundle). __proto__: null gives the object no prototype, so lookups
+ // cannot accidentally hit Object.prototype members.
+ var shared = {
+ __proto__: null,
+ simpleAbsImpl: simpleAbsImpl,
+ addImpl: addImpl,
+ bincountImpl: bincountImpl,
+ bincountReduceImpl: bincountReduceImpl,
+ ceilImpl: ceilImpl,
+ concatImpl: concatImpl,
+ equalImpl: equalImpl,
+ expImpl: expImpl,
+ expm1Impl: expm1Impl,
+ floorImpl: floorImpl,
+ gatherNdImpl: gatherNdImpl,
+ gatherV2Impl: gatherV2Impl,
+ greaterImpl: greaterImpl,
+ greaterEqualImpl: greaterEqualImpl,
+ lessImpl: lessImpl,
+ lessEqualImpl: lessEqualImpl,
+ linSpaceImpl: linSpaceImpl,
+ logImpl: logImpl,
+ maxImpl: maxImpl,
+ maximumImpl: maximumImpl,
+ minimumImpl: minimumImpl,
+ multiplyImpl: multiplyImpl,
+ negImpl: negImpl,
+ notEqualImpl: notEqualImpl,
+ prodImpl: prodImpl,
+ rangeImpl: rangeImpl,
+ rsqrtImpl: rsqrtImpl,
+ sigmoidImpl: sigmoidImpl,
+ sliceImpl: sliceImpl,
+ sparseFillEmptyRowsImpl: sparseFillEmptyRowsImpl,
+ sparseReshapeImpl: sparseReshapeImpl,
+ sparseSegmentReductionImpl: sparseSegmentReductionImpl,
+ sqrtImpl: sqrtImpl,
+ squaredDifferenceImpl: squaredDifferenceImpl,
+ stridedSliceImpl: stridedSliceImpl,
+ stringNGramsImpl: stringNGramsImpl,
+ stringSplitImpl: stringSplitImpl,
+ stringToHashBucketFastImpl: stringToHashBucketFastImpl,
+ subImpl: subImpl,
+ tileImpl: tileImpl,
+ topKImpl: topKImpl,
+ transposeImpl: transposeImpl,
+ uniqueImpl: uniqueImpl
+ };
+
+ /** @license See the LICENSE file. */
+ // This code is auto-generated, do not modify this file!
+ // Version string of this generated CPU-backend bundle.
+ var version = '3.12.0';
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Side effects for default initialization of MathBackendCPU
+ tfjsCore.registerBackend('cpu', function () { return new MathBackendCPU(); }, 1 /* priority */);
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// ELU kernel: identity for non-negative inputs, exp(x) - 1 for negative ones.
var elu = unaryKernelFunc(tfjsCore.Elu, function (xi) {
  if (xi >= 0) {
    return xi;
  }
  return Math.exp(xi) - 1;
});
// Registration record for the kernel registry.
var eluConfig = {
  kernelName: tfjsCore.Elu,
  backendName: 'cpu',
  kernelFunc: elu,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * LeakyRelu kernel: negative inputs are scaled by attrs.alpha, non-negative
 * inputs pass through unchanged. Output is always float32.
 */
function leakyRelu(args) {
  const { inputs, backend, attrs } = args;
  const { x } = inputs;
  const { alpha } = attrs;
  assertNotComplex([x], 'leakyRelu');
  const numElements = tfjsCore.util.sizeFromShape(x.shape);
  const inVals = backend.data.get(x.dataId).values;
  const result = tfjsCore.util.getTypedArrayFromDType('float32', numElements);
  for (let i = 0; i < inVals.length; i++) {
    const v = inVals[i];
    result[i] = v < 0 ? alpha * v : v;
  }
  return backend.makeTensorInfo(x.shape, 'float32', result);
}
var leakyReluConfig = {
  kernelName: tfjsCore.LeakyRelu,
  backendName: 'cpu',
  kernelFunc: leakyRelu
};
+
// PReLU element function: negative inputs are scaled by the learned alpha
// value at the same (broadcast) position; others pass through.
var preluImpl = createSimpleBinaryKernelImpl(function (xValue, aValue) {
  return xValue < 0 ? aValue * xValue : xValue;
});
/** PReLU kernel entry point; output is always float32. */
function prelu(args) {
  const { inputs, backend } = args;
  const { x, alpha } = inputs;
  assertNotComplex([x, alpha], 'prelu');
  const xVals = backend.data.get(x.dataId).values;
  const alphaVals = backend.data.get(alpha.dataId).values;
  const [resultData, resultShape] =
      preluImpl(x.shape, alpha.shape, xVals, alphaVals, 'float32');
  return backend.makeTensorInfo(resultShape, 'float32', resultData);
}
var preluConfig = {
  kernelName: tfjsCore.Prelu,
  backendName: 'cpu',
  kernelFunc: prelu,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // ReLU kernel: element-wise max(0, x). Math.max propagates NaN inputs and
+ // maps -0 to +0.
+ var relu = unaryKernelFunc(tfjsCore.Relu, function (xi) { return Math.max(0, xi); });
+ var reluConfig = {
+ kernelName: tfjsCore.Relu,
+ backendName: 'cpu',
+ kernelFunc: relu,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // ReLU6 kernel: element-wise clamp of x to the range [0, 6].
+ var relu6 = unaryKernelFunc(tfjsCore.Relu6, function (xi) { return Math.min(Math.max(0, xi), 6); });
+ var relu6Config = {
+ kernelName: tfjsCore.Relu6,
+ backendName: 'cpu',
+ kernelFunc: relu6,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Dispatches a fused-op activation name to the matching CPU kernel.
 *
 * @param backend The CPU backend instance forwarded to the kernel.
 * @param x Input tensor info.
 * @param activation Activation name ('linear' | 'relu' | 'elu' | 'relu6' |
 *     'prelu' | 'leakyrelu' | 'sigmoid').
 * @param preluActivationWeights Alpha tensor, used only for 'prelu'.
 * @param leakyreluAlpha Scalar alpha, used only for 'leakyrelu'.
 * @returns The activated tensor info.
 * @throws Error for any unrecognized activation name.
 */
function applyActivation(backend, x, activation, preluActivationWeights, leakyreluAlpha) {
  switch (activation) {
    case 'linear':
      return identity({ inputs: { x: x }, backend: backend });
    case 'relu':
      return relu({ inputs: { x: x }, backend: backend });
    case 'elu':
      return elu({ inputs: { x: x }, backend: backend });
    case 'relu6':
      return relu6({ inputs: { x: x }, backend: backend });
    case 'prelu':
      return prelu({ inputs: { x: x, alpha: preluActivationWeights }, backend: backend });
    case 'leakyrelu':
      return leakyRelu({ inputs: { x: x }, backend: backend, attrs: { alpha: leakyreluAlpha } });
    case 'sigmoid':
      return sigmoid({ inputs: { x: x }, backend: backend });
    default:
      throw new Error("Activation " + activation + " has not been implemented for the CPU backend.");
  }
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reshape kernel. Zero-copy: the output tensor info reuses x's dataId (with
+ * its refcount bumped via backend.incRef) and only carries a new shape.
+ * Supports one implicit dimension (-1) in attrs.shape; asserts the element
+ * count is preserved.
+ */
+ function reshape(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var shape = attrs.shape;
+ var xSize = tfjsCore.util.sizeFromShape(x.shape);
+ // Resolve any -1 entry in the requested shape from the element count.
+ var $shape = tfjsCore.util.inferFromImplicitShape(shape, xSize);
+ var $xSize = tfjsCore.util.sizeFromShape($shape);
+ tfjsCore.util.assert(xSize === $xSize, function () { return "The new shape (" + $shape + ") has " + $xSize + " elements and the old " +
+ ("shape (" + x.shape + ") has " + xSize + " elements. The new shape and old ") +
+ "shape must have the same number of elements."; });
+ backend.incRef(x.dataId);
+ var xData = backend.data.get(x.dataId);
+ if (xData.complexTensorInfos != null) {
+ // Complex tensors store real/imag parts separately; keep their shapes in
+ // sync with the reshaped container. NOTE: this mutates the component
+ // tensor infos in place.
+ var real = xData.complexTensorInfos.real;
+ var imag = xData.complexTensorInfos.imag;
+ real.shape = $shape;
+ imag.shape = $shape;
+ }
+ return { dataId: x.dataId, shape: $shape, dtype: x.dtype };
+ }
+ var reshapeConfig = {
+ kernelName: tfjsCore.Reshape,
+ backendName: 'cpu',
+ kernelFunc: reshape
+ };
+
+ /**
+ * Batched matrix multiply kernel with optional transposes and batch-dim
+ * broadcasting. Inputs are flattened to rank-3 [batch, rows, cols] views
+ * (zero-copy via reshape), multiplied with a cache-blocked triple loop, and
+ * the result is returned with the broadcast outer dims restored.
+ */
+ function batchMatMul(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var a = inputs.a, b = inputs.b;
+ var transposeA = attrs.transposeA, transposeB = attrs.transposeB;
+ assertNotComplex([a, b], 'matMul');
+ var aRank = a.shape.length;
+ var bRank = b.shape.length;
+ // Inner dims must match (contraction axis); outer dims form the output.
+ var innerShapeA = transposeA ? a.shape[aRank - 2] : a.shape[aRank - 1];
+ var innerShapeB = transposeB ? b.shape[bRank - 1] : b.shape[bRank - 2];
+ var outerShapeA = transposeA ? a.shape[aRank - 1] : a.shape[aRank - 2];
+ var outerShapeB = transposeB ? b.shape[bRank - 2] : b.shape[bRank - 1];
+ var outerDimsA = a.shape.slice(0, -2);
+ var outerDimsB = b.shape.slice(0, -2);
+ var batchDimA = tfjsCore.util.sizeFromShape(outerDimsA);
+ var batchDimB = tfjsCore.util.sizeFromShape(outerDimsB);
+ var outShapeOuterDims = tfjsCore.broadcast_util.assertAndGetBroadcastShape(a.shape.slice(0, -2), b.shape.slice(0, -2));
+ var outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);
+ tfjsCore.util.assert(innerShapeA === innerShapeB, function () { return "Error in matMul: inner shapes (" + innerShapeA + ") and (" +
+ (innerShapeB + ") of Tensors with shapes " + a.shape + " and ") +
+ (b.shape + " and transposeA=" + transposeA) +
+ (" and transposeB=" + transposeB + " must match."); });
+ var a3dShape = transposeA ? [batchDimA, innerShapeA, outerShapeA] :
+ [batchDimA, outerShapeA, innerShapeA];
+ var b3dShape = transposeB ? [batchDimB, outerShapeB, innerShapeB] :
+ [batchDimB, innerShapeB, outerShapeB];
+ // The rest of the implementation is designed to operate on rank-3 tensors
+ var a3d = reshape({ inputs: { x: a }, backend: backend, attrs: { shape: a3dShape } });
+ var b3d = reshape({ inputs: { x: b }, backend: backend, attrs: { shape: b3dShape } });
+ var sharedDim = transposeA ? a3d.shape[1] : a3d.shape[2];
+ var leftDim = transposeA ? a3d.shape[2] : a3d.shape[1];
+ var rightDim = transposeB ? b3d.shape[1] : b3d.shape[2];
+ var batchDim = Math.max(batchDimA, batchDimB);
+ var a3dValues = backend.data.get(a3d.dataId).values;
+ var b3dValues = backend.data.get(b3d.dataId).values;
+ var a3dStrides = tfjsCore.util.computeStrides(a3d.shape);
+ var b3dStrides = tfjsCore.util.computeStrides(b3d.shape);
+ // Pick strides so the inner loops read a/b in the right order regardless
+ // of transpose flags.
+ var _a = __read(transposeA ?
+ [a3dStrides[0], 1, a3dStrides[1]] :
+ [a3dStrides[0], a3dStrides[1], 1], 3), aBatch = _a[0], aOuterStep = _a[1], aInnerStep = _a[2];
+ var _b = __read(transposeB ?
+ [1, b3dStrides[1], b3dStrides[0]] :
+ [b3dStrides[1], 1, b3dStrides[0]], 3), bInnerStep = _b[0], bOuterStep = _b[1], bBatch = _b[2];
+ var size = leftDim * rightDim;
+ var result = tfjsCore.buffer([batchDim, leftDim, rightDim], a3d.dtype);
+ var resVals = result.values;
+ var blockSize = backend.blockSize;
+ // Cache-blocked matmul: iterate over blockSize x blockSize tiles and
+ // accumulate partial dot products into resVals (buffer starts zeroed,
+ // hence the += below).
+ for (var bi = 0; bi < batchDim; bi++) {
+ for (var i0 = 0; i0 < leftDim; i0 += blockSize) {
+ for (var j0 = 0; j0 < rightDim; j0 += blockSize) {
+ for (var k0 = 0; k0 < sharedDim; k0 += blockSize) {
+ // for when blockSize doesn't evenly divide the input
+ var iBlock = Math.min(i0 + blockSize, leftDim);
+ var jBlock = Math.min(j0 + blockSize, rightDim);
+ var kBlock = Math.min(k0 + blockSize, sharedDim);
+ for (var i = i0; i < iBlock; i++) {
+ for (var j = j0; j < jBlock; j++) {
+ var sum = 0.0;
+ for (var k = k0; k < kBlock; k++) {
+ // Math.min(bi, batchDim? - 1) broadcasts a size-1 batch dim
+ // against the larger one.
+ var batchOffsetA = Math.min(bi, batchDimA - 1) * aBatch;
+ var batchOffsetB = Math.min(bi, batchDimB - 1) * bBatch;
+ var aVal = a3dValues[batchOffsetA + i * aOuterStep + k * aInnerStep];
+ var bVal = b3dValues[k * bInnerStep + j * bOuterStep + batchOffsetB];
+ sum += aVal * bVal;
+ }
+ resVals[bi * size + (i * rightDim + j)] += sum;
+ }
+ }
+ }
+ }
+ }
+ }
+ backend.disposeIntermediateTensorInfo(a3d);
+ backend.disposeIntermediateTensorInfo(b3d);
+ // set correct shape on output.
+ return backend.makeTensorInfo(outShape, result.dtype, result.values);
+ }
+ var batchMatMulConfig = {
+ kernelName: tfjsCore.BatchMatMul,
+ backendName: 'cpu',
+ kernelFunc: batchMatMul,
+ };
+
+ /**
+ * Fused matmul kernel: batchMatMul, then an optional bias add, then an
+ * optional activation (see applyActivation). Intermediate tensor infos
+ * produced along the chain are disposed before returning the final result.
+ */
+ function _fusedMatMul(args) {
+ var e_1, _a;
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var a = inputs.a, b = inputs.b, bias = inputs.bias, preluActivationWeights = inputs.preluActivationWeights;
+ var transposeA = attrs.transposeA, transposeB = attrs.transposeB, activation = attrs.activation, leakyreluAlpha = attrs.leakyreluAlpha;
+ var current;
+ var addRes;
+ var activationRes;
+ var intermediates = [];
+ var matMulRes = batchMatMul({ inputs: { a: a, b: b }, attrs: { transposeA: transposeA, transposeB: transposeB }, backend: backend });
+ current = matMulRes;
+ if (bias) {
+ addRes = add({ inputs: { a: current, b: bias }, backend: backend });
+ intermediates.push(current);
+ current = addRes;
+ }
+ if (activation) {
+ activationRes = applyActivation(backend, current, activation, preluActivationWeights, leakyreluAlpha);
+ intermediates.push(current);
+ current = activationRes;
+ }
+ // The try/catch/finally below is tsc's down-leveled for...of over
+ // `intermediates` (iterator cleanup on early exit); do not simplify.
+ try {
+ for (var intermediates_1 = __values(intermediates), intermediates_1_1 = intermediates_1.next(); !intermediates_1_1.done; intermediates_1_1 = intermediates_1.next()) {
+ var i = intermediates_1_1.value;
+ backend.disposeIntermediateTensorInfo(i);
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (intermediates_1_1 && !intermediates_1_1.done && (_a = intermediates_1.return)) _a.call(intermediates_1);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ return current;
+ }
+ var _fusedMatMulConfig = {
+ kernelName: tfjsCore._FusedMatMul,
+ backendName: 'cpu',
+ kernelFunc: _fusedMatMul,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Acos kernel: element-wise arccosine. Math.acos yields NaN for inputs
+ // outside [-1, 1].
+ var acos = unaryKernelFunc(tfjsCore.Acos, function (xi) { return Math.acos(xi); });
+ var acosConfig = {
+ kernelName: tfjsCore.Acos,
+ backendName: 'cpu',
+ kernelFunc: acos,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Acosh kernel: element-wise inverse hyperbolic cosine. Math.acosh yields
+ // NaN for inputs below 1.
+ var acosh = unaryKernelFunc(tfjsCore.Acosh, function (xi) { return Math.acosh(xi); });
+ var acoshConfig = {
+ kernelName: tfjsCore.Acosh,
+ backendName: 'cpu',
+ kernelFunc: acosh,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * AddN kernel: element-wise sum of all input tensors. The output takes the
 * shape and dtype of the first input, and each input's values are
 * accumulated into a zero-initialized buffer.
 */
function addN(args) {
  const { inputs, backend } = args;
  const tensors = inputs;
  assertNotComplex(inputs, 'addN');
  const valsPerTensor = tensors.map(function (t) {
    return backend.data.get(t.dataId).values;
  });
  const resultBuf = tfjsCore.buffer(tensors[0].shape, tensors[0].dtype);
  const resultVals = resultBuf.values;
  for (let t = 0; t < valsPerTensor.length; t++) {
    const currVals = valsPerTensor[t];
    for (let j = 0; j < resultVals.length; j++) {
      resultVals[j] += currVals[j];
    }
  }
  return backend.makeTensorInfo(resultBuf.shape, resultBuf.dtype, resultBuf.values);
}
var addNConfig = {
  kernelName: tfjsCore.AddN,
  backendName: 'cpu',
  kernelFunc: addN
};
+
+ /**
+ * All kernel: logical-AND reduction over the given axes. Non-innermost axes
+ * are first moved innermost via transpose; keepDims reshapes the result back
+ * to the input rank with reduced axes of size 1.
+ */
+ function all(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var axis = attrs.axis, keepDims = attrs.keepDims;
+ assertNotComplex(x, 'all');
+ var origAxes = tfjsCore.util.parseAxisParam(axis, x.shape);
+ var axes = origAxes;
+ var permutedAxes = tfjsCore.backend_util.getAxesPermutation(axes, x.shape.length);
+ var $x = x;
+ if (permutedAxes != null) {
+ $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutedAxes } });
+ axes = tfjsCore.backend_util.getInnerMostAxes(axes.length, x.shape.length);
+ }
+ tfjsCore.backend_util.assertAxesAreInnerMostDims('all', axes, $x.shape.length);
+ var _a = __read(tfjsCore.backend_util.computeOutAndReduceShapes($x.shape, axes), 2), outShape = _a[0], reduceShape = _a[1];
+ var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
+ var vals = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape(outShape), $x.dtype);
+ var aVals = backend.data.get($x.dataId).values;
+ for (var i = 0; i < vals.length; ++i) {
+ var offset = i * reduceSize;
+ // Fold each contiguous reduce-window with &&; seeded from its first value.
+ var all_1 = aVals[offset];
+ for (var j = 0; j < reduceSize; ++j) {
+ var value = aVals[offset + j];
+ all_1 = all_1 && value;
+ }
+ vals[i] = all_1;
+ }
+ if (permutedAxes != null) {
+ backend.disposeIntermediateTensorInfo($x);
+ }
+ var result = backend.makeTensorInfo(outShape, $x.dtype, vals);
+ if (keepDims) {
+ var expandedShape = tfjsCore.backend_util.expandShapeToKeepDim(outShape, origAxes);
+ var reshapedResult = reshape({ inputs: { x: result }, backend: backend, attrs: { shape: expandedShape } });
+ backend.disposeIntermediateTensorInfo(result);
+ return reshapedResult;
+ }
+ return result;
+ }
+ var allConfig = {
+ kernelName: tfjsCore.All,
+ backendName: 'cpu',
+ kernelFunc: all
+ };
+
+ /**
+ * Any kernel: logical-OR reduction over the given axes. Mirrors `all` above
+ * with || in place of &&; non-innermost axes are first moved innermost via
+ * transpose, and keepDims reshapes the result back to the input rank.
+ */
+ function any(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var axis = attrs.axis, keepDims = attrs.keepDims;
+ assertNotComplex(x, 'any');
+ var origAxes = tfjsCore.util.parseAxisParam(axis, x.shape);
+ var axes = origAxes;
+ var permutedAxes = tfjsCore.backend_util.getAxesPermutation(axes, x.shape.length);
+ var $x = x;
+ if (permutedAxes != null) {
+ $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutedAxes } });
+ axes = tfjsCore.backend_util.getInnerMostAxes(axes.length, x.shape.length);
+ }
+ tfjsCore.backend_util.assertAxesAreInnerMostDims('any', axes, $x.shape.length);
+ var _a = __read(tfjsCore.backend_util.computeOutAndReduceShapes($x.shape, axes), 2), outShape = _a[0], reduceShape = _a[1];
+ var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
+ var vals = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape(outShape), $x.dtype);
+ var aVals = backend.data.get($x.dataId).values;
+ for (var i = 0; i < vals.length; ++i) {
+ var offset = i * reduceSize;
+ // Fold each contiguous reduce-window with ||; seeded from its first value.
+ var anyVal = aVals[offset];
+ for (var j = 0; j < reduceSize; ++j) {
+ var value = aVals[offset + j];
+ anyVal = anyVal || value;
+ }
+ vals[i] = anyVal;
+ }
+ if (permutedAxes != null) {
+ backend.disposeIntermediateTensorInfo($x);
+ }
+ var result = backend.makeTensorInfo(outShape, $x.dtype, vals);
+ if (keepDims) {
+ var expandedShape = tfjsCore.backend_util.expandShapeToKeepDim(outShape, origAxes);
+ var reshapedResult = reshape({ inputs: { x: result }, backend: backend, attrs: { shape: expandedShape } });
+ backend.disposeIntermediateTensorInfo(result);
+ return reshapedResult;
+ }
+ return result;
+ }
+ var anyConfig = {
+ kernelName: tfjsCore.Any,
+ backendName: 'cpu',
+ kernelFunc: any
+ };
+
+ /**
+  * CPU kernel for `ArgMax`: returns, as int32, the index of the maximum value
+  * along a single axis. If several axes are parsed from `attrs.axis`, only the
+  * first one is reduced (see `axes = [axes[0]]` below). Ties resolve to the
+  * lowest index because the comparison is strict (`value > max`).
+  */
+ function argMax(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var axis = attrs.axis;
+ assertNotComplex(x, 'argMax');
+ var axes = tfjsCore.util.parseAxisParam(axis, x.shape);
+ var permutedAxes = tfjsCore.backend_util.getAxesPermutation(axes, x.shape.length);
+ var $x = x;
+ var intermediateTensorInfos = [];
+ // Move the reduced axis innermost so each output element reads a contiguous
+ // run of input values; the transposed copy is tracked for disposal.
+ if (permutedAxes != null) {
+ $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutedAxes } });
+ intermediateTensorInfos.push($x);
+ axes = tfjsCore.backend_util.getInnerMostAxes(axes.length, $x.shape.length);
+ }
+ // ArgMax reduces exactly one axis.
+ axes = [axes[0]];
+ tfjsCore.backend_util.assertAxesAreInnerMostDims('argMax', axes, $x.shape.length);
+ var _a = __read(tfjsCore.backend_util.computeOutAndReduceShapes($x.shape, axes), 2), outShape = _a[0], reduceShape = _a[1];
+ var outSize = tfjsCore.util.sizeFromShape(outShape);
+ var vals = tfjsCore.util.makeZerosTypedArray(outSize, 'int32');
+ var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
+ var aVals = backend.data.get($x.dataId).values;
+ for (var i = 0; i < vals.length; ++i) {
+ var offset = i * reduceSize;
+ var max = aVals[offset];
+ var maxIndex = 0;
+ for (var j = 0; j < reduceSize; ++j) {
+ var value = aVals[offset + j];
+ if (value > max) {
+ max = value;
+ maxIndex = j;
+ }
+ }
+ vals[i] = maxIndex;
+ }
+ intermediateTensorInfos.forEach(function (t) { return backend.disposeIntermediateTensorInfo(t); });
+ return backend.makeTensorInfo(outShape, 'int32', vals);
+ }
+ // Kernel registration binding ArgMax to the CPU backend.
+ var argMaxConfig = {
+ kernelName: tfjsCore.ArgMax,
+ backendName: 'cpu',
+ kernelFunc: argMax
+ };
+
+ /**
+  * CPU kernel for `ArgMin`: returns, as int32, the index of the minimum value
+  * along a single axis. Mirrors `argMax` above: only the first parsed axis is
+  * reduced, and ties resolve to the lowest index (strict `value < min`).
+  */
+ function argMin(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var axis = attrs.axis;
+ assertNotComplex(x, 'argMin');
+ var axes = tfjsCore.util.parseAxisParam(axis, x.shape);
+ var permutedAxes = tfjsCore.backend_util.getAxesPermutation(axes, x.shape.length);
+ var $x = x;
+ var intermediateTensorInfos = [];
+ // Move the reduced axis innermost; track the transposed copy for disposal.
+ if (permutedAxes != null) {
+ $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutedAxes } });
+ intermediateTensorInfos.push($x);
+ axes = tfjsCore.backend_util.getInnerMostAxes(axes.length, $x.shape.length);
+ }
+ // ArgMin reduces exactly one axis.
+ axes = [axes[0]];
+ tfjsCore.backend_util.assertAxesAreInnerMostDims('argMin', axes, $x.shape.length);
+ var _a = __read(tfjsCore.backend_util.computeOutAndReduceShapes($x.shape, axes), 2), outShape = _a[0], reduceShape = _a[1];
+ var outSize = tfjsCore.util.sizeFromShape(outShape);
+ var vals = tfjsCore.util.makeZerosTypedArray(outSize, 'int32');
+ var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
+ var aVals = backend.data.get($x.dataId).values;
+ for (var i = 0; i < vals.length; ++i) {
+ var offset = i * reduceSize;
+ var min = aVals[offset];
+ var minIndex = 0;
+ for (var j = 0; j < reduceSize; ++j) {
+ var value = aVals[offset + j];
+ if (value < min) {
+ min = value;
+ minIndex = j;
+ }
+ }
+ vals[i] = minIndex;
+ }
+ intermediateTensorInfos.forEach(function (t) { return backend.disposeIntermediateTensorInfo(t); });
+ return backend.makeTensorInfo(outShape, 'int32', vals);
+ }
+ // Kernel registration binding ArgMin to the CPU backend.
+ var argMinConfig = {
+ kernelName: tfjsCore.ArgMin,
+ backendName: 'cpu',
+ kernelFunc: argMin
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var asin = unaryKernelFunc(tfjsCore.Asin, function (xi) { return Math.asin(xi); });
+ var asinConfig = {
+ kernelName: tfjsCore.Asin,
+ backendName: 'cpu',
+ kernelFunc: asin,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var asinh = unaryKernelFunc(tfjsCore.Asinh, function (xi) { return Math.asinh(xi); });
+ var asinhConfig = {
+ kernelName: tfjsCore.Asinh,
+ backendName: 'cpu',
+ kernelFunc: asinh,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var atan = unaryKernelFunc(tfjsCore.Atan, function (xi) { return Math.atan(xi); });
+ var atanConfig = {
+ kernelName: tfjsCore.Atan,
+ backendName: 'cpu',
+ kernelFunc: atan,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var atan2Impl = createSimpleBinaryKernelImpl(function (aValue, bValue) { return Math.atan2(aValue, bValue); });
+ var atan2 = binaryKernelFunc(tfjsCore.Atan2, atan2Impl);
+ var atan2Config = {
+ kernelName: tfjsCore.Atan2,
+ backendName: 'cpu',
+ kernelFunc: atan2,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var atanh = unaryKernelFunc(tfjsCore.Atanh, function (xi) { return Math.atanh(xi); });
+ var atanhConfig = {
+ kernelName: tfjsCore.Atanh,
+ backendName: 'cpu',
+ kernelFunc: atanh,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+  * Shared 2D pooling helper for the CPU max/avg pool kernels.
+  *
+  * @param xValues  Flat input values in NHWC layout.
+  * @param xShape   Input shape (used only via `strides`).
+  * @param dtype    Output dtype.
+  * @param strides  Flat strides of the input (batch, row, col, channel).
+  * @param convInfo Pooling geometry from `computePool2DInfo`.
+  * @param poolType 'max' or 'avg'.
+  * @returns A TensorBuffer of shape `convInfo.outShape`.
+  */
+ function pool(xValues, xShape, dtype, strides, convInfo, poolType) {
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ var padTop = convInfo.padInfo.top;
+ var padLeft = convInfo.padInfo.left;
+ // 'max' starts from -Infinity so any pixel wins; 'avg' accumulates instead.
+ var initialValue = (poolType === 'max' ? Number.NEGATIVE_INFINITY :
+ Number.POSITIVE_INFINITY);
+ var output = tfjsCore.buffer(convInfo.outShape, dtype);
+ var outputVals = output.values;
+ // Precomputed flat strides of the NHWC output.
+ var outputBatchStrides = convInfo.outShape[1] * convInfo.outShape[2] * convInfo.outShape[3];
+ var outputRowStrides = convInfo.outShape[2] * convInfo.outShape[3];
+ var outputColStrides = convInfo.outShape[3];
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ var outputBatchOffset = b * outputBatchStrides;
+ var inputBatchOffset = b * strides[0];
+ for (var d = 0; d < convInfo.inChannels; ++d) {
+ for (var yR = 0; yR < convInfo.outHeight; ++yR) {
+ // Top-left corner of the window in input coordinates, clipped to the
+ // input bounds (padding regions are simply skipped).
+ var xRCorner = yR * strideHeight - padTop;
+ var xRMin = Math.max(0, xRCorner);
+ var xRMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner);
+ var outputRowOffset = outputBatchOffset + yR * outputRowStrides;
+ for (var yC = 0; yC < convInfo.outWidth; ++yC) {
+ var xCCorner = yC * strideWidth - padLeft;
+ var xCMin = Math.max(0, xCCorner);
+ var xCMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner);
+ var minMaxValue = initialValue;
+ var avgValue = 0;
+ var count = 0;
+ for (var xR = xRMin; xR < xRMax; xR += dilationHeight) {
+ var xROffset = inputBatchOffset + xR * strides[1];
+ for (var xC = xCMin; xC < xCMax; xC += dilationWidth) {
+ var xCOffset = xROffset + xC * strides[2];
+ var pixel = xValues[xCOffset + d];
+ if ((poolType === 'max' && pixel > minMaxValue)) {
+ minMaxValue = pixel;
+ }
+ else if (poolType === 'avg') {
+ avgValue += pixel;
+ count++;
+ }
+ }
+ // NOTE(review): `minMaxValue` is only ever assigned via the strict
+ // `pixel > minMaxValue` comparison above, which is false for NaN, so
+ // this early-exit appears unreachable here — verify against upstream
+ // NaN-propagation behavior before relying on it.
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ var outputOffset = outputRowOffset + yC * outputColStrides + d;
+ // 'avg' divides by the number of in-bounds pixels actually visited.
+ outputVals[outputOffset] =
+ poolType === 'avg' ? avgValue / count : minMaxValue;
+ }
+ }
+ }
+ }
+ return output;
+ }
+ /**
+  * Records, for every 2D max-pool output cell, the position of the winning
+  * input pixel (used by MaxPoolGrad / MaxPoolWithArgmax).
+  *
+  * By default the position is flattened within the effective filter window
+  * (`wR * effectiveFilterWidth + wC`). With `flattenPositions` it is flattened
+  * within the whole input instead, optionally including the batch dimension
+  * (`includeBatchInIndex`). Ties resolve to the first maximum encountered
+  * because the comparison is strict (`pixel > maxValue`).
+  */
+ function maxPoolPositions(xValues, xShape, dtype, convInfo, flattenPositions, includeBatchInIndex) {
+ if (flattenPositions === void 0) { flattenPositions = false; }
+ if (includeBatchInIndex === void 0) { includeBatchInIndex = false; }
+ var maxPositions = tfjsCore.buffer(convInfo.outShape, 'int32');
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ var padTop = convInfo.padInfo.top;
+ var padLeft = convInfo.padInfo.left;
+ var xBuf = tfjsCore.buffer(xShape, dtype, xValues);
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ for (var d = 0; d < convInfo.inChannels; ++d) {
+ for (var yR = 0; yR < convInfo.outHeight; ++yR) {
+ var xRCorner = yR * strideHeight - padTop;
+ // Advance past the padding in dilation-sized steps so that visited rows
+ // stay aligned with the dilated filter taps (a plain Math.max(0, ...)
+ // would not preserve that alignment).
+ var xRMin = xRCorner;
+ while (xRMin < 0) {
+ xRMin += dilationHeight;
+ }
+ // const xRMin = Math.max(0, xRCorner);
+ var xRMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner);
+ for (var yC = 0; yC < convInfo.outWidth; ++yC) {
+ var xCCorner = yC * strideWidth - padLeft;
+ var xCMin = xCCorner;
+ while (xCMin < 0) {
+ xCMin += dilationWidth;
+ }
+ var xCMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner);
+ var maxValue = Number.NEGATIVE_INFINITY;
+ // -1 is kept when the window contains no in-bounds pixel.
+ var maxPosition = -1;
+ for (var xR = xRMin; xR < xRMax; xR += dilationHeight) {
+ var wR = xR - xRCorner;
+ for (var xC = xCMin; xC < xCMax; xC += dilationWidth) {
+ var wC = xC - xCCorner;
+ var pixel = xBuf.get(b, xR, xC, d);
+ if (pixel > maxValue) {
+ maxValue = pixel;
+ if (flattenPositions) {
+ maxPosition = includeBatchInIndex ?
+ ((b * convInfo.inHeight + xR) * convInfo.inWidth + xC) *
+ convInfo.inChannels +
+ d :
+ (xR * convInfo.inWidth + xC) * convInfo.inChannels + d;
+ }
+ else {
+ // Position within the window: row-major over (wR, wC).
+ maxPosition = wR * effectiveFilterWidth + wC;
+ }
+ }
+ }
+ }
+ maxPositions.set(maxPosition, b, yR, yC, d);
+ }
+ }
+ }
+ }
+ return maxPositions;
+ }
+ /**
+  * Shared 3D pooling helper for the CPU max/avg pool3d kernels.
+  * Input is assumed flat in NDHWC order with flat `strides`
+  * (batch, depth, row, col, channel); output is a TensorBuffer of shape
+  * `convInfo.outShape`.
+  */
+ function pool3d(xValues, xShape, dtype, strides, convInfo, poolType) {
+ var strideDepth = convInfo.strideDepth;
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var dilationDepth = convInfo.dilationDepth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ var padFront = convInfo.padInfo.front;
+ var padTop = convInfo.padInfo.top;
+ var padLeft = convInfo.padInfo.left;
+ // 'max' starts from -Infinity so any pixel wins; 'avg' accumulates instead.
+ var initialValue = (poolType === 'max' ? Number.NEGATIVE_INFINITY :
+ Number.POSITIVE_INFINITY);
+ var output = tfjsCore.buffer(convInfo.outShape, dtype);
+ var outputVals = output.values;
+ // Precomputed flat strides of the NDHWC output.
+ var outputBatchStrides = convInfo.outShape[1] * convInfo.outShape[2] *
+ convInfo.outShape[3] * convInfo.outShape[4];
+ var outputDepthStrides = convInfo.outShape[2] * convInfo.outShape[3] * convInfo.outShape[4];
+ var outputRowStrides = convInfo.outShape[3] * convInfo.outShape[4];
+ var outputColStrides = convInfo.outShape[4];
+ for (var batch = 0; batch < convInfo.batchSize; ++batch) {
+ var outputBatchOffset = batch * outputBatchStrides;
+ var inputBatchOffset = batch * strides[0];
+ for (var channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (var yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) {
+ var xDepthCorner = yDepth * strideDepth - padFront;
+ // Step past the front padding in dilation-sized increments so visited
+ // slices stay aligned with the dilated filter taps.
+ var xDepthMin = xDepthCorner;
+ while (xDepthMin < 0) {
+ xDepthMin += dilationDepth;
+ }
+ var xDepthMax = Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner);
+ var outputDepthOffset = outputBatchOffset + yDepth * outputDepthStrides;
+ for (var yRow = 0; yRow < convInfo.outHeight; ++yRow) {
+ var xRowCorner = yRow * strideHeight - padTop;
+ var xRowMin = xRowCorner;
+ while (xRowMin < 0) {
+ xRowMin += dilationHeight;
+ }
+ var xRowMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner);
+ var outputRowOffset = outputDepthOffset + yRow * outputRowStrides;
+ for (var yCol = 0; yCol < convInfo.outWidth; ++yCol) {
+ var xColCorner = yCol * strideWidth - padLeft;
+ var xColMin = xColCorner;
+ while (xColMin < 0) {
+ xColMin += dilationWidth;
+ }
+ var xColMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner);
+ // Shader code begins
+ var outputColOffset = outputRowOffset + yCol * outputColStrides;
+ var minMaxValue = initialValue;
+ var avgValue = 0;
+ var count = 0;
+ for (var xDepth = xDepthMin; xDepth < xDepthMax; xDepth += dilationDepth) {
+ var xDepthOffset = inputBatchOffset + xDepth * strides[1];
+ for (var xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) {
+ var xRowOffset = xDepthOffset + xRow * strides[2];
+ for (var xCol = xColMin; xCol < xColMax; xCol += dilationWidth) {
+ var xColOffset = xRowOffset + xCol * strides[3];
+ var pixel = xValues[xColOffset + channel];
+ if ((poolType === 'max' && pixel > minMaxValue)) {
+ minMaxValue = pixel;
+ }
+ else if (poolType === 'avg') {
+ avgValue += pixel;
+ count++;
+ }
+ // NOTE(review): `minMaxValue` is only assigned through the strict
+ // `>` comparison above, which is false for NaN, so these three
+ // early-exit checks appear unreachable — verify against upstream
+ // NaN-propagation behavior before relying on them.
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ var outputOffset = outputColOffset + channel;
+ // 'avg' divides by the number of in-bounds pixels actually visited.
+ outputVals[outputOffset] =
+ poolType === 'avg' ? avgValue / count : minMaxValue;
+ }
+ }
+ }
+ }
+ }
+ return output;
+ }
+ function maxPool3dPositions(xBuf, convInfo) {
+ var maxPositions = tfjsCore.buffer(convInfo.outShape, 'int32');
+ var strideDepth = convInfo.strideDepth;
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var dilationDepth = convInfo.dilationDepth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ var padFront = convInfo.padInfo.front;
+ var padTop = convInfo.padInfo.top;
+ var padLeft = convInfo.padInfo.left;
+ for (var batch = 0; batch < convInfo.batchSize; ++batch) {
+ for (var channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (var yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) {
+ var xDepthCorner = yDepth * strideDepth - padFront;
+ var xDepthMin = xDepthCorner;
+ while (xDepthMin < 0) {
+ xDepthMin += dilationDepth;
+ }
+ var xDepthMax = Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner);
+ for (var yRow = 0; yRow < convInfo.outHeight; ++yRow) {
+ var xRowCorner = yRow * strideHeight - padTop;
+ var xRowMin = xRowCorner;
+ while (xRowMin < 0) {
+ xRowMin += dilationHeight;
+ }
+ var xRowMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner);
+ for (var yCol = 0; yCol < convInfo.outWidth; ++yCol) {
+ var xColCorner = yCol * strideWidth - padLeft;
+ var xColMin = xColCorner;
+ while (xColMin < 0) {
+ xColMin += dilationWidth;
+ }
+ var xColMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner);
+ // Shader code begins
+ var maxValue = Number.NEGATIVE_INFINITY;
+ var maxPosition = -1;
+ for (var xDepth = xDepthMin; xDepth < xDepthMax; xDepth += dilationDepth) {
+ var wDepth = xDepth - xDepthCorner;
+ for (var xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) {
+ var wRow = xRow - xRowCorner;
+ for (var xCol = xColMin; xCol < xColMax; xCol += dilationWidth) {
+ var wCol = xCol - xColCorner;
+ var pixel = xBuf.get(batch, xDepth, xRow, xCol, channel);
+ if (pixel >= maxValue) {
+ maxValue = pixel;
+ maxPosition =
+ wDepth * effectiveFilterHeight * effectiveFilterWidth +
+ wRow * effectiveFilterHeight + wCol;
+ }
+ }
+ }
+ }
+ maxPositions.set(maxPosition, batch, yDepth, yRow, yCol, channel);
+ }
+ }
+ }
+ }
+ }
+ return maxPositions;
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function avgPool(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ assertNotComplex(x, 'avgPool');
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode;
+ var dilations = 1;
+ tfjsCore.util.assert(tfjsCore.backend_util.eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in avgPool: Either strides or dilations must be 1. ' +
+ ("Got strides " + strides + " and dilations '" + dilations + "'"); });
+ var convInfo = tfjsCore.backend_util.computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode);
+ var res;
+ if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&
+ tfjsCore.util.arraysEqual(convInfo.inShape, convInfo.outShape)) {
+ res = identity({ inputs: { x: x }, backend: backend });
+ }
+ else {
+ var xValues = backend.data.get(x.dataId).values;
+ var strides_1 = tfjsCore.util.computeStrides(x.shape);
+ var buffer = pool(xValues, x.shape, x.dtype, strides_1, convInfo, 'avg');
+ res = backend.makeTensorInfo(convInfo.outShape, x.dtype, buffer.values);
+ }
+ return res;
+ }
+ var avgPoolConfig = {
+ kernelName: tfjsCore.AvgPool,
+ backendName: 'cpu',
+ kernelFunc: avgPool
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function avgPool3D(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode, dataFormat = attrs.dataFormat;
+ assertNotComplex(x, 'avgPool3d');
+ var convInfo = tfjsCore.backend_util.computePool3DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode, dataFormat);
+ var xValues = backend.data.get(x.dataId).values;
+ var outBuf = pool3d(xValues, x.shape, x.dtype, tfjsCore.util.computeStrides(x.shape), convInfo, 'avg');
+ return backend.makeTensorInfo(outBuf.shape, 'float32', outBuf.values);
+ }
+ var avgPool3DConfig = {
+ kernelName: tfjsCore.AvgPool3D,
+ backendName: 'cpu',
+ kernelFunc: avgPool3D
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+  * CPU kernel for `AvgPool3DGrad`: gradient of avgPool3d with respect to its
+  * input. Each input position accumulates the upstream gradient of every
+  * output cell whose window covered it, scaled by 1 / (filter volume).
+  */
+ function avgPool3DGrad(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var dy = inputs.dy, input = inputs.input;
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode;
+ assertNotComplex([dy, input], 'avgPool3DGrad');
+ var convInfo = tfjsCore.backend_util.computePool3DInfo(input.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode);
+ var strideDepth = convInfo.strideDepth;
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var filterDepth = convInfo.filterDepth;
+ var filterHeight = convInfo.filterHeight;
+ var filterWidth = convInfo.filterWidth;
+ var dilationDepth = convInfo.dilationDepth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ // Mirrored padding: the backward pass is a correlation with the window
+ // flipped, so padding is measured from the opposite edge.
+ var padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;
+ var padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ var padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ var dx = tfjsCore.buffer(input.shape, 'float32');
+ var avgMultiplier = 1 / (filterDepth * filterHeight * filterWidth);
+ var dyBuf = backend.bufferSync(dy);
+ for (var batch = 0; batch < convInfo.batchSize; ++batch) {
+ for (var channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (var dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) {
+ for (var dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) {
+ for (var dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) {
+ // Shader code begins.
+ var dyDepthCorner = dxDepth - padFront;
+ var dyRowCorner = dxRow - padTop;
+ var dyColCorner = dxCol - padLeft;
+ var dotProd = 0;
+ // Only output cells whose (strided) window exactly covered this input
+ // position contribute; the Math.floor checks reject fractional (i.e.
+ // non-aligned) output coordinates.
+ for (var wDepth = 0; wDepth < effectiveFilterDepth; wDepth += dilationDepth) {
+ var dyDepth = (dyDepthCorner + wDepth) / strideDepth;
+ if (dyDepth < 0 || dyDepth >= convInfo.outDepth ||
+ Math.floor(dyDepth) !== dyDepth) {
+ continue;
+ }
+ for (var wRow = 0; wRow < effectiveFilterHeight; wRow += dilationHeight) {
+ var dyRow = (dyRowCorner + wRow) / strideHeight;
+ if (dyRow < 0 || dyRow >= convInfo.outHeight ||
+ Math.floor(dyRow) !== dyRow) {
+ continue;
+ }
+ for (var wCol = 0; wCol < effectiveFilterWidth; wCol += dilationWidth) {
+ var dyCol = (dyColCorner + wCol) / strideWidth;
+ if (dyCol < 0 || dyCol >= convInfo.outWidth ||
+ Math.floor(dyCol) !== dyCol) {
+ continue;
+ }
+ var pixel = dyBuf.get(batch, dyDepth, dyRow, dyCol, channel);
+ dotProd += pixel;
+ }
+ }
+ }
+ dx.set(dotProd * avgMultiplier, batch, dxDepth, dxRow, dxCol, channel);
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
+ }
+ // Kernel registration binding AvgPool3DGrad to the CPU backend.
+ var avgPool3DGradConfig = {
+ kernelName: tfjsCore.AvgPool3DGrad,
+ backendName: 'cpu',
+ kernelFunc: avgPool3DGrad
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+  * CPU kernel for `AvgPoolGrad`: gradient of 2D average pooling with respect
+  * to its input. Each input position accumulates the upstream gradient of
+  * every output cell whose window covered it, scaled by 1 / (filter area).
+  */
+ function avgPoolGrad(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var dy = inputs.dy, input = inputs.input;
+ var x = input;
+ assertNotComplex([dy, input], 'avgPoolGrad');
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad;
+ var convInfo = tfjsCore.backend_util.computePool2DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad);
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var filterHeight = convInfo.filterHeight;
+ var filterWidth = convInfo.filterWidth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ // Mirrored padding: the backward pass measures padding from the opposite
+ // edge of the (flipped) window.
+ var padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ var padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ var dx = tfjsCore.buffer(x.shape, 'float32');
+ var avgMultiplier = 1 / (filterHeight * filterWidth);
+ var dyData = backend.data.get(dy.dataId).values;
+ var dyBuf = tfjsCore.buffer(dy.shape, 'float32', dyData);
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ for (var d = 0; d < convInfo.inChannels; ++d) {
+ for (var dxR = 0; dxR < convInfo.inHeight; ++dxR) {
+ for (var dxC = 0; dxC < convInfo.inWidth; ++dxC) {
+ // Shader code begins.
+ var dyRCorner = dxR - padTop;
+ var dyCCorner = dxC - padLeft;
+ var dotProd = 0;
+ // Only output cells whose (strided) window covered this input position
+ // contribute; the Math.floor checks reject non-aligned coordinates.
+ for (var wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {
+ var dyR = (dyRCorner + wR) / strideHeight;
+ if (dyR < 0 || dyR >= convInfo.outHeight ||
+ Math.floor(dyR) !== dyR) {
+ continue;
+ }
+ for (var wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {
+ var dyC = (dyCCorner + wC) / strideWidth;
+ if (dyC < 0 || dyC >= convInfo.outWidth ||
+ Math.floor(dyC) !== dyC) {
+ continue;
+ }
+ var pixel = dyBuf.get(b, dyR, dyC, d);
+ dotProd += pixel;
+ }
+ }
+ dx.set(dotProd * avgMultiplier, b, dxR, dxC, d);
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
+ }
+ // Kernel registration binding AvgPoolGrad to the CPU backend.
+ var avgPoolGradConfig = {
+ kernelName: tfjsCore.AvgPoolGrad,
+ backendName: 'cpu',
+ kernelFunc: avgPoolGrad
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for FusedBatchNorm.
 * Computes out[i] = offset + (x[i] - mean) * scale / sqrt(variance + epsilon),
 * with the mean/variance/scale/offset value arrays broadcast cyclically over
 * the flat values of x. Missing scale/offset act as the identity (1 and 0).
 */
function batchNorm(args) {
    var backend = args.backend;
    var attrs = args.attrs;
    var x = args.inputs.x;
    var scale = args.inputs.scale;
    var offset = args.inputs.offset;
    var mean = args.inputs.mean;
    var variance = args.inputs.variance;
    // Rank-consistency checks; messages mirror the gradient requirements.
    tfjsCore.util.assert(mean.shape.length === variance.shape.length, function () {
        return 'Batch normalization gradient requires mean and variance to have ' +
            'equal ranks.';
    });
    tfjsCore.util.assert(offset == null || mean.shape.length === offset.shape.length, function () {
        return 'Batch normalization gradient requires mean and offset to have ' +
            'equal ranks.';
    });
    tfjsCore.util.assert(scale == null || mean.shape.length === scale.shape.length, function () {
        return 'Batch normalization gradient requires mean and scale to have ' +
            'equal ranks.';
    });
    assertNotComplex([x, mean, variance, scale, offset], 'batchNorm');
    var epsilon = attrs.varianceEpsilon == null ? 0.001 : attrs.varianceEpsilon;
    var xVals = backend.data.get(x.dataId).values;
    var meanVals = backend.data.get(mean.dataId).values;
    var varianceVals = backend.data.get(variance.dataId).values;
    // Absent scale/offset behave as a no-op affine transform.
    var scaleVals = scale ? backend.data.get(scale.dataId).values :
        new Float32Array([1]);
    var offsetVals = offset ? backend.data.get(offset.dataId).values :
        new Float32Array([0]);
    var result = new Float32Array(xVals.length);
    for (var i = 0; i < xVals.length; ++i) {
        // Modulo indexing cycles the (shorter) parameter arrays over x,
        // exactly as the original wrap-around counters did.
        result[i] = offsetVals[i % offsetVals.length] +
            (xVals[i] - meanVals[i % meanVals.length]) *
                scaleVals[i % scaleVals.length] /
                Math.sqrt(varianceVals[i % varianceVals.length] + epsilon);
    }
    return backend.makeTensorInfo(x.shape, x.dtype, result);
}
var batchNormConfig = {
    kernelName: tfjsCore.FusedBatchNorm,
    backendName: 'cpu',
    kernelFunc: batchNorm,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for BatchToSpaceND.
 * Implemented as the canonical four-step pipeline:
 * reshape -> transpose -> reshape -> slice (the slice applies `crops`).
 * All shape/permutation bookkeeping is delegated to backend_util helpers.
 */
function batchToSpaceND(args) {
    var backend = args.backend;
    var x = args.inputs.x;
    var blockShape = args.attrs.blockShape;
    var crops = args.attrs.crops;
    assertNotComplex([x], 'batchToSpaceND');
    // Number of batch entries folded back into the spatial dimensions.
    var blockProd = blockShape.reduce(function (a, b) { return a * b; });
    var reshapedShape = tfjsCore.backend_util.getReshaped(x.shape, blockShape, blockProd);
    var perm = tfjsCore.backend_util.getPermuted(reshapedShape.length, blockShape.length);
    var permutedShape = tfjsCore.backend_util.getReshapedPermuted(x.shape, blockShape, blockProd);
    var cropBegin = tfjsCore.backend_util.getSliceBeginCoords(crops, blockShape.length);
    var cropSize = tfjsCore.backend_util.getSliceSize(permutedShape, crops, blockShape.length);
    var step1 = reshape({ inputs: { x: x }, backend: backend, attrs: { shape: reshapedShape } });
    var step2 = transpose({ inputs: { x: step1 }, backend: backend, attrs: { perm: perm } });
    var step3 = reshape({ inputs: { x: step2 }, backend: backend, attrs: { shape: permutedShape } });
    var out = slice({
        inputs: { x: step3 },
        backend: backend,
        attrs: { begin: cropBegin, size: cropSize }
    });
    // Only the sliced result is returned; release the intermediates.
    [step1, step2, step3].forEach(function (t) {
        backend.disposeIntermediateTensorInfo(t);
    });
    return out;
}
var batchToSpaceNDConfig = {
    kernelName: tfjsCore.BatchToSpaceND,
    backendName: 'cpu',
    kernelFunc: batchToSpaceND
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for Bincount: counts occurrences of each value in `x`
 * (optionally weighted by `weights`) into an output of length `size`.
 * The actual counting is done by the shared bincountImpl helper.
 */
function bincount(args) {
    var backend = args.backend;
    var x = args.inputs.x;
    var weights = args.inputs.weights;
    var size = args.attrs.size;
    var indexVals = backend.data.get(x.dataId).values;
    var weightVals = backend.data.get(weights.dataId).values;
    var counts = bincountImpl(indexVals, weightVals, weights.dtype, weights.shape, size);
    // Output dtype follows the weights tensor, matching upstream semantics.
    return backend.makeTensorInfo([size], weights.dtype, counts);
}
var bincountConfig = {
    kernelName: tfjsCore.Bincount,
    backendName: 'cpu',
    kernelFunc: bincount
};
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for BroadcastArgs: given two shape vectors s0 and s1,
 * returns their broadcast shape as a 1-D int32 tensor.
 * Shape validation is delegated to assertAndGetBroadcastShape.
 */
function broadcastArgs(args) {
    var backend = args.backend;
    var s0 = args.inputs.s0;
    var s1 = args.inputs.s1;
    var shape0 = Array.from(backend.data.get(s0.dataId).values);
    var shape1 = Array.from(backend.data.get(s1.dataId).values);
    var broadcast = tfjsCore.backend_util.assertAndGetBroadcastShape(shape0, shape1);
    return backend.makeTensorInfo([broadcast.length], 'int32', Int32Array.from(broadcast));
}
var broadcastArgsConfig = {
    kernelName: tfjsCore.BroadcastArgs,
    backendName: 'cpu',
    kernelFunc: broadcastArgs
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for ClipByValue, built from the shared element-wise helper.
 * Uses explicit comparisons rather than Math.min/Math.max so that NaN
 * inputs pass through unchanged (both comparisons are false for NaN),
 * exactly as the original per-element logic behaved.
 */
var clip = unaryKernelFunc(tfjsCore.ClipByValue, function (value, attrs) {
    var clipAttrs = attrs;
    var hi = clipAttrs.clipValueMax;
    var lo = clipAttrs.clipValueMin;
    return value > hi ? hi : (value < lo ? lo : value);
});
var clipConfig = {
    kernelName: tfjsCore.ClipByValue,
    backendName: 'cpu',
    kernelFunc: clip,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for ComplexAbs: |a + bi| = sqrt(a^2 + b^2) per element.
 * Reads the real/imag component tensors stored alongside the complex
 * tensor and produces a float32 output of the same shape.
 */
var complexAbs = function (args) {
    var x = args.inputs.x;
    var backend = args.backend;
    var parts = backend.data.get(x.dataId).complexTensorInfos;
    var realVals = backend.data.get(parts.real.dataId).values;
    var imagVals = backend.data.get(parts.imag.dataId).values;
    var out = new Float32Array(tfjsCore.util.sizeFromShape(x.shape));
    for (var i = 0; i < realVals.length; ++i) {
        // Math.hypot computes sqrt(a^2 + b^2) without intermediate overflow.
        out[i] = Math.hypot(realVals[i], imagVals[i]);
    }
    return backend.makeOutput(out, x.shape, 'float32');
};
var complexAbsConfig = {
    kernelName: tfjsCore.ComplexAbs,
    backendName: 'cpu',
    kernelFunc: complexAbs,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for Imag: extracts the imaginary component of a complex tensor.
 * The values are wrapped in a NEW tensor info so they remain accessible even
 * after the parent complex tensor (and its underlying parts) is disposed.
 */
function imag(args) {
    var backend = args.backend;
    var input = args.inputs.input;
    var imagPart = backend.data.get(input.dataId).complexTensorInfos.imag;
    var imagValues = backend.data.get(imagPart.dataId).values;
    return backend.makeTensorInfo(imagPart.shape, imagPart.dtype, imagValues);
}
+ // Kernel registration record binding the Imag kernel to the CPU backend.
+ var imagConfig = {
+ kernelName: tfjsCore.Imag,
+ backendName: 'cpu',
+ kernelFunc: imag
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+  * CPU kernel for Concat: joins a list of tensor infos along `axis`.
+  * Empty inputs are dropped; a zero-size output short-circuits; complex64
+  * inputs are handled by concatenating the real and imaginary planes
+  * separately and recombining. The general N-D case is reduced to a 2-D
+  * concat along axis 1 by collapsing the axes before/after `$axis`.
+  */
+ function concat(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var axis = attrs.axis;
+ var $axis = tfjsCore.util.parseAxisParam(axis, inputs[0].shape)[0];
+ var outShape = tfjsCore.backend_util.computeOutShape(inputs.map(function (t) { return t.shape; }), $axis);
+ // Short-circuit: a zero-element result needs no value computation.
+ if (tfjsCore.util.sizeFromShape(outShape) === 0) {
+ return backend.makeTensorInfo(outShape, inputs[0].dtype, []);
+ }
+ // Keep only non-empty tensors (ignore tensors with 0 in their shape).
+ var $inputs = inputs.filter(function (t) { return tfjsCore.util.sizeFromShape(t.shape) > 0; });
+ if ($inputs.length === 1) {
+ return identity({ inputs: { x: $inputs[0] }, backend: backend });
+ }
+ var shapes = $inputs.map(function (t) { return t.shape; });
+ tfjsCore.backend_util.assertParamsConsistent(shapes, $axis);
+ // complex64: recurse on the real and imaginary planes independently,
+ // then rebuild a complex tensor and release every intermediate.
+ if ($inputs[0].dtype === 'complex64') {
+ var reals = $inputs.map(function (t) { return real({ inputs: { input: t }, backend: backend }); });
+ var imags = $inputs.map(function (t) { return imag({ inputs: { input: t }, backend: backend }); });
+ var realConcated = concat({ inputs: reals, backend: backend, attrs: { axis: $axis } });
+ var imagConcated = concat({ inputs: imags, backend: backend, attrs: { axis: $axis } });
+ var result = complex({ inputs: { real: realConcated, imag: imagConcated }, backend: backend });
+ reals.forEach(function (r) { return backend.disposeIntermediateTensorInfo(r); });
+ imags.forEach(function (i) { return backend.disposeIntermediateTensorInfo(i); });
+ backend.disposeIntermediateTensorInfo(realConcated);
+ backend.disposeIntermediateTensorInfo(imagConcated);
+ return result;
+ }
+ // Any concat of n-dimensional tensors across any axis can be reduced to
+ // a concatenation of two-dimensional tensors across the axis 1 by first
+ // partitioning the axes of the original tensors into those less than the
+ // axis to be concatenated and the rest. Then reshape the tensors
+ // into a two-dimensional tensor by collapsing these two sets of axes and
+ // concatenate the resulting matrices across the axis 1, finally reshaping
+ // the result to have the proper shape.
+ var inputs2D = $inputs.map(function (t) {
+ var innerSize = tfjsCore.util.sizeFromShape(t.shape.slice($axis));
+ var shape = [-1, innerSize];
+ return reshape({ inputs: { x: t }, backend: backend, attrs: { shape: shape } });
+ });
+ var inputsValShapes = inputs2D.map(function (t) {
+ return { vals: backend.data.get(t.dataId).values, shape: t.shape };
+ });
+ // Concats 2d tensors along axis=1.
+ outShape =
+ tfjsCore.backend_util.computeOutShape(inputs2D.map(function (t) { return t.shape; }), 1 /* axis */);
+ // NOTE(review): simplyConcat looks like a fast-path flag letting
+ // concatImpl append flat value arrays when each input has one row —
+ // confirm against concatImpl.
+ var simplyConcat = inputs2D[0].shape[0] === 1;
+ var outVals = concatImpl(inputsValShapes, outShape, inputs[0].dtype, simplyConcat);
+ var finalOutShape = tfjsCore.backend_util.computeOutShape($inputs.map(function (t) { return t.shape; }), $axis);
+ var outInfo = backend.makeTensorInfo(finalOutShape, inputs[0].dtype, outVals);
+ inputs2D.forEach(function (t) { return backend.disposeIntermediateTensorInfo(t); });
+ return outInfo;
+ }
+ var concatConfig = {
+ kernelName: tfjsCore.Concat,
+ backendName: 'cpu',
+ kernelFunc: concat
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+  * CPU kernel for Conv2D: direct (non-im2col) 2-D convolution.
+  * Walks batch -> output row -> filter row -> output col -> filter col,
+  * skipping filter taps that land in the zero-padded border, and for each
+  * surviving tap accumulates input * weight into every output channel.
+  * NHWC vs. NCHW layout is handled purely through the stride variables.
+  * Filter layout is [filterHeight, filterWidth, inChannels, outChannels]
+  * (see the filterStrides usage and the outChannels-sized inner stride).
+  */
+ function conv2D(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, filter = inputs.filter;
+ var strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode;
+ assertNotComplex([x, filter], 'conv2d');
+ var $dataFormat = tfjsCore.backend_util.convertConv2DDataFormat(dataFormat);
+ var convInfo = tfjsCore.backend_util.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad, dimRoundingMode, false /* depthwise */, $dataFormat);
+ var filterHeight = convInfo.filterHeight;
+ var filterWidth = convInfo.filterWidth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var padLeft = convInfo.padInfo.left;
+ var padTop = convInfo.padInfo.top;
+ var isChannelsLast = convInfo.dataFormat === 'channelsLast';
+ // The output buffer starts zero-filled; the loops below only add into it.
+ var y = new tfjsCore.TensorBuffer(convInfo.outShape, x.dtype);
+ var xStrides = tfjsCore.util.computeStrides(x.shape);
+ var filterStrides = tfjsCore.util.computeStrides(filter.shape);
+ var xBatchStride = xStrides[0];
+ var xRowStride = isChannelsLast ? xStrides[1] : xStrides[2];
+ var xColStride = isChannelsLast ? xStrides[2] : 1;
+ var xChannelStride = isChannelsLast ? 1 : xStrides[1];
+ var yBatchStride = y.strides[0];
+ var yRowStride = isChannelsLast ? y.strides[1] : y.strides[2];
+ var yColStride = isChannelsLast ? y.strides[2] : 1;
+ var yChannelStride = isChannelsLast ? 1 : y.strides[1];
+ var xVals = backend.data.get(x.dataId).values;
+ var wVals = backend.data.get(filter.dataId).values;
+ var yVals = y.values;
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ var xOffset1 = b * xBatchStride;
+ var yOffset1 = b * yBatchStride;
+ for (var yR = 0; yR < convInfo.outHeight; ++yR) {
+ var yOffset2 = yOffset1 + yR * yRowStride;
+ var xRCorner = yR * convInfo.strideHeight - padTop;
+ for (var wR = 0; wR < filterHeight; ++wR) {
+ var xR = xRCorner + wR * dilationHeight;
+ // Tap falls in the top/bottom zero padding: contributes nothing.
+ if (xR < 0 || xR >= convInfo.inHeight) {
+ continue;
+ }
+ var wOffset1 = wR * filterStrides[0];
+ var xOffset2 = xOffset1 + xR * xRowStride;
+ for (var yC = 0; yC < convInfo.outWidth; ++yC) {
+ var yOffset3 = yOffset2 + yC * yColStride;
+ var xCCorner = yC * convInfo.strideWidth - padLeft;
+ for (var wC = 0; wC < filterWidth; ++wC) {
+ var xC = xCCorner + wC * dilationWidth;
+ // Tap falls in the left/right zero padding: skip.
+ if (xC < 0 || xC >= convInfo.inWidth) {
+ continue;
+ }
+ var wOffset2 = wOffset1 + wC * filterStrides[1];
+ var xOffset3 = xOffset2 + xC * xColStride;
+ var wOffset3 = wOffset2;
+ // Accumulate this tap into all (inChannel, outChannel) pairs.
+ for (var d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ var xVal = xVals[xOffset3 + d1 * xChannelStride];
+ for (var d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ yVals[yOffset3 + d2 * yChannelStride] +=
+ xVal * wVals[wOffset3 + d2];
+ }
+ wOffset3 += convInfo.outChannels;
+ }
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(y.shape, y.dtype, yVals);
+ }
+ var conv2DConfig = {
+ kernelName: tfjsCore.Conv2D,
+ backendName: 'cpu',
+ kernelFunc: conv2D
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+  * CPU kernel for Conv2DBackpropFilter: gradient of conv2d w.r.t. the filter.
+  * For each filter tap (wR, wC) and channel pair (d1, d2), sums
+  * x * dy over every batch and output position where that tap overlapped
+  * the (unpadded) input. yRMin/yRMax and yCMin/yCMax clamp the output
+  * range so the corresponding input coordinate stays in bounds.
+  * Dilations are fixed at 1 for this gradient (see computeConv2DInfo call).
+  */
+ function conv2DBackpropFilter(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, dy = inputs.dy;
+ var strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dimRoundingMode = attrs.dimRoundingMode, filterShape = attrs.filterShape;
+ assertNotComplex([x, dy], 'conv2dBackpropFilter');
+ var $dataFormat = tfjsCore.backend_util.convertConv2DDataFormat(dataFormat);
+ var convInfo = tfjsCore.backend_util.computeConv2DInfo(x.shape, filterShape, strides, 1 /* dilations */, pad, dimRoundingMode, false /* depthwise */, $dataFormat);
+ var strideHeight = convInfo.strideHeight, strideWidth = convInfo.strideWidth, filterHeight = convInfo.filterHeight, filterWidth = convInfo.filterWidth;
+ var isChannelsLast = convInfo.dataFormat === 'channelsLast';
+ var dW = new tfjsCore.TensorBuffer(convInfo.filterShape, 'float32');
+ var leftPad = convInfo.padInfo.left;
+ var topPad = convInfo.padInfo.top;
+ var xVals = backend.data.get(x.dataId).values;
+ var dyVals = backend.data.get(dy.dataId).values;
+ // TensorBuffers give multi-dimensional get() access over the flat values.
+ var xBuf = new tfjsCore.TensorBuffer(x.shape, x.dtype, xVals);
+ var dyBuf = new tfjsCore.TensorBuffer(dy.shape, dy.dtype, dyVals);
+ for (var wR = 0; wR < filterHeight; ++wR) {
+ var yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));
+ var yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);
+ for (var wC = 0; wC < filterWidth; ++wC) {
+ var yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));
+ var yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);
+ for (var d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ for (var d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ var dotProd = 0;
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ for (var yR = yRMin; yR < yRMax; ++yR) {
+ var xR = wR + yR * strideHeight - topPad;
+ for (var yC = yCMin; yC < yCMax; ++yC) {
+ var xC = wC + yC * strideWidth - leftPad;
+ // Index order differs by layout: NHWC vs. NCHW.
+ if (isChannelsLast) {
+ dotProd += xBuf.get(b, xR, xC, d1) *
+ dyBuf.get(b, yR, yC, d2);
+ }
+ else {
+ dotProd += xBuf.get(b, d1, xR, xC) *
+ dyBuf.get(b, d2, yR, yC);
+ }
+ }
+ }
+ }
+ dW.set(dotProd, wR, wC, d1, d2);
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dW.shape, dW.dtype, dW.values);
+ }
+ var conv2DBackpropFilterConfig = {
+ kernelName: tfjsCore.Conv2DBackpropFilter,
+ backendName: 'cpu',
+ kernelFunc: conv2DBackpropFilter
+ };
+
+ /**
+  * CPU kernel for Conv2DBackpropInput: gradient of conv2d w.r.t. the input.
+  * Computes, for each input position, a correlation of dy against the
+  * 180-degree-rotated filter — note the (filterHeight - 1 - wR) /
+  * (filterWidth - 1 - wC) indexing and the flipped padding
+  * (filter size - 1 - forward pad). Dilations are fixed at 1 here.
+  */
+ function conv2DBackpropInput(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var dy = inputs.dy, filter = inputs.filter;
+ var inputShape = attrs.inputShape, strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dimRoundingMode = attrs.dimRoundingMode;
+ assertNotComplex([dy, filter], 'conv2dBackpropInput');
+ var filterStrides = tfjsCore.util.computeStrides(filter.shape);
+ var dyStrides = tfjsCore.util.computeStrides(dy.shape);
+ var $dataFormat = tfjsCore.backend_util.convertConv2DDataFormat(dataFormat);
+ var convInfo = tfjsCore.backend_util.computeConv2DInfo(inputShape, filter.shape, strides, 1 /* dilations */, pad, dimRoundingMode, false, $dataFormat);
+ var dx = new tfjsCore.TensorBuffer(convInfo.inShape, 'float32');
+ var dxValues = dx.values;
+ var dyValues = backend.data.get(dy.dataId).values;
+ var fltValues = backend.data.get(filter.dataId).values;
+ var _a = __read(filterStrides, 3), fltS0 = _a[0], fltS1 = _a[1], fltS2 = _a[2];
+ var batchSize = convInfo.batchSize, filterHeight = convInfo.filterHeight, filterWidth = convInfo.filterWidth, inChannels = convInfo.inChannels, inHeight = convInfo.inHeight, inWidth = convInfo.inWidth, outChannels = convInfo.outChannels, outHeight = convInfo.outHeight, outWidth = convInfo.outWidth, strideHeight = convInfo.strideHeight, strideWidth = convInfo.strideWidth;
+ $dataFormat = convInfo.dataFormat;
+ // "Backward" padding: filter size - 1 - forward padding.
+ var topPad = filterHeight - 1 - convInfo.padInfo.top;
+ var leftPad = filterWidth - 1 - convInfo.padInfo.left;
+ var isChannelsLast = $dataFormat === 'channelsLast';
+ var xBatchStride = dx.strides[0];
+ var xRowStride = isChannelsLast ? dx.strides[1] : dx.strides[2];
+ var xColStride = isChannelsLast ? dx.strides[2] : 1;
+ var xChannelStride = isChannelsLast ? 1 : dx.strides[1];
+ var yBatchStride = dyStrides[0];
+ var yRowStride = isChannelsLast ? dyStrides[1] : dyStrides[2];
+ var yColStride = isChannelsLast ? dyStrides[2] : 1;
+ var yChannelStride = isChannelsLast ? 1 : dyStrides[1];
+ for (var b = 0; b < batchSize; ++b) {
+ for (var d1 = 0; d1 < inChannels; ++d1) {
+ for (var xR = 0; xR < inHeight; ++xR) {
+ var xRCorner = xR - topPad;
+ // Bounds on the output rows/cols whose receptive field covers (xR, xC).
+ var xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));
+ var yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);
+ for (var xC = 0; xC < inWidth; ++xC) {
+ var xCCorner = xC - leftPad;
+ var xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));
+ var yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);
+ var dotProd = 0;
+ for (var yR = xRMin; yR < yRMax; ++yR) {
+ var wR = yR * strideHeight - xRCorner;
+ for (var yC = xCMin; yC < yCMax; ++yC) {
+ var wC = yC * strideWidth - xCCorner;
+ var dyOffset = yBatchStride * b + yRowStride * yR + yColStride * yC;
+ // Rotated-filter lookup for the transposed convolution.
+ var fltOffset = fltS0 * (filterHeight - 1 - wR) +
+ fltS1 * (filterWidth - 1 - wC) + fltS2 * d1;
+ for (var d2 = 0; d2 < outChannels; ++d2) {
+ var pixel = dyValues[dyOffset + yChannelStride * d2];
+ var weight = fltValues[fltOffset + d2];
+ dotProd += pixel * weight;
+ }
+ }
+ }
+ var dxOffset = xBatchStride * b + xRowStride * xR +
+ xColStride * xC + xChannelStride * d1;
+ dxValues[dxOffset] = dotProd;
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
+ }
+ var conv2DBackpropInputConfig = {
+ kernelName: tfjsCore.Conv2DBackpropInput,
+ backendName: 'cpu',
+ kernelFunc: conv2DBackpropInput
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+  * CPU kernel for Conv3D: direct 3-D convolution over depth/height/width.
+  * Same accumulation scheme as conv2D, extended with a depth dimension:
+  * taps falling in the zero-padded border are skipped via `continue`.
+  * Channels are innermost in both input and output (note the
+  * `xC * convInfo.inChannels` / `yC * convInfo.outChannels` offsets),
+  * i.e. a channels-last (NDHWC) layout.
+  */
+ function conv3D(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, filter = inputs.filter;
+ var strides = attrs.strides, pad = attrs.pad, dilations = attrs.dilations;
+ assertNotComplex([x, filter], 'conv3d');
+ var convInfo = tfjsCore.backend_util.computeConv3DInfo(x.shape, filter.shape, strides, dilations, pad);
+ var filterDepth = convInfo.filterDepth, filterHeight = convInfo.filterHeight, filterWidth = convInfo.filterWidth, dilationDepth = convInfo.dilationDepth, dilationHeight = convInfo.dilationHeight, dilationWidth = convInfo.dilationWidth, padInfo = convInfo.padInfo;
+ var padFront = padInfo.front;
+ var padLeft = padInfo.left;
+ var padTop = padInfo.top;
+ // Output buffer starts zero-filled; the loops below only add into it.
+ var y = new tfjsCore.TensorBuffer(convInfo.outShape, x.dtype);
+ var xVals = backend.data.get(x.dataId).values;
+ var wVals = backend.data.get(filter.dataId).values;
+ var yVals = y.values;
+ var xStrides = tfjsCore.util.computeStrides(x.shape);
+ var filterStrides = tfjsCore.util.computeStrides(filter.shape);
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ var xOffset1 = b * xStrides[0];
+ var yOffset1 = b * y.strides[0];
+ for (var yF = 0; yF < convInfo.outDepth; ++yF) {
+ var yOffset2 = yOffset1 + yF * y.strides[1];
+ var xFCorner = yF * convInfo.strideDepth - padFront;
+ for (var wF = 0; wF < filterDepth; ++wF) {
+ var xF = xFCorner + wF * dilationDepth;
+ // Depth tap in the front/back zero padding: skip.
+ if (xF < 0 || xF >= convInfo.inDepth) {
+ continue;
+ }
+ var wOffset1 = wF * filterStrides[0];
+ var xOffset2 = xOffset1 + xF * xStrides[1];
+ for (var yR = 0; yR < convInfo.outHeight; ++yR) {
+ var yOffset3 = yOffset2 + yR * y.strides[2];
+ var xRCorner = yR * convInfo.strideHeight - padTop;
+ for (var wR = 0; wR < filterHeight; ++wR) {
+ var xR = xRCorner + wR * dilationHeight;
+ if (xR < 0 || xR >= convInfo.inHeight) {
+ continue;
+ }
+ var wOffset2 = wOffset1 + wR * filterStrides[1];
+ var xOffset3 = xOffset2 + xR * xStrides[2];
+ for (var yC = 0; yC < convInfo.outWidth; ++yC) {
+ var yOffset4 = yOffset3 + yC * convInfo.outChannels;
+ var xCCorner = yC * convInfo.strideWidth - padLeft;
+ for (var wC = 0; wC < filterWidth; ++wC) {
+ var xC = xCCorner + wC * dilationWidth;
+ if (xC < 0 || xC >= convInfo.inWidth) {
+ continue;
+ }
+ var wOffset3 = wOffset2 + wC * filterStrides[2];
+ var xOffset4 = xOffset3 + xC * convInfo.inChannels;
+ var wOffset4 = wOffset3;
+ // Accumulate across all (inChannel, outChannel) pairs.
+ for (var d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ var xVal = xVals[xOffset4 + d1];
+ for (var d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ yVals[yOffset4 + d2] += xVal * wVals[wOffset4 + d2];
+ }
+ wOffset4 += convInfo.outChannels;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(y.shape, y.dtype, y.values);
+ }
+ var conv3DConfig = {
+ kernelName: tfjsCore.Conv3D,
+ backendName: 'cpu',
+ kernelFunc: conv3D
+ };
+
+ /**
+  * CPU kernel for Conv3DBackpropFilterV2: gradient of conv3d w.r.t. the
+  * filter. For each filter tap (wF, wR, wC) and channel pair (d1, d2),
+  * sums x * dy over every batch and output position where the tap
+  * overlapped the unpadded input; the yFMin/yFMax (etc.) bounds restrict
+  * the sums to in-bounds positions. Dilations are fixed at 1
+  * (see the computeConv3DInfo call).
+  */
+ function conv3DBackpropFilterV2(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, dy = inputs.dy;
+ var strides = attrs.strides, pad = attrs.pad, filterShape = attrs.filterShape;
+ assertNotComplex([x, dy], 'conv3dBackpropFilterV2');
+ var xStrides = tfjsCore.util.computeStrides(x.shape);
+ var dyStrides = tfjsCore.util.computeStrides(dy.shape);
+ var convInfo = tfjsCore.backend_util.computeConv3DInfo(x.shape, filterShape, strides, 1 /* dilations */, pad);
+ var strideDepth = convInfo.strideDepth;
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var filterDepth = convInfo.filterDepth;
+ var filterHeight = convInfo.filterHeight;
+ var filterWidth = convInfo.filterWidth;
+ var dw = new tfjsCore.TensorBuffer(convInfo.filterShape, 'float32');
+ var dwValues = dw.values;
+ // Destructured strides for the filter-gradient, dy and x buffers.
+ var _a = __read(dw.strides, 4), dwS0 = _a[0], dwS1 = _a[1], dwS2 = _a[2], dwS3 = _a[3];
+ var dyValues = backend.data.get(dy.dataId).values;
+ var _b = __read(dyStrides, 4), dyS0 = _b[0], dyS1 = _b[1], dyS2 = _b[2], dyS3 = _b[3];
+ var xValues = backend.data.get(x.dataId).values;
+ var _c = __read(xStrides, 4), xS0 = _c[0], xS1 = _c[1], xS2 = _c[2], xS3 = _c[3];
+ var frontPad = convInfo.padInfo.front;
+ var leftPad = convInfo.padInfo.left;
+ var topPad = convInfo.padInfo.top;
+ for (var wF = 0; wF < filterDepth; ++wF) {
+ // Output-depth range for which this tap stays inside the input.
+ var yFMin = Math.max(0, Math.ceil((frontPad - wF) / strideDepth));
+ var yFMax = Math.min(convInfo.outDepth, (convInfo.inDepth + frontPad - wF) / strideDepth);
+ var wOffset1 = wF * dwS0;
+ for (var wR = 0; wR < filterHeight; ++wR) {
+ var yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));
+ var yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);
+ var wOffset2 = wR * dwS1 + wOffset1;
+ for (var wC = 0; wC < filterWidth; ++wC) {
+ var yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));
+ var yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);
+ var wOffset3 = wC * dwS2 + wOffset2;
+ for (var d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ var wOffset4 = d1 * dwS3 + wOffset3;
+ for (var d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ var dotProd = 0;
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ var xOffset1 = b * xS0;
+ var yOffset1 = b * dyS0;
+ for (var yF = yFMin; yF < yFMax; ++yF) {
+ var xF = wF + yF * strideDepth - frontPad;
+ var xOffset2 = xF * xS1 + xOffset1;
+ var yOffset2 = yF * dyS1 + yOffset1;
+ for (var yR = yRMin; yR < yRMax; ++yR) {
+ var xR = wR + yR * strideHeight - topPad;
+ var xOffset3 = xR * xS2 + xOffset2;
+ var yOffset3 = yR * dyS2 + yOffset2;
+ for (var yC = yCMin; yC < yCMax; ++yC) {
+ var xC = wC + yC * strideWidth - leftPad;
+ var xOffset4 = xC * xS3 + xOffset3;
+ var yOffset4 = yC * dyS3 + yOffset3;
+ dotProd += xValues[xOffset4 + d1] * dyValues[yOffset4 + d2];
+ }
+ }
+ }
+ }
+ dwValues[wOffset4 + d2] = dotProd;
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dw.shape, dw.dtype, dw.values);
+ }
+ var conv3DBackpropFilterV2Config = {
+ kernelName: tfjsCore.Conv3DBackpropFilterV2,
+ backendName: 'cpu',
+ kernelFunc: conv3DBackpropFilterV2
+ };
+
+ /**
+  * CPU kernel for Conv3DBackpropInputV2: gradient of conv3d w.r.t. the
+  * input. For each input position, correlates dy with the spatially
+  * rotated filter (note the `filterDepth - 1 - wF`, `filterHeight - 1 - wR`,
+  * `filterWidth - 1 - wC` indexing) using the flipped padding
+  * (filter size - 1 - forward pad). Dilations are fixed at 1 here.
+  */
+ function conv3DBackpropInputV2(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var dy = inputs.dy, filter = inputs.filter;
+ var pad = attrs.pad, strides = attrs.strides, inputShape = attrs.inputShape;
+ assertNotComplex([dy], 'conv3dBackpropInputV2');
+ var dyStrides = tfjsCore.util.computeStrides(dy.shape);
+ var filterStrides = tfjsCore.util.computeStrides(filter.shape);
+ var convInfo = tfjsCore.backend_util.computeConv3DInfo(inputShape, filter.shape, strides, 1 /* dilations */, pad);
+ var dx = new tfjsCore.TensorBuffer(convInfo.inShape, 'float32');
+ var dxValues = dx.values;
+ var _a = __read(dx.strides, 4), dxS0 = _a[0], dxS1 = _a[1], dxS2 = _a[2], dxS3 = _a[3];
+ var dyValues = backend.data.get(dy.dataId).values;
+ var _b = __read(dyStrides, 4), dyS0 = _b[0], dyS1 = _b[1], dyS2 = _b[2], dyS3 = _b[3];
+ var fltValues = backend.data.get(filter.dataId).values;
+ var _c = __read(filterStrides, 4), fltS0 = _c[0], fltS1 = _c[1], fltS2 = _c[2], fltS3 = _c[3];
+ var batchSize = convInfo.batchSize, filterDepth = convInfo.filterDepth, filterHeight = convInfo.filterHeight, filterWidth = convInfo.filterWidth, inChannels = convInfo.inChannels, inDepth = convInfo.inDepth, inHeight = convInfo.inHeight, inWidth = convInfo.inWidth, outChannels = convInfo.outChannels, outDepth = convInfo.outDepth, outHeight = convInfo.outHeight, outWidth = convInfo.outWidth, strideDepth = convInfo.strideDepth, strideHeight = convInfo.strideHeight, strideWidth = convInfo.strideWidth;
+ // "Backward" padding: filter size - 1 - forward padding.
+ var frontPad = filterDepth - 1 - convInfo.padInfo.front;
+ var topPad = filterHeight - 1 - convInfo.padInfo.top;
+ var leftPad = filterWidth - 1 - convInfo.padInfo.left;
+ for (var b = 0; b < batchSize; ++b) {
+ for (var d1 = 0; d1 < inChannels; ++d1) {
+ // Frames of depth
+ for (var xF = 0; xF < inDepth; ++xF) {
+ var xFCorner = xF - frontPad;
+ var xFMin = Math.max(0, Math.ceil(xFCorner / strideDepth));
+ var yFMax = Math.min(outDepth, (filterDepth + xFCorner) / strideDepth);
+ // Rows as per standard 2d matrix notation
+ for (var xR = 0; xR < inHeight; ++xR) {
+ var xRCorner = xR - topPad;
+ var xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));
+ var yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);
+ // Columns as per standard 2d matrix notation
+ for (var xC = 0; xC < inWidth; ++xC) {
+ var xCCorner = xC - leftPad;
+ var xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));
+ var yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);
+ var dotProd = 0;
+ // Sum over the output positions whose receptive field
+ // covers this input voxel.
+ for (var yF = xFMin; yF < yFMax; ++yF) {
+ var wF = yF * strideDepth - xFCorner;
+ for (var yR = xRMin; yR < yRMax; ++yR) {
+ var wR = yR * strideHeight - xRCorner;
+ for (var yC = xCMin; yC < yCMax; ++yC) {
+ var wC = yC * strideWidth - xCCorner;
+ var dyOffset = dyS0 * b + dyS1 * yF + dyS2 * yR + dyS3 * yC;
+ // Rotated-filter lookup for the transposed convolution.
+ var fltOffset = fltS0 * (filterDepth - 1 - wF) +
+ fltS1 * (filterHeight - 1 - wR) +
+ fltS2 * (filterWidth - 1 - wC) + fltS3 * d1;
+ for (var d2 = 0; d2 < outChannels; ++d2) {
+ var pixel = dyValues[dyOffset + d2];
+ var weight = fltValues[fltOffset + d2];
+ dotProd += pixel * weight;
+ }
+ }
+ }
+ }
+ dxValues[dxS0 * b + dxS1 * xF + dxS2 * xR + dxS3 * xC + d1] =
+ dotProd;
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
+ }
+ var conv3DBackpropInputV2Config = {
+ kernelName: tfjsCore.Conv3DBackpropInputV2,
+ backendName: 'cpu',
+ kernelFunc: conv3DBackpropInputV2
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var cos = unaryKernelFunc(tfjsCore.Cos, function (xi) { return Math.cos(xi); });
+ var cosConfig = {
+ kernelName: tfjsCore.Cos,
+ backendName: 'cpu',
+ kernelFunc: cos,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var cosh = unaryKernelFunc(tfjsCore.Cosh, function (xi) { return Math.cosh(xi); });
+ var coshConfig = {
+ kernelName: tfjsCore.Cosh,
+ backendName: 'cpu',
+ kernelFunc: cosh,
+ };
+
+ // CPU kernel for CropAndResize: extracts `numBoxes` regions (normalized box
+ // coordinates [y1, x1, y2, x2] per row of `boxes`, batch index per `boxInd`)
+ // from `image` and resizes each to [cropHeight, cropWidth] using either
+ // bilinear or nearest-neighbor sampling. Samples falling outside the image
+ // are filled with `extrapolationValue`. Output dtype is float32.
+ function cropAndResize(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var image = inputs.image, boxes = inputs.boxes, boxInd = inputs.boxInd;
+ var cropSize = attrs.cropSize, method = attrs.method, extrapolationValue = attrs.extrapolationValue;
+ var _a = __read(image.shape, 4), batch = _a[0], imageHeight = _a[1], imageWidth = _a[2], numChannels = _a[3];
+ var numBoxes = boxes.shape[0];
+ var _b = __read(cropSize, 2), cropHeight = _b[0], cropWidth = _b[1];
+ var output = tfjsCore.buffer([numBoxes, cropHeight, cropWidth, numChannels], 'float32');
+ var boxVals = backend.data.get(boxes.dataId).values;
+ var boxIndVals = backend.data.get(boxInd.dataId).values;
+ var imageVals = backend.data.get(image.dataId).values;
+ var inStride = tfjsCore.util.computeStrides(image.shape); // to calculate flat indexes into image
+ var outStride = tfjsCore.util.computeStrides(output.shape); // to calculate flat indexes into output
+ // Reference implementation
+ // tslint:disable-next-line:max-line-length
+ // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op.cc
+ for (var b = 0; b < numBoxes; b++) {
+ // Each box is four consecutive floats: [y1, x1, y2, x2] in normalized coords.
+ var startInd = b * 4;
+ var y1 = boxVals[startInd];
+ var x1 = boxVals[startInd + 1];
+ var y2 = boxVals[startInd + 2];
+ var x2 = boxVals[startInd + 3];
+ var bInd = boxIndVals[b];
+ // Out-of-range batch index: the box's output rows are left untouched.
+ // NOTE(review): negative bInd is not rejected here — presumably validated
+ // upstream; confirm against the op-level checks.
+ if (bInd >= batch) {
+ continue;
+ }
+ var heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / (cropHeight - 1) : 0;
+ var widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / (cropWidth - 1) : 0;
+ for (var y = 0; y < cropHeight; y++) {
+ // Source row in image coordinates; a 1-pixel-tall crop samples the
+ // vertical center of the box.
+ var yInd = (cropHeight > 1) ?
+ y1 * (imageHeight - 1) + y * (heightScale) :
+ 0.5 * (y1 + y2) * (imageHeight - 1);
+ // Whole row out of bounds: fill with the extrapolation value.
+ if (yInd < 0 || yInd > imageHeight - 1) {
+ for (var x = 0; x < cropWidth; x++) {
+ for (var c = 0; c < numChannels; c++) {
+ var ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = extrapolationValue;
+ }
+ }
+ continue;
+ }
+ if (method === 'bilinear') {
+ var topInd = Math.floor(yInd);
+ var bottomInd = Math.ceil(yInd);
+ var yLerp = yInd - topInd;
+ for (var x = 0; x < cropWidth; x++) {
+ var xInd = (cropWidth > 1) ?
+ x1 * (imageWidth - 1) + x * widthScale :
+ 0.5 * (x1 + x2) * (imageWidth - 1);
+ // Column out of bounds: extrapolate this pixel only.
+ if (xInd < 0 || xInd > imageWidth - 1) {
+ for (var c = 0; c < numChannels; c++) {
+ var ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = extrapolationValue;
+ }
+ continue;
+ }
+ var leftInd = Math.floor(xInd);
+ var rightInd = Math.ceil(xInd);
+ var xLerp = xInd - leftInd;
+ // Standard bilinear blend of the four surrounding pixels, done
+ // independently per channel.
+ for (var c = 0; c < numChannels; c++) {
+ var ind = c + leftInd * inStride[2] + topInd * inStride[1] +
+ bInd * inStride[0];
+ var topLeft = imageVals[ind];
+ ind = c + rightInd * inStride[2] + topInd * inStride[1] +
+ bInd * inStride[0];
+ var topRight = imageVals[ind];
+ ind = c + leftInd * inStride[2] + bottomInd * inStride[1] +
+ bInd * inStride[0];
+ var bottomLeft = imageVals[ind];
+ ind = c + rightInd * inStride[2] + bottomInd * inStride[1] +
+ bInd * inStride[0];
+ var bottomRight = imageVals[ind];
+ var top = topLeft + (topRight - topLeft) * xLerp;
+ var bottom = bottomLeft + (bottomRight - bottomLeft) * xLerp;
+ ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = top + ((bottom - top) * yLerp);
+ }
+ }
+ }
+ else { // method == "nearest"
+ for (var x = 0; x < cropWidth; ++x) {
+ var xInd = (cropWidth > 1) ?
+ x1 * (imageWidth - 1) + x * widthScale :
+ 0.5 * (x1 + x2) * (imageWidth - 1);
+ if (xInd < 0 || xInd > imageWidth - 1) {
+ for (var c = 0; c < numChannels; c++) {
+ var ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = extrapolationValue;
+ }
+ continue;
+ }
+ // Copy the single nearest source pixel per channel.
+ var closestX = Math.round(xInd);
+ var closestY = Math.round(yInd);
+ for (var c = 0; c < numChannels; c++) {
+ var inInd = c + closestX * inStride[2] + closestY * inStride[1] +
+ bInd * inStride[0];
+ var outInd = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[outInd] = imageVals[inInd];
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(output.shape, output.dtype, output.values);
+ }
+ // Registration entry for the CropAndResize kernel on the CPU backend.
+ var cropAndResizeConfig = {
+ kernelName: tfjsCore.CropAndResize,
+ backendName: 'cpu',
+ kernelFunc: cropAndResize
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for Cumsum: cumulative sum of `x` along `axis`.
+ // If the axis is not the innermost one, the input is first transposed so the
+ // scan runs over contiguous memory, then transposed back at the end.
+ // `exclusive` shifts the scan by one (first element becomes 0); `reverse`
+ // scans from the end of the axis toward the start.
+ function cumsum(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var axis = attrs.axis, exclusive = attrs.exclusive, reverse = attrs.reverse;
+ assertNotComplex(x, 'cumsum');
+ // Non-null permutation means `axis` is not already innermost.
+ var permutation = tfjsCore.backend_util.getAxesPermutation([axis], x.shape.length);
+ var $x = x;
+ if (permutation != null) {
+ $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutation } });
+ }
+ var permutedAxis = tfjsCore.backend_util.getInnerMostAxes(1, x.shape.length)[0];
+ if (permutedAxis !== $x.shape.length - 1) {
+ throw new Error("backend.cumsum in CPU expects an inner-most " +
+ ("axis=" + ($x.shape.length - 1) + " but got axis=" + permutedAxis));
+ }
+ // int32 inputs stay int32; bool is upcast so the sums are representable.
+ var resultDtype = tfjsCore.upcastType($x.dtype, 'int32');
+ var vals = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape($x.shape), resultDtype);
+ var aVals = backend.data.get($x.dataId).values;
+ var finalDim = $x.shape[$x.shape.length - 1];
+ // Maps (row offset, scan position) to a flat index; the reverse variant
+ // walks the innermost dimension back-to-front.
+ var indexAdjuster = reverse ?
+ function (i, j) { return i + finalDim - j - 1; } :
+ function (i, j) { return i + j; };
+ for (var i = 0; i < aVals.length; i += finalDim) {
+ for (var j = 0; j < finalDim; j++) {
+ var idx = indexAdjuster(i, j);
+ if (j === 0) {
+ vals[idx] = exclusive ? 0 : aVals[idx];
+ }
+ else {
+ var prevIdx = indexAdjuster(i, j - 1);
+ // Exclusive scan lags by one element: out[j] = in[j-1] + out[j-1].
+ vals[idx] = exclusive ? aVals[prevIdx] + vals[prevIdx] :
+ aVals[idx] + vals[prevIdx];
+ }
+ }
+ }
+ var result = backend.makeTensorInfo($x.shape, resultDtype, vals);
+ if (permutation != null) {
+ // Undo the earlier transpose and release the two intermediates.
+ var reversePermutation = tfjsCore.backend_util.getUndoAxesPermutation(permutation);
+ var reverseTransposedResult = transpose({ inputs: { x: result }, backend: backend, attrs: { perm: reversePermutation } });
+ backend.disposeIntermediateTensorInfo(result);
+ backend.disposeIntermediateTensorInfo($x);
+ return reverseTransposedResult;
+ }
+ return result;
+ }
+ // Registration entry for the Cumsum kernel on the CPU backend.
+ var cumsumConfig = {
+ kernelName: tfjsCore.Cumsum,
+ backendName: 'cpu',
+ kernelFunc: cumsum
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function denseBincount(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, weights = inputs.weights;
+ var size = attrs.size, binaryOutput = attrs.binaryOutput;
+ if (x.shape.length === 1) {
+ var xVals = backend.data.get(x.dataId).values;
+ var weightsVals = backend.data.get(weights.dataId).values;
+ var outVals = bincountImpl(xVals, weightsVals, weights.dtype, weights.shape, size);
+ return backend.makeTensorInfo([size], weights.dtype, outVals);
+ }
+ else if (x.shape.length === 2) {
+ var xBuf = backend.bufferSync(x);
+ var weightsBuf = backend.bufferSync(weights);
+ var outBuf = bincountReduceImpl(xBuf, weightsBuf, size, binaryOutput);
+ return backend.makeTensorInfo(outBuf.shape, weights.dtype, outBuf.values);
+ }
+ throw new Error("Error in denseBincount: input must be at most rank 2, but got rank" +
+ (x.shape.length + "."));
+ }
+ var denseBincountConfig = {
+ kernelName: tfjsCore.DenseBincount,
+ backendName: 'cpu',
+ kernelFunc: denseBincount
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for DepthToSpace: rearranges blockSize*blockSize groups of
+ // channels into spatial blocks, turning [B, H, W, C] into
+ // [B, H*blockSize, W*blockSize, C/(blockSize^2)]. NHWC only.
+ function depthToSpace(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var blockSize = attrs.blockSize, dataFormat = attrs.dataFormat;
+ tfjsCore.util.assert(dataFormat === 'NHWC', function () { return "Only NHWC dataFormat supported on CPU for depthToSpace. Got " + dataFormat; });
+ var batchSize = x.shape[0];
+ var inputHeight = x.shape[1];
+ var inputWidth = x.shape[2];
+ var inputDepth = x.shape[3];
+ var outputHeight = inputHeight * blockSize;
+ var outputWidth = inputWidth * blockSize;
+ // NOTE(review): inputDepth is presumably a multiple of blockSize^2 —
+ // assumed validated at the op level; confirm.
+ var outputDepth = inputDepth / (blockSize * blockSize);
+ var xValues = backend.data.get(x.dataId).values;
+ var result = new Float32Array(batchSize * outputHeight * outputWidth * outputDepth);
+ // Output is traversed in order, so a single running index suffices.
+ var outputIdx = 0;
+ for (var b = 0; b < batchSize; ++b) {
+ for (var h = 0; h < outputHeight; ++h) {
+ // (inH, offsetH): source row and position within the block.
+ var inH = Math.floor(h / blockSize);
+ var offsetH = (h % blockSize);
+ for (var w = 0; w < outputWidth; ++w) {
+ var inW = Math.floor(w / blockSize);
+ var offsetW = (w % blockSize);
+ // Channel group in the input that maps to this block position.
+ var offsetD = (offsetH * blockSize + offsetW) * outputDepth;
+ for (var d = 0; d < outputDepth; ++d) {
+ var inD = d + offsetD;
+ var inputIdx = inD + inputDepth * (inW + inputWidth * (inH + inputHeight * b));
+ result[outputIdx++] = xValues[inputIdx];
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo([batchSize, outputHeight, outputWidth, outputDepth], x.dtype, result);
+ }
+ // Registration entry for the DepthToSpace kernel on the CPU backend.
+ var depthToSpaceConfig = {
+ kernelName: tfjsCore.DepthToSpace,
+ backendName: 'cpu',
+ kernelFunc: depthToSpace
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for DepthwiseConv2dNative: each input channel is convolved with
+ // its own `chMul` (= outChannels / inChannels) filters, producing
+ // outChannels = inChannels * chMul output channels. NHWC layout per the
+ // stride arithmetic below.
+ function depthwiseConv2dNative(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, filter = inputs.filter;
+ var strides = attrs.strides, pad = attrs.pad, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode;
+ assertNotComplex([x, filter], 'depthwiseConv2DNative');
+ var xStrides = tfjsCore.util.computeStrides(x.shape);
+ var filterStrides = tfjsCore.util.computeStrides(filter.shape);
+ var $dilations = dilations;
+ if ($dilations == null) {
+ $dilations = [1, 1];
+ }
+ tfjsCore.util.assert(tfjsCore.backend_util.eitherStridesOrDilationsAreOne(strides, $dilations), function () { return 'Error in depthwiseConv2d: Either strides or dilations must be ' +
+ ("1. Got strides " + strides + " and dilations '" + $dilations + "'"); });
+ var convInfo = tfjsCore.backend_util.computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad, dimRoundingMode, true /* depthwise */);
+ var filterHeight = convInfo.filterHeight, filterWidth = convInfo.filterWidth, dilationHeight = convInfo.dilationHeight, dilationWidth = convInfo.dilationWidth, padInfo = convInfo.padInfo;
+ var padLeft = padInfo.left;
+ var padTop = padInfo.top;
+ // Channel multiplier: how many output channels each input channel drives.
+ var chMul = convInfo.outChannels / convInfo.inChannels;
+ // NOTE(review): the += accumulation below relies on y starting zero-filled;
+ // TensorBuffer presumably allocates a zeroed array — confirm.
+ var y = new tfjsCore.TensorBuffer(convInfo.outShape, x.dtype);
+ var xVals = backend.data.get(x.dataId).values;
+ var wVals = backend.data.get(filter.dataId).values;
+ var yVals = y.values;
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ var xOffset1 = b * xStrides[0];
+ var yOffset1 = b * y.strides[0];
+ for (var yR = 0; yR < convInfo.outHeight; ++yR) {
+ var yOffset2 = yOffset1 + yR * y.strides[1];
+ var xRCorner = yR * convInfo.strideHeight - padTop;
+ for (var wR = 0; wR < filterHeight; ++wR) {
+ var xR = xRCorner + wR * dilationHeight;
+ // Skip filter rows that fall in the padding region.
+ if (xR < 0 || xR >= convInfo.inHeight) {
+ continue;
+ }
+ var wOffset1 = wR * filterStrides[0];
+ var xOffset2 = xOffset1 + xR * xStrides[1];
+ for (var yC = 0; yC < convInfo.outWidth; ++yC) {
+ var yOffset3 = yOffset2 + yC * y.strides[2];
+ var xCCorner = yC * convInfo.strideWidth - padLeft;
+ for (var wC = 0; wC < filterWidth; ++wC) {
+ var xC = xCCorner + wC * dilationWidth;
+ // Skip filter columns that fall in the padding region.
+ if (xC < 0 || xC >= convInfo.inWidth) {
+ continue;
+ }
+ var wOffset2 = wOffset1 + wC * filterStrides[1];
+ var xOffset3 = xOffset2 + xC * convInfo.inChannels;
+ var yOffset4 = yOffset3;
+ var wOffset3 = wOffset2;
+ // One input channel contributes to its own chMul outputs only.
+ for (var d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ var xVal = xVals[xOffset3 + d1];
+ for (var q = 0; q < chMul; ++q) {
+ yVals[yOffset4 + q] += xVal * wVals[wOffset3 + q];
+ }
+ yOffset4 += chMul;
+ wOffset3 += chMul;
+ }
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(y.shape, y.dtype, y.values);
+ }
+ // Registration entry for the DepthwiseConv2dNative kernel on the CPU backend.
+ var depthwiseConv2dNativeConfig = {
+ kernelName: tfjsCore.DepthwiseConv2dNative,
+ backendName: 'cpu',
+ kernelFunc: depthwiseConv2dNative
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Gradient of DepthwiseConv2dNative with respect to the filter: correlates
+ // the forward input `x` with the output gradient `dy` to produce dW of shape
+ // `filterShape` ([fH, fW, inChannels, chMul]).
+ function depthwiseConv2dNativeBackpropFilter(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, dy = inputs.dy;
+ var strides = attrs.strides, dilations = attrs.dilations, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode, filterShape = attrs.filterShape;
+ assertNotComplex([x, dy], 'depthwiseConv2dNativeBackpropFilter');
+ var convInfo = tfjsCore.backend_util.computeConv2DInfo(x.shape, filterShape, strides, dilations, pad, dimRoundingMode, true /* depthwise */);
+ var strideHeight = convInfo.strideHeight, strideWidth = convInfo.strideWidth, filterHeight = convInfo.filterHeight, filterWidth = convInfo.filterWidth;
+ var dW = new tfjsCore.TensorBuffer(convInfo.filterShape, 'float32');
+ var leftPad = convInfo.padInfo.left;
+ var topPad = convInfo.padInfo.top;
+ // Channel multiplier: output channel d2 maps to input channel
+ // d1 = floor(d2 / chMul) and multiplier slot dm = d2 % chMul.
+ var chMul = convInfo.outChannels / convInfo.inChannels;
+ var xVals = backend.data.get(x.dataId).values;
+ var xBuf = new tfjsCore.TensorBuffer(x.shape, x.dtype, xVals);
+ var dyVals = backend.data.get(dy.dataId).values;
+ var dyBuf = new tfjsCore.TensorBuffer(dy.shape, dy.dtype, dyVals);
+ for (var wR = 0; wR < filterHeight; ++wR) {
+ // Output rows whose receptive field keeps (wR, wC) inside the input.
+ var yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));
+ var yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);
+ for (var wC = 0; wC < filterWidth; ++wC) {
+ var yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));
+ var yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);
+ for (var d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ var d1 = Math.trunc(d2 / chMul);
+ var dm = d2 % chMul;
+ // Correlate x with dy over the batch and the valid output window.
+ var dotProd = 0;
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ for (var yR = yRMin; yR < yRMax; ++yR) {
+ var xR = wR + yR * strideHeight - topPad;
+ for (var yC = yCMin; yC < yCMax; ++yC) {
+ var xC = wC + yC * strideWidth - leftPad;
+ dotProd += xBuf.get(b, xR, xC, d1) *
+ dyBuf.get(b, yR, yC, d2);
+ }
+ }
+ }
+ dW.set(dotProd, wR, wC, d1, dm);
+ }
+ }
+ }
+ return backend.makeTensorInfo(dW.shape, dW.dtype, dW.values);
+ }
+ // Registration entry for the DepthwiseConv2dNativeBackpropFilter kernel.
+ var depthwiseConv2dNativeBackpropFilterConfig = {
+ kernelName: tfjsCore.DepthwiseConv2dNativeBackpropFilter,
+ backendName: 'cpu',
+ kernelFunc: depthwiseConv2dNativeBackpropFilter
+ };
+
+ // Gradient of DepthwiseConv2dNative with respect to the input: a "full"
+ // correlation of `dy` with the spatially flipped `filter`, producing dx with
+ // shape `inputShape`. Each input channel d1 receives contributions only from
+ // its own chMul output channels (d2 = d1 * chMul + dm).
+ function depthwiseConv2dNativeBackpropInput(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var dy = inputs.dy, filter = inputs.filter;
+ var strides = attrs.strides, dilations = attrs.dilations, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode, inputShape = attrs.inputShape;
+ assertNotComplex([dy, filter], 'depthwiseConv2DNativeBackpropInput');
+ var dyStrides = tfjsCore.util.computeStrides(dy.shape);
+ var filterStrides = tfjsCore.util.computeStrides(filter.shape);
+ var convInfo = tfjsCore.backend_util.computeConv2DInfo(inputShape, filter.shape, strides, dilations, pad, dimRoundingMode, true /* depthwise */);
+ var dx = new tfjsCore.TensorBuffer(convInfo.inShape, 'float32');
+ var dxValues = dx.values;
+ var _a = __read(dx.strides, 3), dxS0 = _a[0], dxS1 = _a[1], dxS2 = _a[2];
+ var dyValues = backend.data.get(dy.dataId).values;
+ var _b = __read(dyStrides, 3), dyS0 = _b[0], dyS1 = _b[1], dyS2 = _b[2];
+ var fltValues = backend.data.get(filter.dataId).values;
+ var _c = __read(filterStrides, 3), fltS0 = _c[0], fltS1 = _c[1], fltS2 = _c[2];
+ var batchSize = convInfo.batchSize, filterHeight = convInfo.filterHeight, filterWidth = convInfo.filterWidth, inChannels = convInfo.inChannels, inHeight = convInfo.inHeight, inWidth = convInfo.inWidth, outChannels = convInfo.outChannels, outHeight = convInfo.outHeight, outWidth = convInfo.outWidth, strideHeight = convInfo.strideHeight, strideWidth = convInfo.strideWidth;
+ // Effective padding for the gradient pass: (filter extent - 1) minus the
+ // forward padding, as in the Conv3D input-gradient kernel above.
+ var topPad = filterHeight - 1 - convInfo.padInfo.top;
+ var leftPad = filterWidth - 1 - convInfo.padInfo.left;
+ var chMul = outChannels / inChannels;
+ for (var b = 0; b < batchSize; ++b) {
+ for (var d1 = 0; d1 < inChannels; ++d1) {
+ for (var xR = 0; xR < inHeight; ++xR) {
+ var xRCorner = xR - topPad;
+ // Output rows whose receptive field covers input row xR.
+ var xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));
+ var yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);
+ for (var xC = 0; xC < inWidth; ++xC) {
+ var xCCorner = xC - leftPad;
+ var xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));
+ var yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);
+ var dotProd = 0;
+ for (var yR = xRMin; yR < yRMax; ++yR) {
+ var wR = yR * strideHeight - xRCorner;
+ for (var yC = xCMin; yC < yCMax; ++yC) {
+ var wC = yC * strideWidth - xCCorner;
+ var dyOffset = dyS0 * b + dyS1 * yR + dyS2 * yC;
+ // Filter indexed spatially flipped ((extent - 1) - w).
+ var fltOffset = fltS0 * (filterHeight - 1 - wR) +
+ fltS1 * (filterWidth - 1 - wC) + fltS2 * d1;
+ // Sum over this input channel's multiplier slots only.
+ for (var dm = 0; dm < chMul; ++dm) {
+ var d2 = d1 * chMul + dm;
+ var pixel = dyValues[dyOffset + d2];
+ var weight = fltValues[fltOffset + dm];
+ dotProd += pixel * weight;
+ }
+ }
+ }
+ dxValues[dxS0 * b + dxS1 * xR + dxS2 * xC + d1] = dotProd;
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
+ }
+ // Registration entry for the DepthwiseConv2dNativeBackpropInput kernel.
+ var depthwiseConv2dNativeBackpropInputConfig = {
+ kernelName: tfjsCore.DepthwiseConv2dNativeBackpropInput,
+ backendName: 'cpu',
+ kernelFunc: depthwiseConv2dNativeBackpropInput
+ };
+
+ function diag(args) {
+ var inputs = args.inputs, backend = args.backend;
+ var x = inputs.x;
+ var xSize = tfjsCore.util.sizeFromShape(x.shape);
+ var xVals = backend.data.get(x.dataId).values;
+ var outBuf = tfjsCore.buffer([xSize, xSize], x.dtype);
+ var vals = outBuf.values;
+ for (var i = 0; i < xVals.length; i++) {
+ vals[i * xSize + i] = xVals[i];
+ }
+ var outShape = __spread(x.shape, x.shape);
+ return backend.makeTensorInfo(outShape, outBuf.dtype, outBuf.values);
+ }
+ var diagConfig = {
+ kernelName: tfjsCore.Diag,
+ backendName: 'cpu',
+ kernelFunc: diag
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for Dilation2D (grayscale morphological dilation): for each
+ // output position, takes the max over the filter window of input + filter,
+ // per channel. NHWC layout only.
+ var dilation2dConfig = {
+ kernelName: tfjsCore.Dilation2D,
+ backendName: 'cpu',
+ kernelFunc: function (_a) {
+ var inputs = _a.inputs, backend = _a.backend, attrs = _a.attrs;
+ var x = inputs.x, filter = inputs.filter;
+ var strides = attrs.strides, pad = attrs.pad, dilations = attrs.dilations;
+ var cpuBackend = backend;
+ var xVals = cpuBackend.data.get(x.dataId).values;
+ var xRank = x.shape.length;
+ var filterVals = cpuBackend.data.get(filter.dataId).values;
+ var filterRank = filter.shape.length;
+ var _b = tfjsCore.backend_util.computeDilation2DInfo(x.shape, filter.shape, strides, pad, 'NHWC' /* dataFormat */, dilations), batchSize = _b.batchSize, inHeight = _b.inHeight, inWidth = _b.inWidth, inChannels = _b.inChannels, outHeight = _b.outHeight, outWidth = _b.outWidth, padInfo = _b.padInfo, strideHeight = _b.strideHeight, strideWidth = _b.strideWidth, filterHeight = _b.filterHeight, filterWidth = _b.filterWidth, dilationHeight = _b.dilationHeight, dilationWidth = _b.dilationWidth, outShape = _b.outShape;
+ var outSize = tfjsCore.util.sizeFromShape(outShape);
+ var outRank = outShape.length;
+ var outputVals = tfjsCore.util.getArrayFromDType(x.dtype, outSize);
+ // Upsampling the input by fill in `dilation size - 1` values between each
+ // input value.
+ // This implementation follows the TF c++ implementation:
+ // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc
+ for (var b = 0; b < batchSize; ++b) {
+ for (var hOut = 0; hOut < outHeight; ++hOut) {
+ // Top-left corner of the window in input coordinates (may be negative
+ // in the padding region).
+ var hBeg = hOut * strideHeight - padInfo.top;
+ for (var wOut = 0; wOut < outWidth; ++wOut) {
+ var wBeg = wOut * strideWidth - padInfo.left;
+ for (var d = 0; d < inChannels; ++d) {
+ // Running max over the window; out-of-bounds taps are skipped.
+ var curVal = Number.MIN_SAFE_INTEGER;
+ for (var h = 0; h < filterHeight; ++h) {
+ var hIn = hBeg + h * dilationHeight;
+ if (hIn >= 0 && hIn < inHeight) {
+ for (var w = 0; w < filterWidth; ++w) {
+ var wIn = wBeg + w * dilationWidth;
+ if (wIn >= 0 && wIn < inWidth) {
+ var xIndex = tfjsCore.util.locToIndex([b, hIn, wIn, d], xRank, tfjsCore.util.computeStrides(x.shape));
+ var filterIndex = tfjsCore.util.locToIndex([h, w, d], filterRank, tfjsCore.util.computeStrides(filter.shape));
+ // Dilation adds the structuring element before maxing.
+ var val = xVals[xIndex] + filterVals[filterIndex];
+ if (val > curVal) {
+ curVal = val;
+ }
+ }
+ }
+ }
+ }
+ var outputIndex = tfjsCore.util.locToIndex([b, hOut, wOut, d], outRank, tfjsCore.util.computeStrides(outShape));
+ outputVals[outputIndex] = curVal;
+ }
+ }
+ }
+ }
+ var dataId = cpuBackend.write(tfjsCore.util.toTypedArray(outputVals, x.dtype), outShape, x.dtype);
+ return { dataId: dataId, shape: outShape, dtype: x.dtype };
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Gradient of Dilation2D with respect to the filter: routes each dy value to
+ // the filter tap that won the forward max (the argmax of x + filter over the
+ // window), accumulating into a [filterHeight, filterWidth, depth] buffer.
+ var dilation2dBackpropFilterConfig = {
+ kernelName: tfjsCore.Dilation2DBackpropFilter,
+ backendName: 'cpu',
+ kernelFunc: function (_a) {
+ var inputs = _a.inputs, backend = _a.backend, attrs = _a.attrs;
+ var x = inputs.x, filter = inputs.filter, dy = inputs.dy;
+ var strides = attrs.strides, pad = attrs.pad, dilations = attrs.dilations;
+ var cpuBackend = backend;
+ // Nested-array views make the argmax search below easy to index.
+ var $x = tfjsCore.util.toNestedArray(x.shape, cpuBackend.data.get(x.dataId).values);
+ var $filter = tfjsCore.util.toNestedArray(filter.shape, cpuBackend.data.get(filter.dataId).values);
+ var _b = tfjsCore.backend_util.computeDilation2DInfo(x.shape, filter.shape, strides, pad, 'NHWC' /* dataFormat */, dilations), batchSize = _b.batchSize, inHeight = _b.inHeight, inWidth = _b.inWidth, inChannels = _b.inChannels, outHeight = _b.outHeight, outWidth = _b.outWidth, padInfo = _b.padInfo, strideHeight = _b.strideHeight, strideWidth = _b.strideWidth, filterHeight = _b.filterHeight, filterWidth = _b.filterWidth, dilationHeight = _b.dilationHeight, dilationWidth = _b.dilationWidth, outShape = _b.outShape;
+ tfjsCore.util.assert(dy.rank === outShape.length, function () { return "Error in " + tfjsCore.Dilation2DBackpropFilter + ", dy " +
+ ("must have the same rank as output " + outShape.length + ", but got ") +
+ ("" + dy.rank); });
+ var $dy = tfjsCore.util.toNestedArray(outShape, cpuBackend.data.get(dy.dataId).values);
+ // The computed filter gradients has the same dimensions as the filter:
+ // [filterHeight, filterWidth, depth]
+ var gradients = tfjsCore.util.makeZerosNestedTypedArray(filter.shape, filter.dtype);
+ // In the case of multiple argmax branches, we only back-propagate along the
+ // last branch, i.e., the one with largest value of `h * filter_cols + w`,
+ // similarly to the max-pooling backward routines.
+ // This implementation follows the TF c++ implementation:
+ // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc
+ for (var b = 0; b < batchSize; ++b) {
+ for (var hOut = 0; hOut < outHeight; ++hOut) {
+ var hBeg = hOut * strideHeight - padInfo.top;
+ for (var wOut = 0; wOut < outWidth; ++wOut) {
+ var wBeg = wOut * strideWidth - padInfo.left;
+ for (var d = 0; d < inChannels; ++d) {
+ // Re-run the forward argmax to find the winning tap (hMax, wMax).
+ var curVal = Number.MIN_SAFE_INTEGER;
+ var hMax = 0;
+ var wMax = 0;
+ for (var h = 0; h < filterHeight; ++h) {
+ var hIn = hBeg + h * dilationHeight;
+ if (hIn >= 0 && hIn < inHeight) {
+ for (var w = 0; w < filterWidth; ++w) {
+ var wIn = wBeg + w * dilationWidth;
+ if (wIn >= 0 && wIn < inWidth) {
+ var val = $x[b][hIn][wIn][d] + $filter[h][w][d];
+ if (val > curVal) {
+ curVal = val;
+ hMax = h;
+ wMax = w;
+ }
+ }
+ }
+ }
+ }
+ gradients[hMax][wMax][d] += $dy[b][hOut][wOut][d];
+ }
+ }
+ }
+ }
+ var dataId = cpuBackend.write(tfjsCore.util.toTypedArray(gradients, x.dtype), filter.shape, filter.dtype);
+ return { dataId: dataId, shape: filter.shape, dtype: filter.dtype };
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for Dilation2DBackpropInput: the gradient of grayscale
 * morphological dilation with respect to the input tensor `x` (NHWC layout).
 * For every output position, the forward op took the max of x + filter over
 * the (dilated) window; the gradient is routed back to that argmax position.
 */
var dilation2dBackpropInputConfig = {
    kernelName: tfjsCore.Dilation2DBackpropInput,
    backendName: 'cpu',
    kernelFunc: function (_a) {
        var inputs = _a.inputs, backend = _a.backend, attrs = _a.attrs;
        var x = inputs.x, filter = inputs.filter, dy = inputs.dy;
        var strides = attrs.strides, pad = attrs.pad, dilations = attrs.dilations;
        var cpuBackend = backend;
        // Materialize x and filter as nested JS arrays for direct 4-D/3-D indexing.
        var $x = tfjsCore.util.toNestedArray(x.shape, cpuBackend.data.get(x.dataId).values);
        var $filter = tfjsCore.util.toNestedArray(filter.shape, cpuBackend.data.get(filter.dataId).values);
        var _b = tfjsCore.backend_util.computeDilation2DInfo(x.shape, filter.shape, strides, pad, 'NHWC' /* dataFormat */, dilations), batchSize = _b.batchSize, inHeight = _b.inHeight, inWidth = _b.inWidth, inChannels = _b.inChannels, outHeight = _b.outHeight, outWidth = _b.outWidth, padInfo = _b.padInfo, strideHeight = _b.strideHeight, strideWidth = _b.strideWidth, filterHeight = _b.filterHeight, filterWidth = _b.filterWidth, dilationHeight = _b.dilationHeight, dilationWidth = _b.dilationWidth, outShape = _b.outShape;
        // NOTE(review): the message interpolates the kernel-name constant
        // (tfjsCore.Dilation2DBackpropInput), i.e. the op name string, not an
        // op instance.
        tfjsCore.util.assert(dy.rank === outShape.length, function () { return "Error in " + tfjsCore.Dilation2DBackpropInput + ", dy " +
            ("must have the same rank as output " + outShape.length + ", but got ") +
            ("" + dy.rank); });
        var $dy = tfjsCore.util.toNestedArray(outShape, cpuBackend.data.get(dy.dataId).values);
        // The computed gradients has the same dimensions as the input:
        // [batch, inputHeight, inputCols, inChannel]
        var gradients = tfjsCore.util.makeZerosNestedTypedArray(x.shape, x.dtype);
        // In the case of multiple argmax branches, we only back-propagate along the
        // last branch, i.e., the one with largest value of `h * filter_cols + w`,
        // similarly to the max-pooling backward routines.
        // This implementation follows the TF c++ implementation:
        // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc
        for (var b = 0; b < batchSize; ++b) {
            for (var hOut = 0; hOut < outHeight; ++hOut) {
                // Top-left corner of the receptive field for this output position
                // (may be negative because of padding).
                var hBeg = hOut * strideHeight - padInfo.top;
                for (var wOut = 0; wOut < outWidth; ++wOut) {
                    var wBeg = wOut * strideWidth - padInfo.left;
                    for (var d = 0; d < inChannels; ++d) {
                        var curVal = Number.MIN_SAFE_INTEGER;
                        // Default argmax position: the window origin clamped into bounds,
                        // used when no in-bounds tap is found.
                        var hInMax = (hBeg < 0) ? 0 : hBeg;
                        var wInMax = (wBeg < 0) ? 0 : wBeg;
                        for (var h = 0; h < filterHeight; ++h) {
                            var hIn = hBeg + h * dilationHeight;
                            if (hIn >= 0 && hIn < inHeight) {
                                for (var w = 0; w < filterWidth; ++w) {
                                    var wIn = wBeg + w * dilationWidth;
                                    if (wIn >= 0 && wIn < inWidth) {
                                        // Dilation forward pass takes max(x + filter) over the window.
                                        var val = $x[b][hIn][wIn][d] + $filter[h][w][d];
                                        if (val > curVal) {
                                            curVal = val;
                                            hInMax = hIn;
                                            wInMax = wIn;
                                        }
                                    }
                                }
                            }
                        }
                        // Route the incoming gradient to the argmax input position.
                        gradients[b][hInMax][wInMax][d] += $dy[b][hOut][wOut][d];
                    }
                }
            }
        }
        var dataId = cpuBackend.write(tfjsCore.util.toTypedArray(gradients, x.dtype), x.shape, x.dtype);
        return { dataId: dataId, shape: x.shape, dtype: x.dtype };
    }
};
+
/**
 * Sum-reduction kernel. Reduces `x` over the axes in `attrs.axis`,
 * optionally keeping the reduced dimensions as size 1 (`keepDims`).
 * Booleans are first cast to int32 so they sum arithmetically; the output
 * dtype is the input dtype upcast with int32.
 */
function sum(args) {
    var backend = args.backend;
    var x = args.inputs.x;
    var axis = args.attrs.axis;
    var keepDims = args.attrs.keepDims;
    assertNotComplex(x, 'sum');
    // Work on an owned copy: cast bools to int32, otherwise a cheap identity.
    var castedX = x.dtype === 'bool' ?
        cast({ inputs: { x: x }, backend: backend, attrs: { dtype: 'int32' } }) :
        identity({ inputs: { x: x }, backend: backend });
    var rank = castedX.shape.length;
    var axes = tfjsCore.util.parseAxisParam(axis, castedX.shape);
    var permutation = tfjsCore.backend_util.getAxesPermutation(axes, rank);
    var reductionAxes = axes;
    var reduceInput = castedX;
    if (permutation != null) {
        // Transpose so the reduced axes become the innermost ones.
        reduceInput = transpose({ inputs: { x: castedX }, backend: backend, attrs: { perm: permutation } });
        reductionAxes = tfjsCore.backend_util.getInnerMostAxes(reductionAxes.length, rank);
    }
    tfjsCore.backend_util.assertAxesAreInnerMostDims('sum', reductionAxes, reduceInput.shape.length);
    var shapes = tfjsCore.backend_util.computeOutAndReduceShapes(reduceInput.shape, reductionAxes);
    var outShape = shapes[0];
    var reduceShape = shapes[1];
    var outDtype = tfjsCore.backend_util.upcastType(reduceInput.dtype, 'int32');
    var result = zeros(backend, outShape, outDtype);
    var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
    var outVals = backend.data.get(result.dataId).values;
    var inVals = backend.data.get(reduceInput.dataId).values;
    // Each output element is the sum of a contiguous run of `reduceSize` inputs.
    for (var outIdx = 0; outIdx < outVals.length; ++outIdx) {
        var base = outIdx * reduceSize;
        var acc = 0;
        for (var k = 0; k < reduceSize; ++k) {
            acc += inVals[base + k];
        }
        outVals[outIdx] = acc;
    }
    if (keepDims) {
        // Re-insert the reduced dimensions as size 1.
        var expandedShape = tfjsCore.backend_util.expandShapeToKeepDim(result.shape, axes);
        var collapsed = result;
        result = reshape({ inputs: { x: result }, backend: backend, attrs: { shape: expandedShape } });
        backend.disposeIntermediateTensorInfo(collapsed);
    }
    backend.disposeIntermediateTensorInfo(castedX);
    if (permutation != null) {
        backend.disposeIntermediateTensorInfo(reduceInput);
    }
    return result;
}
var sumConfig = {
    kernelName: tfjsCore.Sum,
    backendName: 'cpu',
    kernelFunc: sum
};
+
/**
 * CPU einsum kernel: evaluates `attrs.equation` over the input tensors by
 * transposing/expanding each operand into a broadcast-compatible layout,
 * multiplying operands elementwise, and summing out contracted dimensions
 * step by step. The try/catch/finally scaffolding around the loops is the
 * TypeScript downlevel of `for...of`: e_1/e_2 capture an iterator error so
 * the iterator's `.return()` still runs before the error is rethrown.
 */
function einsum(args) {
    var e_1, _a, e_2, _b;
    var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
    var equation = attrs.equation;
    var tensors = inputs;
    var _c = tfjsCore.backend_util.decodeEinsumEquation(equation, tensors.length), allDims = _c.allDims, summedDims = _c.summedDims, idDims = _c.idDims;
    tfjsCore.backend_util.checkEinsumDimSizes(allDims.length, idDims, tensors);
    var _d = tfjsCore.backend_util.getEinsumComputePath(summedDims, idDims), path = _d.path, steps = _d.steps;
    var nSteps = steps.length;
    var out = null;
    var numDimsRemaining = allDims.length;
    // Intermediates created along the way; disposed in bulk at the end.
    var tensorsToDispose = [];
    for (var i = 0; i < nSteps; ++i) {
        try {
            for (var _e = (e_1 = void 0, __values(steps[i])), _f = _e.next(); !_f.done; _f = _e.next()) {
                var idTerm = _f.value;
                var _g = tfjsCore.backend_util.getEinsumPermutation(numDimsRemaining, idDims[idTerm]), perm = _g.permutationIndices, dimsToExpand = _g.expandDims;
                var x = void 0;
                // Skip the transpose when it would be a no-op.
                if (tfjsCore.backend_util.isIdentityPermutation(perm)) {
                    x = tensors[idTerm];
                }
                else {
                    x = transpose({ inputs: { x: tensors[idTerm] }, backend: backend, attrs: { perm: perm } });
                    tensorsToDispose.push(x);
                }
                // Insert size-1 dims so operands broadcast against each other.
                var targetShape = x.shape.slice();
                for (var k = 0; k < dimsToExpand.length; ++k) {
                    targetShape.splice(dimsToExpand[k], 0, 1);
                }
                if (!tfjsCore.util.arraysEqual(x.shape, targetShape)) {
                    x = reshape({ inputs: { x: x }, backend: backend, attrs: { shape: targetShape } });
                    tensorsToDispose.push(x);
                }
                if (out === null) {
                    out = x;
                }
                else {
                    // Fold this operand into the running elementwise product.
                    // tslint:disable-next-line: no-unnecessary-type-assertion
                    out = multiply({ inputs: { a: x, b: out }, backend: backend });
                    tensorsToDispose.push(out);
                }
            }
        }
        catch (e_1_1) { e_1 = { error: e_1_1 }; }
        finally {
            try {
                if (_f && !_f.done && (_a = _e.return)) _a.call(_e);
            }
            finally { if (e_1) throw e_1.error; }
        }
        // After every step but the last, contract (sum out) one dimension.
        if (i < nSteps - 1) {
            if (path[i] >= 0) {
                out = sum({
                    inputs: { x: out },
                    backend: backend,
                    attrs: {
                        // Shift the axis index to account for dims already summed away.
                        axis: path[i] - (allDims.length - numDimsRemaining),
                        keepDims: false
                    }
                });
                tensorsToDispose.push(out);
            }
            numDimsRemaining--;
        }
    }
    try {
        // Clean up intermediate tensors.
        for (var tensorsToDispose_1 = __values(tensorsToDispose), tensorsToDispose_1_1 = tensorsToDispose_1.next(); !tensorsToDispose_1_1.done; tensorsToDispose_1_1 = tensorsToDispose_1.next()) {
            var tensorInfo = tensorsToDispose_1_1.value;
            // The final result may itself be in the dispose list; keep it alive.
            if (tensorInfo === out) {
                continue;
            }
            backend.disposeIntermediateTensorInfo(tensorInfo);
        }
    }
    catch (e_2_1) { e_2 = { error: e_2_1 }; }
    finally {
        try {
            if (tensorsToDispose_1_1 && !tensorsToDispose_1_1.done && (_b = tensorsToDispose_1.return)) _b.call(tensorsToDispose_1);
        }
        finally { if (e_2) throw e_2.error; }
    }
    return out;
}
var einsumConfig = {
    kernelName: tfjsCore.Einsum,
    backendName: 'cpu',
    kernelFunc: einsum
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Gradient of the ELU activation, computed from the activation output `y`
 * and the upstream gradient `dy`.
 *
 * ELU: y = x for x > 0, y = exp(x) - 1 otherwise, hence
 *   dELU/dx = 1        when y >= 0  (the linear region), and
 *   dELU/dx = y + 1    when y <  0  (since exp(x) = y + 1 there).
 *
 * BUG FIX: the previous code used the threshold `v >= 1`, which wrongly
 * scaled the gradient by (y + 1) for activations in [0, 1) — i.e. for every
 * input x in (0, 1). The correct boundary is y >= 0; at y == 0 both branches
 * agree (dy * (0 + 1) === dy), so the function stays continuous.
 */
function eluGrad(args) {
    var inputs = args.inputs, backend = args.backend;
    var dy = inputs.dy, y = inputs.y;
    assertNotComplex([dy, y], 'eluGrad');
    var resultValues = new Float32Array(tfjsCore.util.sizeFromShape(y.shape));
    var values = backend.data.get(y.dataId).values;
    var dyValues = backend.data.get(dy.dataId).values;
    for (var i = 0; i < values.length; ++i) {
        var v = values[i];
        if (v >= 0) {
            // Linear region: gradient passes through unchanged.
            resultValues[i] = dyValues[i];
        }
        else {
            // Exponential region: scale by exp(x) = y + 1.
            resultValues[i] = dyValues[i] * (v + 1);
        }
    }
    return backend.makeTensorInfo(y.shape, 'float32', resultValues);
}
var eluGradConfig = {
    kernelName: tfjsCore.EluGrad,
    backendName: 'cpu',
    kernelFunc: eluGrad
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Coefficients of the Abramowitz & Stegun rational approximation (7.1.26)
// to the Gauss error function, re-exported by tfjs-core's backend_util.
var p = tfjsCore.backend_util.ERF_P;
var a1 = tfjsCore.backend_util.ERF_A1;
var a2 = tfjsCore.backend_util.ERF_A2;
var a3 = tfjsCore.backend_util.ERF_A3;
var a4 = tfjsCore.backend_util.ERF_A4;
var a5 = tfjsCore.backend_util.ERF_A5;
// Elementwise erf(x). Odd symmetry is exploited: the polynomial is evaluated
// on |x| and the sign is re-applied at the end.
var erf = unaryKernelFunc(tfjsCore.Erf, function (xi) {
    var sign = Math.sign(xi);
    var v = Math.abs(xi);
    var t = 1.0 / (1.0 + p * v);
    // Horner-style evaluation of the degree-5 polynomial in t.
    var poly = (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t;
    return sign * (1.0 - poly * Math.exp(-v * v));
});
var erfConfig = {
    kernelName: tfjsCore.Erf,
    backendName: 'cpu',
    kernelFunc: erf,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * ExpandDims kernel: inserts a size-1 dimension at axis `attrs.dim`.
 * Implemented purely as a reshape, so no data is copied. A negative `dim`
 * counts back from the end of the *output* rank (inputRank + 1).
 */
function expandDims(args) {
    var backend = args.backend;
    var input = args.inputs.input;
    var dim = args.attrs.dim;
    var rank = input.shape.length;
    var outShape = input.shape.slice();
    var insertAt = dim;
    if (dim < 0) {
        // Normalize the negative axis into [0, rank].
        tfjsCore.util.assert(-(rank + 1) <= dim, function () { return "Axis must be in the interval [" + -(rank + 1) + ", " + rank + "]"; });
        insertAt = rank + dim + 1;
    }
    outShape.splice(insertAt, 0, 1);
    return reshape({ inputs: { x: input }, backend: backend, attrs: { shape: outShape } });
}
var expandDimsConfig = {
    kernelName: tfjsCore.ExpandDims,
    backendName: 'cpu',
    kernelFunc: expandDims
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Elementwise true division (a / b) with broadcasting, registered as the
// RealDiv kernel. `realDivImpl` and `div` stay module-visible because other
// kernels (e.g. the inverse-FFT normalization) reuse them.
var realDivImpl = createSimpleBinaryKernelImpl(function (numerator, denominator) {
    return numerator / denominator;
});
var div = binaryKernelFunc(tfjsCore.RealDiv, realDivImpl);
var realDivConfig = {
    kernelName: tfjsCore.RealDiv,
    backendName: 'cpu',
    kernelFunc: div
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Calculate FFT of inner most elements of batch tensor.
+ */
/**
 * Calculate FFT of inner most elements of batch tensor.
 *
 * `input` is a complex [batch, innerDim] tensor; each row is transformed
 * independently (inverse transform when `inverse` is true) and the rows are
 * reassembled into a single complex output tensor.
 */
function fftBatch(input, inverse, cpuBackend) {
    var inputShape = input.shape;
    var batch = inputShape[0];
    var innerDim = inputShape[1];
    var inputVals = cpuBackend.data.get(input.dataId);
    // A complex tensor is stored as a pair of underlying real/imag tensors.
    var real2D = inputVals.complexTensorInfos.real;
    var imag2D = inputVals.complexTensorInfos.imag;
    // Collects real and imaginary values separately.
    var resultShape = [batch, innerDim];
    var resultSize = tfjsCore.util.sizeFromShape(resultShape);
    var resultReal = tfjsCore.util.getTypedArrayFromDType('float32', resultSize);
    var resultImag = tfjsCore.util.getTypedArrayFromDType('float32', resultSize);
    for (var b = 0; b < batch; b++) {
        // Slice out row b of the real and imag parts separately.
        // TODO: Support slice ops for complex type.
        var r = slice({
            inputs: { x: real2D },
            backend: cpuBackend,
            attrs: { begin: [b, 0], size: [1, innerDim] }
        });
        var i = slice({
            inputs: { x: imag2D },
            backend: cpuBackend,
            attrs: { begin: [b, 0], size: [1, innerDim] }
        });
        var input_1 = complex({ inputs: { real: r, imag: i }, backend: cpuBackend });
        // Run FFT by batch element.
        var _a = fftImpl(input_1, inverse, cpuBackend), real_1 = _a.real, imag_1 = _a.imag;
        // Interleave the row's result, then scatter it into the flat buffers.
        var res = tfjsCore.backend_util.mergeRealAndImagArrays(real_1, imag_1);
        for (var d = 0; d < innerDim; d++) {
            var c = tfjsCore.backend_util.getComplexWithIndex(res, d);
            resultReal[b * innerDim + d] = c.real;
            resultImag[b * innerDim + d] = c.imag;
        }
        // Per-row intermediates are released immediately.
        cpuBackend.disposeIntermediateTensorInfo(r);
        cpuBackend.disposeIntermediateTensorInfo(i);
        cpuBackend.disposeIntermediateTensorInfo(input_1);
    }
    var $realInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', resultReal);
    var $imagInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', resultImag);
    var result = complex({ inputs: { real: $realInfo, imag: $imagInfo }, backend: cpuBackend });
    cpuBackend.disposeIntermediateTensorInfo($realInfo);
    cpuBackend.disposeIntermediateTensorInfo($imagInfo);
    return result;
}
/**
 * 1-D (inverse) FFT over a single complex tensor. Uses the radix-2
 * Cooley-Tukey recursion when the length is a power of two, otherwise falls
 * back to the O(n^2) matrix DFT. Returns raw {real, imag} typed arrays
 * rather than a tensor.
 */
function fftImpl(input, inverse, cpuBackend) {
    var inputSize = tfjsCore.util.sizeFromShape(input.shape);
    var inputVals = cpuBackend.data.get(input.dataId);
    var realVals = cpuBackend.data.get(inputVals.complexTensorInfos.real.dataId).values;
    var imagVals = cpuBackend.data.get(inputVals.complexTensorInfos.imag.dataId).values;
    if (isExponentOf2(inputSize)) {
        var result = fftRadix2(realVals, imagVals, inputSize, inverse, cpuBackend);
        var resultShape = [input.shape[0], input.shape[1]];
        if (inverse) {
            // The inverse FFT is normalized by 1/n. This is done via the
            // RealDiv kernel on tensors wrapping the raw result arrays.
            var realInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', result.real);
            var imagInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', result.imag);
            var sizeInfo = cpuBackend.makeTensorInfo([], 'float32', tfjsCore.util.createScalarValue(inputSize, 'float32'));
            // Each div call consumes its own scalar so both can be disposed safely.
            var sizeInfoCopy = identity({ inputs: { x: sizeInfo }, backend: cpuBackend });
            var divRealInfo = realDivConfig.kernelFunc({ inputs: { a: realInfo, b: sizeInfo }, backend: cpuBackend });
            var divImagInfo = realDivConfig.kernelFunc({ inputs: { a: imagInfo, b: sizeInfoCopy }, backend: cpuBackend });
            var divRealVals = cpuBackend.data.get(divRealInfo.dataId).values;
            var divImagVals = cpuBackend.data.get(divImagInfo.dataId).values;
            cpuBackend.disposeIntermediateTensorInfo(realInfo);
            cpuBackend.disposeIntermediateTensorInfo(imagInfo);
            cpuBackend.disposeIntermediateTensorInfo(sizeInfo);
            cpuBackend.disposeIntermediateTensorInfo(sizeInfoCopy);
            cpuBackend.disposeIntermediateTensorInfo(divRealInfo);
            cpuBackend.disposeIntermediateTensorInfo(divImagInfo);
            return { real: divRealVals, imag: divImagVals };
        }
        return result;
    }
    else {
        // Non power-of-two length: interleave to [re, im, re, im, ...] and
        // run the naive DFT.
        var data = tfjsCore.backend_util.mergeRealAndImagArrays(realVals, imagVals);
        var rawOutput = fourierTransformByMatmul(data, inputSize, inverse);
        return tfjsCore.backend_util.splitRealAndImagArrays(rawOutput);
    }
}
/**
 * Returns true when `size` is a positive power of two (1, 2, 4, 8, ...).
 *
 * BUG FIX: the previous check `(size & size - 1) === 0` also accepted 0,
 * which would route a zero-sized input into the radix-2 recursion in
 * `fftRadix2` where `half` stays 0 and the recursion never terminates.
 * Requiring `size > 0` sends that degenerate case to the matrix-DFT path,
 * which handles it trivially.
 */
function isExponentOf2(size) {
    return size > 0 && (size & size - 1) === 0;
}
+ // FFT using Cooley-Tukey algorithm on radix 2 dimensional input.
+ function fftRadix2(realVals, imagVals, size, inverse, cpuBackend) {
+ if (size === 1) {
+ return { real: realVals, imag: imagVals };
+ }
+ var data = tfjsCore.backend_util.mergeRealAndImagArrays(realVals, imagVals);
+ var half = size / 2;
+ var evenComplex = tfjsCore.backend_util.complexWithEvenIndex(data);
+ var evenRealVals = evenComplex.real;
+ var evenImagVals = evenComplex.imag;
+ var evenShape = [evenRealVals.length];
+ var evenRealInfo = cpuBackend.makeTensorInfo(evenShape, 'float32', evenRealVals);
+ var evenImagInfo = cpuBackend.makeTensorInfo(evenShape, 'float32', evenImagVals);
+ var evenTensorInfo = complex({ inputs: { real: evenRealInfo, imag: evenImagInfo }, backend: cpuBackend });
+ var oddComplex = tfjsCore.backend_util.complexWithOddIndex(data);
+ var oddRealVals = oddComplex.real;
+ var oddImagVals = oddComplex.imag;
+ var oddShape = [oddRealVals.length];
+ var oddRealInfo = cpuBackend.makeTensorInfo(oddShape, 'float32', oddRealVals);
+ var oddImagInfo = cpuBackend.makeTensorInfo(oddShape, 'float32', oddImagVals);
+ var oddTensorInfo = complex({ inputs: { real: oddRealInfo, imag: oddImagInfo }, backend: cpuBackend });
+ // Recursive call for half part of original input.
+ var $evenComplex = fftRadix2(evenRealVals, evenImagVals, half, inverse, cpuBackend);
+ var $evenRealVals = $evenComplex.real;
+ var $evenImagVals = $evenComplex.imag;
+ var $evenShape = [$evenRealVals.length];
+ var $evenRealInfo = cpuBackend.makeTensorInfo($evenShape, 'float32', $evenRealVals);
+ var $evenImagInfo = cpuBackend.makeTensorInfo($evenShape, 'float32', $evenImagVals);
+ var $evenTensorInfo = complex({
+ inputs: { real: $evenRealInfo, imag: $evenImagInfo },
+ backend: cpuBackend
+ });
+ var $oddComplex = fftRadix2(oddRealVals, oddImagVals, half, inverse, cpuBackend);
+ var $oddRealVals = $oddComplex.real;
+ var $oddImagVals = $oddComplex.imag;
+ var $oddShape = [$oddRealVals.length];
+ var $oddRealInfo = cpuBackend.makeTensorInfo($oddShape, 'float32', $oddRealVals);
+ var $oddImagInfo = cpuBackend.makeTensorInfo($oddShape, 'float32', $oddImagVals);
+ var $oddTensorInfo = complex({ inputs: { real: $oddRealInfo, imag: $oddImagInfo }, backend: cpuBackend });
+ var e = tfjsCore.backend_util.exponents(size, inverse);
+ var eShape = [e.real.length];
+ var eRealInfo = cpuBackend.makeTensorInfo(eShape, 'float32', e.real);
+ var eImagInfo = cpuBackend.makeTensorInfo(eShape, 'float32', e.imag);
+ var complexInfo = complex({ inputs: { real: eRealInfo, imag: eImagInfo }, backend: cpuBackend });
+ var exponentInfo = multiply({ inputs: { a: complexInfo, b: $oddTensorInfo }, backend: cpuBackend });
+ var addPart = add({
+ inputs: { a: $evenTensorInfo, b: exponentInfo },
+ backend: cpuBackend
+ });
+ var subPart = sub({
+ inputs: { a: $evenTensorInfo, b: exponentInfo },
+ backend: cpuBackend
+ });
+ var addPartReal = real({ inputs: { input: addPart }, backend: cpuBackend });
+ var subPartReal = real({ inputs: { input: subPart }, backend: cpuBackend });
+ var addPartImag = imag({ inputs: { input: addPart }, backend: cpuBackend });
+ var subPartImag = imag({ inputs: { input: subPart }, backend: cpuBackend });
+ var $real = concat({
+ inputs: [addPartReal, subPartReal],
+ backend: cpuBackend,
+ attrs: { axis: 0 }
+ });
+ var $imag = concat({
+ inputs: [addPartImag, subPartImag],
+ backend: cpuBackend,
+ attrs: { axis: 0 }
+ });
+ var $realVals = cpuBackend.data.get($real.dataId).values;
+ var $imagVals = cpuBackend.data.get($imag.dataId).values;
+ cpuBackend.disposeIntermediateTensorInfo(evenRealInfo);
+ cpuBackend.disposeIntermediateTensorInfo(evenImagInfo);
+ cpuBackend.disposeIntermediateTensorInfo(evenTensorInfo);
+ cpuBackend.disposeIntermediateTensorInfo(oddRealInfo);
+ cpuBackend.disposeIntermediateTensorInfo(oddImagInfo);
+ cpuBackend.disposeIntermediateTensorInfo(oddTensorInfo);
+ cpuBackend.disposeIntermediateTensorInfo($evenRealInfo);
+ cpuBackend.disposeIntermediateTensorInfo($evenImagInfo);
+ cpuBackend.disposeIntermediateTensorInfo($evenTensorInfo);
+ cpuBackend.disposeIntermediateTensorInfo($oddRealInfo);
+ cpuBackend.disposeIntermediateTensorInfo($oddImagInfo);
+ cpuBackend.disposeIntermediateTensorInfo($oddTensorInfo);
+ cpuBackend.disposeIntermediateTensorInfo(eRealInfo);
+ cpuBackend.disposeIntermediateTensorInfo(eImagInfo);
+ cpuBackend.disposeIntermediateTensorInfo(complexInfo);
+ cpuBackend.disposeIntermediateTensorInfo(exponentInfo);
+ cpuBackend.disposeIntermediateTensorInfo(addPart);
+ cpuBackend.disposeIntermediateTensorInfo(subPart);
+ cpuBackend.disposeIntermediateTensorInfo(addPartReal);
+ cpuBackend.disposeIntermediateTensorInfo(addPartImag);
+ cpuBackend.disposeIntermediateTensorInfo(subPartReal);
+ cpuBackend.disposeIntermediateTensorInfo(subPartImag);
+ cpuBackend.disposeIntermediateTensorInfo($real);
+ cpuBackend.disposeIntermediateTensorInfo($imag);
+ return { real: $realVals, imag: $imagVals };
+ }
+ // Calculate fourier transform by multplying sinusoid matrix.
+ function fourierTransformByMatmul(data, size, inverse) {
+ var ret = new Float32Array(size * 2);
+ // TODO: Use matmul instead once it supports complex64 type.
+ for (var r = 0; r < size; r++) {
+ var real_2 = 0.0;
+ var imag_2 = 0.0;
+ for (var c = 0; c < size; c++) {
+ var e = tfjsCore.backend_util.exponent(r * c, size, inverse);
+ var term = tfjsCore.backend_util.getComplexWithIndex(data, c);
+ real_2 += term.real * e.real - term.imag * e.imag;
+ imag_2 += term.real * e.imag + term.imag * e.real;
+ }
+ if (inverse) {
+ real_2 /= size;
+ imag_2 /= size;
+ }
+ tfjsCore.backend_util.assignToTypedArray(ret, real_2, imag_2, r);
+ }
+ return ret;
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * FFT kernel entry point: collapses all outer dimensions into one batch
 * dimension, runs a 1-D FFT over the innermost dimension of every batch
 * element, then restores the original shape.
 */
function fft(args) {
    var backend = args.backend;
    var input = args.inputs.input;
    var totalSize = tfjsCore.util.sizeFromShape(input.shape);
    var innerDimensionSize = input.shape[input.shape.length - 1];
    var batch = totalSize / innerDimensionSize;
    // Collapse all outer dimensions to a single batch dimension.
    var input2D = reshape({
        inputs: { x: input },
        backend: backend,
        attrs: { shape: [batch, innerDimensionSize] }
    });
    var transformed = fftBatch(input2D, false, backend);
    var resultReshaped = reshape({ inputs: { x: transformed }, backend: backend, attrs: { shape: input.shape } });
    backend.disposeIntermediateTensorInfo(input2D);
    backend.disposeIntermediateTensorInfo(transformed);
    return resultReshaped;
}
var fftConfig = {
    kernelName: tfjsCore.FFT,
    backendName: 'cpu',
    kernelFunc: fft
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Fill kernel: creates a tensor of `attrs.shape` with every element set to
 * `attrs.value`. When `attrs.dtype` is not provided, the dtype is inferred
 * from the fill value itself.
 */
function fill(args) {
    var backend = args.backend;
    var shape = args.attrs.shape;
    var value = args.attrs.value;
    var dtype = args.attrs.dtype;
    var resolvedDtype = dtype || tfjsCore.util.inferDtype(value);
    var buffer = tfjsCore.util.getArrayFromDType(resolvedDtype, tfjsCore.util.sizeFromShape(shape));
    fillValues(buffer, value, resolvedDtype);
    return backend.makeTensorInfo(shape, resolvedDtype, buffer);
}
var fillConfig = {
    kernelName: tfjsCore.Fill,
    backendName: 'cpu',
    kernelFunc: fill
};
/**
 * Fills `values` (a plain array for the 'string' dtype, a TypedArray
 * otherwise) with `value` in place.
 *
 * The previous implementation branched on `dtype` but executed the exact
 * same statement in both branches — the distinction only carried type casts
 * in the original TypeScript, which compile away — so the dead branch is
 * removed. The `dtype` parameter is kept for interface compatibility.
 */
function fillValues(values, value, dtype) {
    values.fill(value);
}
+
// FlipLeftRight kernel: mirrors a batch of NHWC images along the width axis.
var flipLeftRightConfig = {
    kernelName: tfjsCore.FlipLeftRight,
    backendName: 'cpu',
    kernelFunc: function (_a) {
        var inputs = _a.inputs;
        var backend = _a.backend;
        var image = inputs.image;
        var cpuBackend = backend;
        var output = tfjsCore.util.getTypedArrayFromDType(image.dtype, tfjsCore.util.sizeFromShape(image.shape));
        var _b = __read(image.shape, 4), batch = _b[0], imageHeight = _b[1], imageWidth = _b[2], numChannels = _b[3];
        var imageVals = cpuBackend.data.get(image.dataId).values;
        for (var b = 0; b < batch; b++) {
            var batchOffset = b * imageWidth * imageHeight * numChannels;
            for (var row = 0; row < imageHeight; row++) {
                var rowOffset = row * (imageWidth * numChannels);
                for (var col = 0; col < imageWidth; col++) {
                    var colOffset = col * numChannels;
                    // Source column, mirrored across the vertical center line.
                    var mirroredCol = Math.round(imageWidth - col - 1);
                    var inBounds = mirroredCol >= 0 && mirroredCol < imageWidth;
                    var mirroredColOffset = mirroredCol * numChannels;
                    for (var channel = 0; channel < numChannels; channel++) {
                        var outIdx = batchOffset + rowOffset + colOffset + channel;
                        if (inBounds) {
                            // Copy the pixel from the mirrored column.
                            output[outIdx] = imageVals[batchOffset + rowOffset + mirroredColOffset + channel];
                        }
                        else {
                            // Defensive fallback: keep the original pixel value.
                            output[outIdx] = imageVals[outIdx];
                        }
                    }
                }
            }
        }
        var dataId = cpuBackend.write(output, image.shape, image.dtype);
        return { dataId: dataId, shape: image.shape, dtype: image.dtype };
    }
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Elementwise floor(a / b) with broadcasting; the kernel output dtype is
// forced to int32 (last argument to binaryKernelFunc).
var floorDivImpl = createSimpleBinaryKernelImpl(function (a, b) {
    return Math.floor(a / b);
});
var floorDiv = binaryKernelFunc(tfjsCore.FloorDiv, floorDivImpl, null /* complexImpl */, 'int32');
var floorDivConfig = {
    kernelName: tfjsCore.FloorDiv,
    backendName: 'cpu',
    kernelFunc: floorDiv
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * FusedConv2D kernel: a Conv2D followed by an optional bias-add and an
 * optional activation, executed as separate CPU kernels. Each intermediate
 * is disposed as soon as it is superseded by the next stage.
 */
function fusedConv2D(args) {
    var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
    var x = inputs.x, filter = inputs.filter, bias = inputs.bias, preluActivationWeights = inputs.preluActivationWeights;
    var strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode, activation = attrs.activation, leakyreluAlpha = attrs.leakyreluAlpha;
    // Stage 1: plain convolution.
    var out = conv2D({
        inputs: { x: x, filter: filter },
        backend: backend,
        attrs: {
            strides: strides,
            pad: pad,
            dataFormat: dataFormat,
            dilations: dilations,
            dimRoundingMode: dimRoundingMode
        }
    });
    // Stage 2: optional bias add.
    if (bias) {
        var preBias = out;
        out = add({ inputs: { a: preBias, b: bias }, backend: backend });
        backend.disposeIntermediateTensorInfo(preBias);
    }
    // Stage 3: optional activation (relu, prelu, leakyrelu, ...).
    if (activation) {
        var preActivation = out;
        out = applyActivation(backend, preActivation, activation, preluActivationWeights, leakyreluAlpha);
        backend.disposeIntermediateTensorInfo(preActivation);
    }
    return out;
}
var fusedConv2DConfig = {
    kernelName: tfjsCore.FusedConv2D,
    backendName: 'cpu',
    kernelFunc: fusedConv2D
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for FusedDepthwiseConv2D: mirrors fusedConv2D but dispatches to
+ // depthwiseConv2dNative for the convolution step. Optional bias-add and
+ // activation follow, with superseded intermediates disposed along the way.
+ function fusedDepthwiseConv2D(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, filter = inputs.filter, bias = inputs.bias, preluActivationWeights = inputs.preluActivationWeights;
+ var strides = attrs.strides, pad = attrs.pad, dataFormat = attrs.dataFormat, dilations = attrs.dilations, dimRoundingMode = attrs.dimRoundingMode, activation = attrs.activation, leakyreluAlpha = attrs.leakyreluAlpha;
+ // Step 1: the unfused depthwise convolution.
+ var result = depthwiseConv2dNative({
+ inputs: { x: x, filter: filter },
+ backend: backend,
+ attrs: { strides: strides, pad: pad, dataFormat: dataFormat, dilations: dilations, dimRoundingMode: dimRoundingMode }
+ });
+ // Step 2 (optional): broadcast-add the bias, then free the conv output.
+ if (bias) {
+ var oldResult = result;
+ result = add({ inputs: { a: result, b: bias }, backend: backend });
+ backend.disposeIntermediateTensorInfo(oldResult);
+ }
+ // Step 3 (optional): apply the fused activation, then free its input.
+ if (activation) {
+ var oldResult = result;
+ result = applyActivation(backend, result, activation, preluActivationWeights, leakyreluAlpha);
+ backend.disposeIntermediateTensorInfo(oldResult);
+ }
+ return result;
+ }
+ // Kernel registration record for the CPU backend.
+ var fusedDepthwiseConv2DConfig = {
+ kernelName: tfjsCore.FusedDepthwiseConv2D,
+ backendName: 'cpu',
+ kernelFunc: fusedDepthwiseConv2D
+ };
+
+ // CPU kernel for GatherNd: gathers slices of `params` addressed by the
+ // multi-dimensional index rows in `indices`. Shape validation and the
+ // (resultShape, numSlices, sliceSize, strides) decomposition come from
+ // backend_util.prepareAndValidate; the element-wise work is in gatherNdImpl.
+ function gatherNd(args) {
+ var inputs = args.inputs, backend = args.backend;
+ var params = inputs.params, indices = inputs.indices;
+ var paramsSize = tfjsCore.util.sizeFromShape(params.shape);
+ var indicesShape = indices.shape;
+ // The innermost indices dimension is the rank of each gathered slice index.
+ var sliceRank = indicesShape[indicesShape.length - 1];
+ var _a = __read(tfjsCore.backend_util.prepareAndValidate(params, indices), 4), resultShape = _a[0], numSlices = _a[1], sliceSize = _a[2], strides = _a[3];
+ // Nothing to gather: return an empty tensor of the computed result shape.
+ if (numSlices === 0) {
+ return backend.makeTensorInfo(resultShape, params.dtype, []);
+ }
+ var indicesData = backend.data.get(indices.dataId).values;
+ var paramsBuf = backend.bufferSync(params);
+ var outBuf = gatherNdImpl(indicesData, paramsBuf, params.dtype, numSlices, sliceRank, sliceSize, strides, params.shape, paramsSize);
+ return backend.makeTensorInfo(resultShape, params.dtype, outBuf.values);
+ }
+ // Kernel registration record for the CPU backend.
+ var gatherNdConfig = {
+ kernelName: tfjsCore.GatherNd,
+ backendName: 'cpu',
+ kernelFunc: gatherNd
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for GatherV2: gathers slices of `x` along `axis` at the given
+ // `indices`, with optional leading batch dimensions (`batchDims`). Both
+ // operands are reshaped to canonical 4-D/2-D forms, the gather is done by
+ // gatherV2Impl, and the result is reshaped back to the expected output shape.
+ function gatherV2(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, indices = inputs.indices;
+ var axis = attrs.axis, batchDims = attrs.batchDims;
+ assertNotComplex([x, indices], 'gatherV2');
+ // Throw error when any index is out of bound.
+ var parsedAxis = tfjsCore.util.parseAxisParam(axis, x.shape)[0];
+ var indicesVals = backend.data.get(indices.dataId).values;
+ var axisDim = x.shape[parsedAxis];
+ var _loop_1 = function (i) {
+ var index = indicesVals[i];
+ tfjsCore.util.assert(index <= axisDim - 1 && index >= 0, function () { return "GatherV2: the index value " + index + " is not in [0, " + (axisDim - 1) + "]"; });
+ };
+ for (var i = 0; i < indicesVals.length; ++i) {
+ _loop_1(i);
+ }
+ // Treat a null/undefined batchDims as 0 (no batch dimensions).
+ var $batchDims = batchDims;
+ if (batchDims == null) {
+ $batchDims = 0;
+ }
+ var indicesSize = tfjsCore.util.sizeFromShape(indices.shape);
+ var shapeInfo = tfjsCore.backend_util.segment_util.collectGatherOpShapeInfo(x, indices, parsedAxis, $batchDims);
+ // Canonicalize x to [batch, outer, dim, slice] so the impl can index it
+ // uniformly regardless of the original rank/axis.
+ var flattenX = reshape({
+ inputs: { x: x },
+ backend: backend,
+ attrs: {
+ shape: [
+ shapeInfo.batchSize, shapeInfo.outerSize, shapeInfo.dimSize,
+ shapeInfo.sliceSize
+ ]
+ }
+ });
+ // Canonicalize indices to [batch, indicesPerBatch].
+ var flattenIndex = reshape({
+ inputs: { x: indices },
+ backend: backend,
+ attrs: { shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize] }
+ });
+ var flattenOutputShape = [
+ shapeInfo.batchSize, shapeInfo.outerSize, indicesSize / shapeInfo.batchSize,
+ shapeInfo.sliceSize
+ ];
+ var indicesBuf = backend.bufferSync(flattenIndex);
+ var xBuf = backend.bufferSync(flattenX);
+ var outBuf = gatherV2Impl(xBuf, indicesBuf, flattenOutputShape);
+ // Free the reshaped views before returning the final tensor.
+ backend.disposeIntermediateTensorInfo(flattenX);
+ backend.disposeIntermediateTensorInfo(flattenIndex);
+ return backend.makeTensorInfo(shapeInfo.outputShape, outBuf.dtype, outBuf.values);
+ }
+ // Kernel registration record for the CPU backend.
+ var gatherV2Config = {
+ kernelName: tfjsCore.GatherV2,
+ backendName: 'cpu',
+ kernelFunc: gatherV2
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for IFFT: inverse fast Fourier transform over the innermost
+ // dimension. The input is flattened to [batch, innerDim], handed to
+ // fftBatch with inverse=true, and reshaped back to the original shape.
+ function ifft(args) {
+ var inputs = args.inputs, backend = args.backend;
+ var input = inputs.input;
+ var inputSize = tfjsCore.util.sizeFromShape(input.shape);
+ // Collapse all outer dimensions to a single batch dimension.
+ var innerDimensionSize = input.shape[input.shape.length - 1];
+ var batch = inputSize / innerDimensionSize;
+ var input2D = reshape({
+ inputs: { x: input },
+ backend: backend,
+ attrs: { shape: [batch, innerDimensionSize] }
+ });
+ // `true` selects the inverse transform in fftBatch.
+ var result = fftBatch(input2D, true, backend);
+ var resultReshaped = reshape({ inputs: { x: result }, backend: backend, attrs: { shape: input.shape } });
+ // Free both intermediates; only the reshaped result survives.
+ backend.disposeIntermediateTensorInfo(input2D);
+ backend.disposeIntermediateTensorInfo(result);
+ return resultReshaped;
+ }
+ // Kernel registration record for the CPU backend.
+ var ifftConfig = {
+ kernelName: tfjsCore.IFFT,
+ backendName: 'cpu',
+ kernelFunc: ifft
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise IsFinite kernel: 1 where the value is finite, else 0, as a
+ // 'bool' tensor. NOTE: this local `isFinite` shadows the global isFinite.
+ var isFinite = unaryKernelFunc(tfjsCore.IsFinite, function (xi) { return Number.isFinite(xi) ? 1 : 0; }, 'bool');
+ var isFiniteConfig = {
+ kernelName: tfjsCore.IsFinite,
+ backendName: 'cpu',
+ kernelFunc: isFinite,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise IsInf kernel: 1 where the value is +/-Infinity, else 0,
+ // emitted as a 'bool' tensor.
+ var isInf = unaryKernelFunc(tfjsCore.IsInf, function (xi) { return Math.abs(xi) === Infinity ? 1 : 0; }, 'bool');
+ var isInfConfig = {
+ kernelName: tfjsCore.IsInf,
+ backendName: 'cpu',
+ kernelFunc: isInf,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise IsNan kernel: 1 where the value is NaN, else 0, as a 'bool'
+ // tensor. Named isNaN$1 by the bundler to avoid clashing with global isNaN.
+ var isNaN$1 = unaryKernelFunc(tfjsCore.IsNan, function (xi) { return Number.isNaN(xi) ? 1 : 0; }, 'bool');
+ var isNaNConfig = {
+ kernelName: tfjsCore.IsNan,
+ backendName: 'cpu',
+ kernelFunc: isNaN$1,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for LinSpace: produces `num` evenly spaced float32 values from
+ // `start` to `stop` (computed by linSpaceImpl) as a 1-D tensor.
+ function linSpace(args) {
+ var backend = args.backend, attrs = args.attrs;
+ var start = attrs.start, stop = attrs.stop, num = attrs.num;
+ var outVals = linSpaceImpl(start, stop, num);
+ return backend.makeTensorInfo([outVals.length], 'float32', outVals);
+ }
+ // Kernel registration record for the CPU backend.
+ var linSpaceConfig = {
+ kernelName: tfjsCore.LinSpace,
+ backendName: 'cpu',
+ kernelFunc: linSpace
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise Log1p kernel: computes ln(1 + x) per element via Math.log1p.
+ var log1p = unaryKernelFunc(tfjsCore.Log1p, function (xi) { return Math.log1p(xi); });
+ var log1pConfig = {
+ kernelName: tfjsCore.Log1p,
+ backendName: 'cpu',
+ kernelFunc: log1p,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise LogicalAnd kernel (broadcasting handled by the binary-kernel
+ // helpers). No complex-number implementation; output dtype is 'bool'.
+ var logicalAndImpl = createSimpleBinaryKernelImpl(function (a, b) { return a && b; });
+ var logicalAnd = binaryKernelFunc(tfjsCore.LogicalAnd, logicalAndImpl, null /* complexImpl */, 'bool');
+ var logicalAndConfig = {
+ kernelName: tfjsCore.LogicalAnd,
+ backendName: 'cpu',
+ kernelFunc: logicalAnd
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise LogicalNot kernel: 1 where the value is falsy, else 0, as a
+ // 'bool' tensor.
+ var logicalNot = unaryKernelFunc(tfjsCore.LogicalNot, function (xi) { return xi ? 0 : 1; }, 'bool');
+ var logicalNotConfig = {
+ kernelName: tfjsCore.LogicalNot,
+ backendName: 'cpu',
+ kernelFunc: logicalNot,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise LogicalOr kernel (broadcasting handled by the binary-kernel
+ // helpers). No complex-number implementation; output dtype is 'bool'.
+ var logicalOrImpl = createSimpleBinaryKernelImpl(function (a, b) { return a || b; });
+ var logicalOr = binaryKernelFunc(tfjsCore.LogicalOr, logicalOrImpl, null /* complexImpl */, 'bool');
+ var logicalOrConfig = {
+ kernelName: tfjsCore.LogicalOr,
+ backendName: 'cpu',
+ kernelFunc: logicalOr
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for LRN (local response normalization). For each element, sums
+ // the squares of its neighbors within +/-depthRadius along the channel axis
+ // (axis 3 of the NHWC input) and computes
+ //   out = x * (bias + alpha * sum)^(-beta).
+ function lRN(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var depthRadius = attrs.depthRadius, bias = attrs.bias, alpha = attrs.alpha, beta = attrs.beta;
+ assertNotComplex(x, 'LRN');
+ var channels = x.shape[3];
+ var maxD = channels - 1;
+ var xValues = backend.data.get(x.dataId).values;
+ var size = tfjsCore.util.sizeFromShape(x.shape);
+ var result = new Float32Array(size);
+ // Sum of squares over the channel window [c - depthRadius, c + depthRadius]
+ // around the flat offset's channel, clamped to [0, channels - 1]. Because
+ // channels are the innermost dimension, the window is contiguous in memory.
+ function sumAcrossChannels(offset) {
+ var currentChannel = offset % channels;
+ var beginSumOffset = offset - currentChannel + Math.max(0, currentChannel - depthRadius);
+ var endSumOffset = offset - currentChannel + Math.min(currentChannel + depthRadius, maxD);
+ var sum = 0.0;
+ for (; beginSumOffset <= endSumOffset; beginSumOffset++) {
+ var z = xValues[beginSumOffset];
+ sum += z * z;
+ }
+ return sum;
+ }
+ for (var offset = 0; offset < size; offset++) {
+ var sum = sumAcrossChannels(offset);
+ var val = xValues[offset] * Math.pow(bias + alpha * sum, -beta);
+ result[offset] = val;
+ }
+ return backend.makeTensorInfo(x.shape, x.dtype, result);
+ }
+ // Kernel registration record for the CPU backend.
+ var lRNConfig = {
+ kernelName: tfjsCore.LRN,
+ backendName: 'cpu',
+ kernelFunc: lRN
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for LRNGrad: gradient of local response normalization. For
+ // each output position it recomputes the channel-window norm
+ // (alpha * sum(x_k^2) + bias) and scatters the per-neighbor gradient
+ // contributions into `result` (hence result[k] += ...).
+ function lRNGrad(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x, y = inputs.y, dy = inputs.dy;
+ var depthRadius = attrs.depthRadius, bias = attrs.bias, alpha = attrs.alpha, beta = attrs.beta;
+ assertNotComplex(dy, 'LRNGrad');
+ var dySize = tfjsCore.util.sizeFromShape(dy.shape);
+ var channels = dy.shape[3];
+ var dyValues = backend.data.get(dy.dataId).values;
+ var xValues = backend.data.get(x.dataId).values;
+ var yValues = backend.data.get(y.dataId).values;
+ var result = new Float32Array(dySize);
+ var size = dySize;
+ for (var offset = 0; offset < size; offset++) {
+ var currentChannel = offset % channels;
+ // Flat bounds of the channel window around this element (half-open end).
+ var depthBegin = (offset - currentChannel) + Math.max(0, currentChannel - depthRadius);
+ var depthEnd = (offset - currentChannel) +
+ Math.min(channels, currentChannel + depthRadius + 1);
+ // norm = alpha * sum(x_k^2) + bias over the window.
+ var norm = 0;
+ for (var k = depthBegin; k < depthEnd; k++) {
+ norm += Math.pow(xValues[k], 2);
+ }
+ norm = alpha * norm + bias;
+ // Scatter each neighbor's gradient contribution; the diagonal (k ==
+ // offset) gets the extra norm^(-beta) term from differentiating x_k
+ // directly.
+ for (var k = depthBegin; k < depthEnd; k++) {
+ var dyi = -2 * alpha * beta * xValues[k] * yValues[offset] / norm;
+ if (offset === k) {
+ dyi += Math.pow(norm, -beta);
+ }
+ dyi *= dyValues[offset];
+ result[k] += dyi;
+ }
+ }
+ return backend.makeTensorInfo(dy.shape, x.dtype, result);
+ }
+ // Kernel registration record for the CPU backend.
+ var lRNGradConfig = {
+ kernelName: tfjsCore.LRNGrad,
+ backendName: 'cpu',
+ kernelFunc: lRNGrad
+ };
+
+ // CPU kernel for Max: reduces `x` with max over `reductionIndices`. If the
+ // reduction axes are not innermost, the data is first transposed so they
+ // are; maxImpl then reduces contiguous runs of `reduceSize` elements. With
+ // keepDims, the output shape re-inserts size-1 dims for the reduced axes.
+ function max(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var reductionIndices = attrs.reductionIndices, keepDims = attrs.keepDims;
+ var cpuBackend = backend;
+ var xShape = x.shape;
+ var xRank = xShape.length;
+ var origAxes = tfjsCore.util.parseAxisParam(reductionIndices, xShape);
+ var axes = origAxes;
+ // Non-null only when the requested axes are not already innermost.
+ var permutedAxes = tfjsCore.backend_util.getAxesPermutation(axes, xRank);
+ var xVals = cpuBackend.data.get(x.dataId).values;
+ if (permutedAxes != null) {
+ // Transpose so the reduction axes become the innermost dimensions;
+ // xVals/xShape/axes are all rebound to the transposed layout.
+ var newShape = new Array(xRank);
+ for (var i = 0; i < newShape.length; i++) {
+ newShape[i] = xShape[permutedAxes[i]];
+ }
+ xVals = transposeImpl(xVals, xShape, x.dtype, permutedAxes, newShape);
+ axes = tfjsCore.backend_util.getInnerMostAxes(axes.length, xRank);
+ xShape = newShape;
+ }
+ assertNotComplex(x, 'max');
+ tfjsCore.backend_util.assertAxesAreInnerMostDims('max', axes, xRank);
+ var _a = __read(tfjsCore.backend_util.computeOutAndReduceShapes(xShape, axes), 2), maxOutShape = _a[0], reduceShape = _a[1];
+ var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
+ var result = maxImpl(xVals, reduceSize, maxOutShape, x.dtype);
+ var dataId = cpuBackend.write(result, maxOutShape, x.dtype);
+ var outShape = maxOutShape;
+ if (keepDims) {
+ // reshape
+ var newShape = tfjsCore.backend_util.expandShapeToKeepDim(maxOutShape, origAxes);
+ outShape = newShape;
+ }
+ return { dataId: dataId, shape: outShape, dtype: x.dtype };
+ }
+ // Kernel registration record for the CPU backend.
+ var maxConfig = {
+ kernelName: tfjsCore.Max,
+ backendName: 'cpu',
+ kernelFunc: max
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for MaxPool (2-D max pooling; dilations are fixed to 1). When
+ // the filter is 1x1 and the output shape equals the input shape the op is a
+ // no-op, so it short-circuits to identity; otherwise the shared `pool`
+ // helper does the windowed max.
+ function maxPool(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ assertNotComplex(x, 'maxPool');
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode;
+ var dilations = 1;
+ tfjsCore.util.assert(tfjsCore.backend_util.eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in maxPool: Either strides or dilations must be 1. ' +
+ ("Got strides " + strides + " and dilations '" + dilations + "'"); });
+ var convInfo = tfjsCore.backend_util.computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode);
+ var res;
+ if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&
+ tfjsCore.util.arraysEqual(convInfo.inShape, convInfo.outShape)) {
+ // 1x1 window over an unchanged shape: pooling is the identity.
+ res = identity({ inputs: { x: x }, backend: backend });
+ }
+ else {
+ var xValues = backend.data.get(x.dataId).values;
+ var strides_1 = tfjsCore.util.computeStrides(x.shape);
+ var buffer = pool(xValues, x.shape, x.dtype, strides_1, convInfo, 'max');
+ res = backend.makeTensorInfo(convInfo.outShape, x.dtype, buffer.values);
+ }
+ return res;
+ }
+ // Kernel registration record for the CPU backend.
+ var maxPoolConfig = {
+ kernelName: tfjsCore.MaxPool,
+ backendName: 'cpu',
+ kernelFunc: maxPool
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for MaxPool3D: 3-D max pooling with dilations fixed to 1,
+ // delegating the windowed reduction to the shared `pool3d` helper. The
+ // output is always materialized as float32.
+ function maxPool3D(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode, dataFormat = attrs.dataFormat;
+ assertNotComplex(x, 'maxPool3d');
+ var convInfo = tfjsCore.backend_util.computePool3DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode, dataFormat);
+ var xValues = backend.data.get(x.dataId).values;
+ var outBuf = pool3d(xValues, x.shape, x.dtype, tfjsCore.util.computeStrides(x.shape), convInfo, 'max');
+ return backend.makeTensorInfo(outBuf.shape, 'float32', outBuf.values);
+ }
+ // Kernel registration record for the CPU backend.
+ var maxPool3DConfig = {
+ kernelName: tfjsCore.MaxPool3D,
+ backendName: 'cpu',
+ kernelFunc: maxPool3D
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for MaxPool3DGrad: backprop through 3-D max pooling. For each
+ // input position it scans every pooling window that could have covered it
+ // and accumulates dy from the windows where this position held the max
+ // (determined by comparing against maxPool3dPositions). The index math
+ // mirrors the corresponding WebGL shader (see "Shader code begins").
+ function maxPool3DGrad(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var dy = inputs.dy, input = inputs.input;
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode;
+ assertNotComplex([dy, input], 'maxPool3DGrad');
+ var convInfo = tfjsCore.backend_util.computePool3DInfo(input.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode);
+ var inputBuf = backend.bufferSync(input);
+ // Per-output-cell flat position of the max within its pooling window.
+ var maxPosBuf = maxPool3dPositions(inputBuf, convInfo);
+ var strideDepth = convInfo.strideDepth;
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var dilationDepth = convInfo.dilationDepth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ // Padding offsets flipped for the transposed (gradient) direction.
+ var padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;
+ var padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ var padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ var dx = tfjsCore.buffer(input.shape, 'float32');
+ var dyBuf = backend.bufferSync(dy);
+ for (var batch = 0; batch < convInfo.batchSize; ++batch) {
+ for (var channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (var dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) {
+ for (var dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) {
+ for (var dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) {
+ // Shader code begins
+ var dyDepthCorner = dxDepth - padFront;
+ var dyRowCorner = dxRow - padTop;
+ var dyColCorner = dxCol - padLeft;
+ var dotProd = 0;
+ for (var wDepth = 0; wDepth < effectiveFilterDepth; wDepth += dilationDepth) {
+ var dyDepth = (dyDepthCorner + wDepth) / strideDepth;
+ // Skip output cells that are out of range or not stride-aligned.
+ if (dyDepth < 0 || dyDepth >= convInfo.outDepth ||
+ Math.floor(dyDepth) !== dyDepth) {
+ continue;
+ }
+ for (var wRow = 0; wRow < effectiveFilterHeight; wRow += dilationHeight) {
+ var dyRow = (dyRowCorner + wRow) / strideHeight;
+ if (dyRow < 0 || dyRow >= convInfo.outHeight ||
+ Math.floor(dyRow) !== dyRow) {
+ continue;
+ }
+ for (var wCol = 0; wCol < effectiveFilterWidth; wCol += dilationWidth) {
+ var dyCol = (dyColCorner + wCol) / strideWidth;
+ if (dyCol < 0 || dyCol >= convInfo.outWidth ||
+ Math.floor(dyCol) !== dyCol) {
+ continue;
+ }
+ // Recover the max position in the same (flipped) coordinate
+ // frame as curPos; only the argmax cell receives gradient.
+ var maxPos = effectiveFilterDepth * effectiveFilterHeight *
+ effectiveFilterWidth -
+ 1 -
+ maxPosBuf.get(batch, dyDepth, dyRow, dyCol, channel);
+ var curPos = wDepth * effectiveFilterHeight * effectiveFilterWidth +
+ wRow * effectiveFilterWidth + wCol;
+ var mask = maxPos === curPos ? 1 : 0;
+ if (mask === 0) {
+ continue;
+ }
+ var pixel = dyBuf.get(batch, dyDepth, dyRow, dyCol, channel);
+ dotProd += pixel * mask;
+ }
+ }
+ }
+ dx.set(dotProd, batch, dxDepth, dxRow, dxCol, channel);
+ }
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
+ }
+ // Kernel registration record for the CPU backend.
+ var maxPool3DGradConfig = {
+ kernelName: tfjsCore.MaxPool3DGrad,
+ backendName: 'cpu',
+ kernelFunc: maxPool3DGrad
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for MaxPoolGrad: backprop through 2-D max pooling. Structure
+ // parallels maxPool3DGrad, one spatial dimension fewer: for each input
+ // position, accumulate dy from every pooling window in which this position
+ // was the max (via maxPoolPositions).
+ function maxPoolGrad(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var dy = inputs.dy, input = inputs.input, output = inputs.output;
+ var x = input;
+ assertNotComplex([input, output], 'maxPoolGrad');
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, dimRoundingMode = attrs.dimRoundingMode;
+ var convInfo = tfjsCore.backend_util.computePool2DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode);
+ var xValues = backend.data.get(x.dataId).values;
+ // Per-output-cell flat position of the max within its pooling window.
+ var maxPosBuf = tfjsCore.buffer(convInfo.outShape, x.dtype, maxPoolPositions(xValues, x.shape, x.dtype, convInfo).values);
+ var strideHeight = convInfo.strideHeight;
+ var strideWidth = convInfo.strideWidth;
+ var dilationHeight = convInfo.dilationHeight;
+ var dilationWidth = convInfo.dilationWidth;
+ var effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ var effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ // Padding offsets flipped for the transposed (gradient) direction.
+ var padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ var padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ var dx = tfjsCore.buffer(x.shape, 'float32');
+ var dyData = backend.data.get(dy.dataId).values;
+ var dyBuf = tfjsCore.buffer(dy.shape, 'float32', dyData);
+ for (var b = 0; b < convInfo.batchSize; ++b) {
+ for (var d = 0; d < convInfo.inChannels; ++d) {
+ for (var dxR = 0; dxR < convInfo.inHeight; ++dxR) {
+ for (var dxC = 0; dxC < convInfo.inWidth; ++dxC) {
+ // Shader code begins.
+ var dyRCorner = dxR - padTop;
+ var dyCCorner = dxC - padLeft;
+ var dotProd = 0;
+ for (var wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {
+ var dyR = (dyRCorner + wR) / strideHeight;
+ // Skip output cells that are out of range or not stride-aligned.
+ if (dyR < 0 || dyR >= convInfo.outHeight ||
+ Math.floor(dyR) !== dyR) {
+ continue;
+ }
+ for (var wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {
+ var dyC = (dyCCorner + wC) / strideWidth;
+ if (dyC < 0 || dyC >= convInfo.outWidth ||
+ Math.floor(dyC) !== dyC) {
+ continue;
+ }
+ // Only the argmax cell of each window receives gradient.
+ var maxPos = effectiveFilterHeight * effectiveFilterWidth - 1 -
+ maxPosBuf.get(b, dyR, dyC, d);
+ var curPos = wR * effectiveFilterWidth + wC;
+ var mask = maxPos === curPos ? 1 : 0;
+ if (mask === 0) {
+ continue;
+ }
+ var pixel = dyBuf.get(b, dyR, dyC, d);
+ dotProd += pixel * mask;
+ }
+ }
+ dx.set(dotProd, b, dxR, dxC, d);
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
+ }
+ // Kernel registration record for the CPU backend.
+ var maxPoolGradConfig = {
+ kernelName: tfjsCore.MaxPoolGrad,
+ backendName: 'cpu',
+ kernelFunc: maxPoolGrad
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Helper for MaxPoolWithArgmax: runs the max pool and, in a second pass,
+ // computes the flat argmax positions (maxPoolPositions with
+ // flattenPositions=true). Returns [pooledValues, positionValues].
+ function maxPoolWithArgmaxImpl(xValues, xShape, dtype, includeBatchInIndex, convInfo) {
+ var strides = tfjsCore.util.computeStrides(xShape);
+ var maxPools = pool(xValues, xShape, dtype, strides, convInfo, 'max');
+ var maxPositions = maxPoolPositions(xValues, xShape, dtype, convInfo, true, includeBatchInIndex);
+ return [maxPools.values, maxPositions.values];
+ }
+
+ // CPU kernel registration for MaxPoolWithArgmax. Returns two tensors: the
+ // pooled values (input dtype) and the argmax indices (declared 'int32').
+ var maxPoolWithArgmaxConfig = {
+ kernelName: tfjsCore.MaxPoolWithArgmax,
+ backendName: 'cpu',
+ kernelFunc: function (_a) {
+ var inputs = _a.inputs, attrs = _a.attrs, backend = _a.backend;
+ var x = inputs.x;
+ var filterSize = attrs.filterSize, strides = attrs.strides, pad = attrs.pad, includeBatchInIndex = attrs.includeBatchInIndex;
+ var cpuBackend = backend;
+ assertNotComplex(x, 'MaxPoolWithArgmax');
+ var values = cpuBackend.data.get(x.dataId).values;
+ var convInfo = tfjsCore.backend_util.computePool2DInfo(x.shape, filterSize, strides, [1, 1], pad);
+ var _b = __read(maxPoolWithArgmaxImpl(values, x.shape, x.dtype, includeBatchInIndex, convInfo), 2), pooled = _b[0], indexes = _b[1];
+ var pooledDataId = cpuBackend.write(pooled, convInfo.outShape, x.dtype);
+ // NOTE(review): indexes are written with x.dtype but surfaced as
+ // 'int32' below — presumably intentional upstream; verify if touched.
+ var indexesDataId = cpuBackend.write(indexes, convInfo.outShape, x.dtype);
+ return [
+ { dataId: pooledDataId, shape: convInfo.outShape, dtype: x.dtype },
+ { dataId: indexesDataId, shape: convInfo.outShape, dtype: 'int32' }
+ ];
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Mean reduction over `axis`, implemented as sum(x / reduceSize) rather than
+ // sum(x) / reduceSize: the input is cast to float32, divided element-wise by
+ // the scalar reduce size, then summed. All intermediates are disposed; the
+ // sum result is returned as-is (dtype float32).
+ function mean(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var axis = attrs.axis, keepDims = attrs.keepDims;
+ var axes = tfjsCore.util.parseAxisParam(axis, x.shape);
+ var shapes = tfjsCore.backend_util.computeOutAndReduceShapes(x.shape, axes);
+ var reduceShape = shapes[1];
+ // Number of elements collapsed per output element.
+ var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
+ var toDispose = [];
+ var reduceSizeScalar = backend.makeTensorInfo([], 'float32', new Float32Array([reduceSize]));
+ toDispose.push(reduceSizeScalar);
+ var $x = cast({ inputs: { x: x }, backend: backend, attrs: { dtype: 'float32' } });
+ toDispose.push($x);
+ var res = div({ inputs: { a: $x, b: reduceSizeScalar }, backend: backend });
+ toDispose.push(res);
+ var result = sum({ inputs: { x: res }, backend: backend, attrs: { axis: axis, keepDims: keepDims } });
+ toDispose.forEach(function (t) { return backend.disposeIntermediateTensorInfo(t); });
+ return result;
+ }
+ // Registers mean for the Mean op on the 'cpu' backend.
+ var meanConfig = {
+ kernelName: tfjsCore.Mean,
+ backendName: 'cpu',
+ kernelFunc: mean
+ };
+
+ // Min reduction over `axis`. The reduced axes are first transposed to be the
+ // innermost dims (if not already), so each output element reduces a
+ // contiguous run of `reduceSize` values. NaN is propagated: any NaN in a run
+ // becomes the run's result. With keepDims the output is reshaped back to the
+ // input rank with size-1 reduced dims.
+ function min(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var axis = attrs.axis, keepDims = attrs.keepDims;
+ assertNotComplex(x, 'min');
+ var origAxes = tfjsCore.util.parseAxisParam(axis, x.shape);
+ var axes = origAxes;
+ var permutedAxes = tfjsCore.backend_util.getAxesPermutation(axes, x.shape.length);
+ var $x = x;
+ if (permutedAxes != null) {
+ // Move the reduced axes to the innermost positions.
+ $x = transpose({ inputs: { x: x }, backend: backend, attrs: { perm: permutedAxes } });
+ axes = tfjsCore.backend_util.getInnerMostAxes(axes.length, x.shape.length);
+ }
+ tfjsCore.backend_util.assertAxesAreInnerMostDims('min', axes, $x.shape.length);
+ var _a = __read(tfjsCore.backend_util.computeOutAndReduceShapes($x.shape, axes), 2), outShape = _a[0], reduceShape = _a[1];
+ var reduceSize = tfjsCore.util.sizeFromShape(reduceShape);
+ var vals = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape(outShape), $x.dtype);
+ var aVals = backend.data.get($x.dataId).values;
+ for (var i = 0; i < vals.length; ++i) {
+ var offset = i * reduceSize;
+ var min_1 = aVals[offset];
+ for (var j = 0; j < reduceSize; ++j) {
+ var value = aVals[offset + j];
+ // Explicit NaN check: a NaN value always wins the reduction because
+ // `value < min_1` would be false for NaN.
+ if (Number.isNaN(value) ||
+ value < min_1) { // comparison with NaN always return false
+ min_1 = value;
+ }
+ }
+ vals[i] = min_1;
+ }
+ if (permutedAxes != null) {
+ // Dispose the transposed copy created above.
+ backend.disposeIntermediateTensorInfo($x);
+ }
+ var result = backend.makeTensorInfo(outShape, $x.dtype, vals);
+ if (keepDims) {
+ var expandedShape = tfjsCore.backend_util.expandShapeToKeepDim(outShape, origAxes);
+ var reshapedResult = reshape({ inputs: { x: result }, backend: backend, attrs: { shape: expandedShape } });
+ backend.disposeIntermediateTensorInfo(result);
+ return reshapedResult;
+ }
+ return result;
+ }
+ // Registers min for the Min op on the 'cpu' backend.
+ var minConfig = {
+ kernelName: tfjsCore.Min,
+ backendName: 'cpu',
+ kernelFunc: min
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // MirrorPad: pads `x` by reflecting values at the borders. `mode` selects the
+ // variant via `offset`: 'reflect' (offset 0) mirrors without repeating the
+ // edge value; any other mode (symmetric, offset 1) repeats it. For each
+ // output coordinate, out-of-range dims are folded back into the input range,
+ // then the source element is copied.
+ function mirrorPad(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var paddings = attrs.paddings, mode = attrs.mode;
+ assertNotComplex(x, 'mirrorPad');
+ var outShape = paddings.map(function (p, i) { return p[0] /* beforePad */ + x.shape[i] + p[1]; } /* afterPad */);
+ // Per-dim start/end of the un-padded region inside the output.
+ var start = paddings.map(function (p) { return p[0]; });
+ var end = paddings.map(function (p, i) { return p[0] + x.shape[i]; });
+ var offset = mode === 'reflect' ? 0 : 1;
+ var xVals = backend.data.get(x.dataId).values;
+ var xRank = x.shape.length;
+ var xStrides = tfjsCore.util.computeStrides(x.shape);
+ var resultSize = tfjsCore.util.sizeFromShape(outShape);
+ var resultRank = outShape.length;
+ var resultStrides = tfjsCore.util.computeStrides(outShape);
+ var resVals = tfjsCore.util.getTypedArrayFromDType(x.dtype, resultSize);
+ for (var i = 0; i < resultSize; i++) {
+ var coords = tfjsCore.util.indexToLoc(i, resultRank, resultStrides);
+ for (var i_1 = 0; i_1 < resultRank; i_1++) {
+ // Fold coordinates in the padded margins back across the border.
+ if (coords[i_1] < start[i_1]) {
+ coords[i_1] = start[i_1] * 2 - coords[i_1] - offset;
+ }
+ else if (coords[i_1] >= end[i_1]) {
+ coords[i_1] = (end[i_1] - 1) * 2 - coords[i_1] + offset;
+ }
+ }
+ // Shift from output space back into input space.
+ coords = coords.map(function (c, i) { return c - start[i]; });
+ var inIndex = tfjsCore.util.locToIndex(coords, xRank, xStrides);
+ resVals[i] = xVals[inIndex];
+ }
+ var outId = backend.write(resVals, outShape, x.dtype);
+ return { dataId: outId, shape: outShape, dtype: x.dtype };
+ }
+ // Registers mirrorPad for the MirrorPad op on the 'cpu' backend.
+ var mirrorPadConfig = {
+ kernelName: tfjsCore.MirrorPad,
+ backendName: 'cpu',
+ kernelFunc: mirrorPad
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise modulus whose result takes the sign of the divisor (like
+ // Python's %, unlike JS's remainder operator): when the operands' signs
+ // differ, the raw remainder is shifted by one divisor.
+ var modImpl = createSimpleBinaryKernelImpl((function (aValue, bValue) {
+ var rem = aValue % bValue;
+ if ((aValue < 0 && bValue < 0) || (aValue >= 0 && bValue >= 0)) {
+ // Same sign: JS remainder already matches.
+ return rem;
+ }
+ else {
+ return (rem + bValue) % bValue;
+ }
+ }));
+ var mod = binaryKernelFunc(tfjsCore.Mod, modImpl);
+ // Registers mod for the Mod op on the 'cpu' backend.
+ var modConfig = {
+ kernelName: tfjsCore.Mod,
+ backendName: 'cpu',
+ kernelFunc: mod
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Softmax along `dim` (only the last dimension is supported; -1 is
+ // normalized to rank-1 and any other value throws). Uses the standard
+ // numerically-stable formulation: exp(logits - max(logits)) / sum(exp(...)).
+ // All intermediates are disposed before returning.
+ function softmax(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var logits = inputs.logits;
+ var dim = attrs.dim;
+ var logitsRank = logits.shape.length;
+ var $dim = dim;
+ if ($dim === -1) {
+ $dim = logitsRank - 1;
+ }
+ if ($dim !== logitsRank - 1) {
+ throw Error('Softmax along a non-last dimension is not yet supported. ' +
+ ("Logits was rank " + logitsRank + " and dim was " + $dim));
+ }
+ var axes = tfjsCore.util.parseAxisParam([$dim], logits.shape);
+ // Subtracting the per-row max before exponentiating avoids overflow.
+ var maxLogit = max({
+ inputs: { x: logits },
+ backend: backend,
+ attrs: { reductionIndices: axes, keepDims: false }
+ });
+ var expandedShape = tfjsCore.backend_util.expandShapeToKeepDim(maxLogit.shape, axes);
+ var maxLogitReshaped = reshape({ inputs: { x: maxLogit }, backend: backend, attrs: { shape: expandedShape } });
+ var a = sub({ inputs: { a: logits, b: maxLogitReshaped }, backend: backend });
+ var b = exp({ inputs: { x: a }, backend: backend });
+ var sumExp = sum({ inputs: { x: b }, backend: backend, attrs: { axis: axes, keepDims: false } });
+ var sumReshaped = reshape({ inputs: { x: sumExp }, backend: backend, attrs: { shape: expandedShape } });
+ var result = div({ inputs: { a: b, b: sumReshaped }, backend: backend });
+ backend.disposeIntermediateTensorInfo(maxLogit);
+ backend.disposeIntermediateTensorInfo(maxLogitReshaped);
+ backend.disposeIntermediateTensorInfo(a);
+ backend.disposeIntermediateTensorInfo(b);
+ backend.disposeIntermediateTensorInfo(sumExp);
+ backend.disposeIntermediateTensorInfo(sumReshaped);
+ return result;
+ }
+ // Registers softmax for the Softmax op on the 'cpu' backend.
+ var softmaxConfig = {
+ kernelName: tfjsCore.Softmax,
+ backendName: 'cpu',
+ kernelFunc: softmax
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Draws `numSamples` samples per batch row from a categorical distribution.
+ // `logits` is expected to be 2-D [batchSize, numEvents] (the code indexes
+ // shape[0]/shape[1] directly). If not already `normalized`, logits are run
+ // through softmax first. Sampling is inverse-CDF against a seeded alea PRNG.
+ function multinomial(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var logits = inputs.logits;
+ var numSamples = attrs.numSamples, seed = attrs.seed, normalized = attrs.normalized;
+ assertNotComplex(logits, 'multinomial');
+ var probabilities = normalized ?
+ logits :
+ softmax({ inputs: { logits: logits }, backend: backend, attrs: { dim: -1 } });
+ var batchSize = probabilities.shape[0];
+ var numEvents = probabilities.shape[1];
+ var probVals = backend.data.get(probabilities.dataId).values;
+ var resShape = [batchSize, numSamples];
+ var resVals = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape(resShape), 'int32');
+ for (var b = 0; b < batchSize; ++b) {
+ var offset = b * numEvents;
+ // The cdf won't include the last event. It will be implicit if no other
+ // event happened.
+ var cdf = new Float32Array(numEvents - 1);
+ cdf[0] = probVals[offset];
+ for (var event = 1; event < cdf.length; ++event) {
+ cdf[event] = cdf[event - 1] + probVals[offset + event];
+ }
+ // NOTE(review): the generator is re-created with the same seed string for
+ // every batch row, so all rows see the identical random sequence — this
+ // matches the code as written; confirm it is intentional upstream.
+ var random = seedrandom__namespace.alea(seed.toString());
+ var outOffset = b * numSamples;
+ for (var sampleId = 0; sampleId < numSamples; ++sampleId) {
+ var r = random();
+ // Assume last event happened by default.
+ resVals[outOffset + sampleId] = cdf.length;
+ for (var event = 0; event < cdf.length; event++) {
+ // Linear scan of the CDF; first bucket containing r wins.
+ if (r < cdf[event]) {
+ resVals[outOffset + sampleId] = event;
+ break;
+ }
+ }
+ }
+ }
+ if (!normalized) {
+ // Only dispose the softmax output we created; never the caller's input.
+ backend.disposeIntermediateTensorInfo(probabilities);
+ }
+ return backend.makeTensorInfo(resShape, 'int32', resVals);
+ }
+ // Registers multinomial for the Multinomial op on the 'cpu' backend.
+ var multinomialConfig = {
+ kernelName: tfjsCore.Multinomial,
+ backendName: 'cpu',
+ kernelFunc: multinomial
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // NonMaxSuppression (V3): thin wrapper around the shared kernel impl from
+ // tfjsCore. Returns a 1-D int32 tensor of the selected box indices.
+ var nonMaxSuppressionV3Impl = tfjsCore.kernel_impls.nonMaxSuppressionV3Impl;
+ function nonMaxSuppressionV3(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var boxes = inputs.boxes, scores = inputs.scores;
+ var maxOutputSize = attrs.maxOutputSize, iouThreshold = attrs.iouThreshold, scoreThreshold = attrs.scoreThreshold;
+ assertNotComplex(boxes, 'NonMaxSuppression');
+ var boxesVals = backend.data.get(boxes.dataId).values;
+ var scoresVals = backend.data.get(scores.dataId).values;
+ var selectedIndices = nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold).selectedIndices;
+ return backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices));
+ }
+ // Registers nonMaxSuppressionV3 on the 'cpu' backend.
+ var nonMaxSuppressionV3Config = {
+ kernelName: tfjsCore.NonMaxSuppressionV3,
+ backendName: 'cpu',
+ kernelFunc: nonMaxSuppressionV3
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // NonMaxSuppression (V4, "padded" variant): like V3 but can pad the output
+ // to maxOutputSize. Returns [selectedIndices (int32), validOutputs (int32
+ // scalar: count of real, non-padding entries)].
+ var nonMaxSuppressionV4Impl = tfjsCore.kernel_impls.nonMaxSuppressionV4Impl;
+ function nonMaxSuppressionV4(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var boxes = inputs.boxes, scores = inputs.scores;
+ var maxOutputSize = attrs.maxOutputSize, iouThreshold = attrs.iouThreshold, scoreThreshold = attrs.scoreThreshold, padToMaxOutputSize = attrs.padToMaxOutputSize;
+ assertNotComplex(boxes, 'NonMaxSuppressionPadded');
+ var boxesVals = backend.data.get(boxes.dataId).values;
+ var scoresVals = backend.data.get(scores.dataId).values;
+ var _a = nonMaxSuppressionV4Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize), selectedIndices = _a.selectedIndices, validOutputs = _a.validOutputs;
+ return [
+ backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)),
+ backend.makeTensorInfo([], 'int32', new Int32Array([validOutputs]))
+ ];
+ }
+ // Registers nonMaxSuppressionV4 on the 'cpu' backend.
+ var nonMaxSuppressionV4Config = {
+ kernelName: tfjsCore.NonMaxSuppressionV4,
+ backendName: 'cpu',
+ kernelFunc: nonMaxSuppressionV4
+ };
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // NonMaxSuppression (V5, "with score" / Soft-NMS variant): adds the
+ // softNmsSigma attribute and also returns the (possibly decayed) scores of
+ // the selected boxes. Returns [selectedIndices (int32), selectedScores
+ // (float32)].
+ var nonMaxSuppressionV5Impl = tfjsCore.kernel_impls.nonMaxSuppressionV5Impl;
+ function nonMaxSuppressionV5(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var boxes = inputs.boxes, scores = inputs.scores;
+ var maxOutputSize = attrs.maxOutputSize, iouThreshold = attrs.iouThreshold, scoreThreshold = attrs.scoreThreshold, softNmsSigma = attrs.softNmsSigma;
+ assertNotComplex(boxes, 'NonMaxSuppressionWithScore');
+ var boxesVals = backend.data.get(boxes.dataId).values;
+ var scoresVals = backend.data.get(scores.dataId).values;
+ var maxOutputSizeVal = maxOutputSize;
+ var iouThresholdVal = iouThreshold;
+ var scoreThresholdVal = scoreThreshold;
+ var softNmsSigmaVal = softNmsSigma;
+ var _a = nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal), selectedIndices = _a.selectedIndices, selectedScores = _a.selectedScores;
+ return [
+ backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)),
+ backend.makeTensorInfo([selectedScores.length], 'float32', new Float32Array(selectedScores))
+ ];
+ }
+ // Registers nonMaxSuppressionV5 on the 'cpu' backend.
+ var nonMaxSuppressionV5Config = {
+ kernelName: tfjsCore.NonMaxSuppressionV5,
+ backendName: 'cpu',
+ kernelFunc: nonMaxSuppressionV5
+ };
+
+ // OneHot: expands each index into a depth-length vector with `onValue` at
+ // the index position and `offValue` elsewhere. Output shape is
+ // [...indices.shape, depth]. Out-of-range indices (negative or >= depth)
+ // produce an all-offValue row.
+ function oneHot(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var indices = inputs.indices;
+ var depth = attrs.depth, onValue = attrs.onValue, offValue = attrs.offValue;
+ assertNotComplex(indices, 'oneHot');
+ var indicesSize = tfjsCore.util.sizeFromShape(indices.shape);
+ // NOTE(review): values are built in a Float32Array but the tensor below is
+ // declared 'int32' — matches the code as written; confirm against upstream
+ // TFJS that makeTensorInfo accepts this combination.
+ var res = new Float32Array(indicesSize * depth);
+ res.fill(offValue);
+ var indicesVal = backend.data.get(indices.dataId).values;
+ for (var event = 0; event < indicesSize; ++event) {
+ if (indicesVal[event] >= 0 && indicesVal[event] < depth) {
+ res[event * depth + indicesVal[event]] = onValue;
+ }
+ }
+ return backend.makeTensorInfo(__spread(indices.shape, [depth]), 'int32', res);
+ }
+ // Registers oneHot for the OneHot op on the 'cpu' backend.
+ var oneHotConfig = {
+ kernelName: tfjsCore.OneHot,
+ backendName: 'cpu',
+ kernelFunc: oneHot
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // ZerosLike: a tensor of zeros with the same shape/dtype as `x`. Strings are
+ // unsupported; complex64 inputs recurse on the real and imaginary parts and
+ // recombine them; everything else is a fill(0).
+ function zerosLike(args) {
+ var inputs = args.inputs, backend = args.backend;
+ var x = inputs.x;
+ if (x.dtype === 'string') {
+ throw new Error('zerosLike is not supported for string tensors');
+ }
+ else if (x.dtype === 'complex64') {
+ var realPart = real({ inputs: { input: x }, backend: backend });
+ var r = zerosLike({ inputs: { x: realPart }, backend: backend });
+ var imagPart = imag({ inputs: { input: x }, backend: backend });
+ var i = zerosLike({ inputs: { x: imagPart }, backend: backend });
+ var result = complex({ inputs: { real: r, imag: i }, backend: backend });
+ backend.disposeIntermediateTensorInfo(realPart);
+ backend.disposeIntermediateTensorInfo(r);
+ backend.disposeIntermediateTensorInfo(imagPart);
+ backend.disposeIntermediateTensorInfo(i);
+ return result;
+ }
+ else {
+ return fill({ backend: backend, attrs: { shape: x.shape, value: 0, dtype: x.dtype } });
+ }
+ }
+ // Registers zerosLike for the ZerosLike op on the 'cpu' backend.
+ var zerosLikeConfig = {
+ kernelName: tfjsCore.ZerosLike,
+ backendName: 'cpu',
+ kernelFunc: zerosLike
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // OnesLike: a tensor of ones with the same shape/dtype as `x`. Strings are
+ // unsupported; for complex64 the real part becomes ones and the imaginary
+ // part zeros (i.e. every element is 1 + 0i); everything else is a fill(1).
+ function onesLike(args) {
+ var inputs = args.inputs, backend = args.backend;
+ var x = inputs.x;
+ if (x.dtype === 'string') {
+ throw new Error('onesLike is not supported for string tensors');
+ }
+ else if (x.dtype === 'complex64') {
+ var realPart = real({ inputs: { input: x }, backend: backend });
+ var r = onesLike({ inputs: { x: realPart }, backend: backend });
+ var imagPart = imag({ inputs: { input: x }, backend: backend });
+ // Imaginary component is intentionally zeros, not ones.
+ var i = zerosLike({ inputs: { x: imagPart }, backend: backend });
+ var result = complex({ inputs: { real: r, imag: i }, backend: backend });
+ backend.disposeIntermediateTensorInfo(realPart);
+ backend.disposeIntermediateTensorInfo(r);
+ backend.disposeIntermediateTensorInfo(imagPart);
+ backend.disposeIntermediateTensorInfo(i);
+ return result;
+ }
+ else {
+ return fill({ backend: backend, attrs: { shape: x.shape, value: 1, dtype: x.dtype } });
+ }
+ }
+ // Registers onesLike for the OnesLike op on the 'cpu' backend.
+ var onesLikeConfig = {
+ kernelName: tfjsCore.OnesLike,
+ backendName: 'cpu',
+ kernelFunc: onesLike
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Pack (stack): stacks N same-shape, same-dtype tensors along a new `axis`.
+ // Implemented as expandDims on each input followed by concat; a single
+ // input short-circuits to a plain expandDims. All expanded intermediates are
+ // disposed after the concat.
+ function pack(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var axis = attrs.axis;
+ if (inputs.length === 1) {
+ return expandDims({ inputs: { input: inputs[0] }, backend: backend, attrs: { dim: axis } });
+ }
+ var shape = inputs[0].shape;
+ var dtype = inputs[0].dtype;
+ // Every input must match the first one's shape and dtype.
+ inputs.forEach(function (t) {
+ tfjsCore.util.assertShapesMatch(shape, t.shape, 'All tensors passed to stack must have matching shapes');
+ tfjsCore.util.assert(dtype === t.dtype, function () { return 'All tensors passed to stack must have matching dtypes'; });
+ });
+ var intermediateTensorInfos = [];
+ var expandedTensors = inputs.map(function (t) {
+ var expandedT = expandDims({ inputs: { input: t }, backend: backend, attrs: { dim: axis } });
+ intermediateTensorInfos.push(expandedT);
+ return expandedT;
+ });
+ var result = concat({ inputs: expandedTensors, backend: backend, attrs: { axis: axis } });
+ intermediateTensorInfos.forEach(function (t) { return backend.disposeIntermediateTensorInfo(t); });
+ return result;
+ }
+ // Registers pack for the Pack (stack) op on the 'cpu' backend.
+ var packConfig = {
+ kernelName: tfjsCore.Pack,
+ backendName: 'cpu',
+ kernelFunc: pack
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // PadV2: pads `x` with `constantValue` according to per-dim
+ // [beforePad, afterPad] pairs. The output buffer is pre-filled with the
+ // constant (skipped when it is 0, since typed arrays are zero-initialized),
+ // then each input element is copied to its shifted output location.
+ function padV2(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var paddings = attrs.paddings, constantValue = attrs.constantValue;
+ assertNotComplex(x, 'pad');
+ var outShape = paddings.map(function (p, i) { return p[0] /* beforePad */ + x.shape[i] + p[1]; } /* afterPad */);
+ var start = paddings.map(function (p) { return p[0]; });
+ var xVals = backend.data.get(x.dataId).values;
+ var xSize = tfjsCore.util.sizeFromShape(x.shape);
+ var xRank = x.shape.length;
+ var xStrides = tfjsCore.util.computeStrides(x.shape);
+ var resultSize = tfjsCore.util.sizeFromShape(outShape);
+ var resultRank = outShape.length;
+ var resultStrides = tfjsCore.util.computeStrides(outShape);
+ var resVals = tfjsCore.util.getTypedArrayFromDType(x.dtype, resultSize);
+ if (constantValue !== 0) {
+ resVals.fill(constantValue);
+ }
+ for (var i = 0; i < xSize; i++) {
+ var coords = tfjsCore.util.indexToLoc(i, xRank, xStrides);
+ // Offset each coordinate by its beforePad to land in the output.
+ var outCoords = coords.map(function (c, i) { return c + start[i]; });
+ var outIndex = tfjsCore.util.locToIndex(outCoords, resultRank, resultStrides);
+ resVals[outIndex] = xVals[i];
+ }
+ var outId = backend.write(resVals, outShape, x.dtype);
+ return { dataId: outId, shape: outShape, dtype: x.dtype };
+ }
+ // Registers padV2 for the PadV2 op on the 'cpu' backend.
+ var padV2Config = {
+ kernelName: tfjsCore.PadV2,
+ backendName: 'cpu',
+ kernelFunc: padV2
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise exponentiation (a ** b) via Math.pow, wrapped by the shared
+ // binary-kernel machinery (which handles broadcasting/dtypes elsewhere).
+ var powImpl = createSimpleBinaryKernelImpl(function (a, b) { return Math.pow(a, b); });
+ var pow = binaryKernelFunc(tfjsCore.Pow, powImpl);
+ // Registers pow for the Pow op on the 'cpu' backend.
+ var powConfig = {
+ kernelName: tfjsCore.Pow,
+ backendName: 'cpu',
+ kernelFunc: pow
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Range: builds a 1-D tensor of evenly spaced values [start, stop) with the
+ // given step and dtype; the actual sequence is produced by rangeImpl
+ // (defined elsewhere in this file).
+ function range(args) {
+ var backend = args.backend, attrs = args.attrs;
+ var start = attrs.start, stop = attrs.stop, dtype = attrs.dtype, step = attrs.step;
+ var values = rangeImpl(start, stop, step, dtype);
+ return backend.makeTensorInfo([values.length], dtype, values);
+ }
+ // Registers range for the Range op on the 'cpu' backend.
+ var rangeConfig = {
+ kernelName: tfjsCore.Range,
+ backendName: 'cpu',
+ kernelFunc: range
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Element-wise reciprocal (1 / x), built on the shared unary-kernel helper.
+ var reciprocal = unaryKernelFunc(tfjsCore.Reciprocal, function (xi) { return 1 / xi; });
+ // Registers reciprocal for the Reciprocal op on the 'cpu' backend.
+ var reciprocalConfig = {
+ kernelName: tfjsCore.Reciprocal,
+ backendName: 'cpu',
+ kernelFunc: reciprocal,
+ };
+
+ function resizeBilinear(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var images = inputs.images;
+ var alignCorners = attrs.alignCorners, halfPixelCenters = attrs.halfPixelCenters, size = attrs.size;
+ assertNotComplex(images, 'resizeBilinear');
+ var imagesStrides = tfjsCore.util.computeStrides(images.shape);
+ var _a = __read(size, 2), newHeight = _a[0], newWidth = _a[1];
+ var _b = __read(images.shape, 4), batch = _b[0], oldHeight = _b[1], oldWidth = _b[2], numChannels = _b[3];
+ var xValues = backend.data.get(images.dataId).values;
+ var result = new Float32Array(tfjsCore.util.sizeFromShape([batch, newHeight, newWidth, numChannels]));
+ var effectiveInputSize = [
+ (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,
+ (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth
+ ];
+ var effectiveOutputSize = [
+ (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,
+ (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth
+ ];
+ var outputIdx = 0;
+ var effectiveRowSizeRatio = effectiveInputSize[0] / effectiveOutputSize[0];
+ var effectiveColSizeRatio = effectiveInputSize[1] / effectiveOutputSize[1];
+ for (var b = 0; b < batch; b++) {
+ for (var r = 0; r < newHeight; r++) {
+ var sourceFracRow = void 0;
+ if (halfPixelCenters) {
+ sourceFracRow = effectiveRowSizeRatio * (r + 0.5) - 0.5;
+ }
+ else {
+ sourceFracRow = effectiveRowSizeRatio * r;
+ }
+ var sourceRowFloor = Math.max(0, Math.floor(sourceFracRow));
+ var rowFrac = sourceFracRow - sourceRowFloor;
+ var sourceRowCeil = Math.min(oldHeight - 1, Math.ceil(sourceFracRow));
+ var topRowOffset = b * imagesStrides[0] + sourceRowFloor * imagesStrides[1];
+ var botRowOffset = b * imagesStrides[0] + sourceRowCeil * imagesStrides[1];
+ for (var c = 0; c < newWidth; c++) {
+ var sourceFracCol = void 0;
+ if (halfPixelCenters) {
+ sourceFracCol = effectiveColSizeRatio * (c + 0.5) - 0.5;
+ }
+ else {
+ sourceFracCol = effectiveColSizeRatio * c;
+ }
+ var sourceColFloor = Math.max(0, Math.floor(sourceFracCol));
+ var colFrac = sourceFracCol - sourceColFloor;
+ var sourceColCeil = Math.min(oldWidth - 1, Math.ceil(sourceFracCol));
+ var topLeftOffest = topRowOffset + sourceColFloor * imagesStrides[2];
+ var botLeftOffset = botRowOffset + sourceColFloor * imagesStrides[2];
+ var topRightOffset = topRowOffset + sourceColCeil * imagesStrides[2];
+ var botRightOffest = botRowOffset + sourceColCeil * imagesStrides[2];
+ for (var d = 0; d < numChannels; d++) {
+ // Begin shader.
+ // Compute the fractional index of the source.
+ var topLeft = xValues[topLeftOffest + d];
+ var bottomLeft = xValues[botLeftOffset + d];
+ var topRight = xValues[topRightOffset + d];
+ var bottomRight = xValues[botRightOffest + d];
+ var top = topLeft + (topRight - topLeft) * colFrac;
+ var bottom = bottomLeft + (bottomRight - bottomLeft) * colFrac;
+ var newValue = top + (bottom - top) * rowFrac;
+ result[outputIdx++] = newValue;
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo([batch, newHeight, newWidth, numChannels], 'float32', result);
+ }
/** Kernel registration: bilinear image resize on the CPU backend. */
var resizeBilinearConfig = {
  kernelName: tfjsCore.ResizeBilinear,
  backendName: 'cpu',
  kernelFunc: resizeBilinear,
};
+
/**
 * CPU kernel for the gradient of bilinear image resizing.
 *
 * For every pixel of `dy` (the upstream gradient over the resized image),
 * the four source pixels that contributed to it in the forward pass are
 * located and the `dy` value is scattered back to them, weighted by the
 * same bilinear interpolation coefficients the forward pass used.
 *
 * Reference implementation:
 * https://github.com/tensorflow/tensorflow/blob/3039375c86a5bbc9610c7725dcaa95d635f87ba2/tensorflow/core/kernels/resize_bilinear_op.cc#L275
 *
 * args.inputs.images - original (pre-resize) input, shape [batch, xHeight, xWidth, depth].
 * args.inputs.dy     - upstream gradient, shape [batch, yHeight, yWidth, depth].
 * args.attrs.alignCorners - must match the forward pass' alignCorners flag.
 * Returns a float32 TensorInfo with the same shape as `images`.
 */
function resizeBilinearGrad(args) {
  var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
  var images = inputs.images, dy = inputs.dy;
  var alignCorners = attrs.alignCorners;
  assertNotComplex([dy, images], 'resizeBilinearGrad');
  var imagesStrides = tfjsCore.util.computeStrides(images.shape);
  var _a = __read(images.shape, 4), batch = _a[0], xHeight = _a[1], xWidth = _a[2], depth = _a[3];
  var _b = __read(dy.shape, 3), yHeight = _b[1], yWidth = _b[2];
  // Gradient accumulator, laid out exactly like `images` (imagesStrides).
  var output = new Float32Array(batch * xHeight * xWidth * depth);
  // With alignCorners the first and last pixels map exactly onto each other,
  // so the effective extents exclude one pixel.
  var effectiveXSize = [
    (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,
    (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth
  ];
  var effectiveYSize = [
    (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight,
    (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth
  ];
  var heightScale = effectiveXSize[0] / effectiveYSize[0];
  var widthScale = effectiveXSize[1] / effectiveYSize[1];
  var dyValues = backend.data.get(dy.dataId).values;
  var offset = 0; // flat read cursor over dyValues
  for (var b = 0; b < batch; b++) {
    var bOffset = b * imagesStrides[0];
    for (var r = 0; r < yHeight; r++) {
      // Fractional source row and its two integer neighbors.
      var dxR = r * heightScale;
      var topDxRIndex = Math.floor(dxR);
      var bottomDxRIndex = Math.min(Math.ceil(dxR), xHeight - 1);
      var topDxROffset = bOffset + topDxRIndex * imagesStrides[1];
      var bottomDxROffset = bOffset + bottomDxRIndex * imagesStrides[1];
      var dxRLerp = dxR - topDxRIndex;
      var inverseDxRLerp = 1.0 - dxRLerp;
      for (var c = 0; c < yWidth; c++) {
        // Fractional source column and its two integer neighbors.
        var dxC = c * widthScale;
        var leftDxCIndex = Math.floor(dxC);
        var rightDxCIndex = Math.min(Math.ceil(dxC), xWidth - 1);
        var dxCLerp = dxC - leftDxCIndex;
        var inverseDxCLerp = 1.0 - dxCLerp;
        var topLeftRCOffset = topDxROffset + leftDxCIndex * imagesStrides[2];
        var topRightRCOffset = topDxROffset + rightDxCIndex * imagesStrides[2];
        var bottomLeftRCOffset = bottomDxROffset + leftDxCIndex * imagesStrides[2];
        var bottomRightRCOffset = bottomDxROffset + rightDxCIndex * imagesStrides[2];
        // Bilinear weights of the four contributing source pixels.
        var inverseDxRLerpTimesInverseDxCLerp = inverseDxRLerp * inverseDxCLerp;
        var inverseDxRLerpTimesDxCLerp = inverseDxRLerp * dxCLerp;
        var dxRLerpTimesInverseDxCLerp = dxRLerp * inverseDxCLerp;
        var dxRLerpTimesDxCLerp = dxRLerp * dxCLerp;
        for (var d = 0; d < depth; d++) {
          var dyVal = dyValues[offset++];
          output[topLeftRCOffset + d] +=
              dyVal * inverseDxRLerpTimesInverseDxCLerp;
          output[topRightRCOffset + d] += dyVal * inverseDxRLerpTimesDxCLerp;
          output[bottomLeftRCOffset + d] += dyVal * dxRLerpTimesInverseDxCLerp;
          output[bottomRightRCOffset + d] += dyVal * dxRLerpTimesDxCLerp;
        }
      }
    }
  }
  // BUG FIX: the buffer above is laid out per images.shape
  // ([batch, xHeight, xWidth, depth]); the previous code returned the
  // transposed shape [batch, xWidth, xHeight, depth], mislabeling the
  // result for non-square inputs. Mirrors resizeNearestNeighborGrad,
  // which returns images.shape.
  return backend.makeTensorInfo([batch, xHeight, xWidth, depth], 'float32', output);
}
/** Kernel registration: gradient of bilinear resize on the CPU backend. */
var resizeBilinearGradConfig = {
  kernelName: tfjsCore.ResizeBilinearGrad,
  backendName: 'cpu',
  kernelFunc: resizeBilinearGrad,
};
+
/**
 * CPU kernel for nearest-neighbor image resizing.
 *
 * args.inputs.images - input batch, shape [batch, oldHeight, oldWidth, channels].
 * args.attrs.size    - [newHeight, newWidth] target spatial size.
 * args.attrs.alignCorners / halfPixelCenters - TF resize sampling modes.
 * Returns a TensorInfo of shape [batch, newHeight, newWidth, channels] with
 * the input's dtype; each output pixel copies its nearest source pixel.
 */
function resizeNearestNeighbor(args) {
  var backend = args.backend;
  var images = args.inputs.images;
  var attrs = args.attrs;
  var alignCorners = attrs.alignCorners;
  var halfPixelCenters = attrs.halfPixelCenters;
  var size = attrs.size;
  assertNotComplex(images, 'resizeNearestNeighbor');
  var strides = tfjsCore.util.computeStrides(images.shape);
  var _a = __read(size, 2), newHeight = _a[0], newWidth = _a[1];
  var _b = __read(images.shape, 4), batch = _b[0], oldHeight = _b[1], oldWidth = _b[2], numChannels = _b[3];
  var xValues = backend.data.get(images.dataId).values;
  var output = new Float32Array(batch * newHeight * newWidth * numChannels);
  // alignCorners pins the outermost pixels to each other, shrinking the
  // effective extents by one on each axis with more than one output pixel.
  var inHeight = (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight;
  var inWidth = (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth;
  var outHeight = (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight;
  var outWidth = (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth;
  var rowScale = inHeight / outHeight;
  var colScale = inWidth / outWidth;
  var writePos = 0; // flat write cursor over `output`
  for (var b = 0; b < batch; b++) {
    var batchBase = b * strides[0];
    for (var r = 0; r < newHeight; r++) {
      // Fractional source row, then its nearest valid integer row.
      var fracRow = rowScale * (halfPixelCenters ? r + 0.5 : r);
      var srcRow = Math.min(oldHeight - 1, alignCorners ? Math.round(fracRow) : Math.floor(fracRow));
      if (halfPixelCenters) {
        srcRow = Math.max(0, srcRow);
      }
      var rowBase = batchBase + srcRow * strides[1];
      for (var c = 0; c < newWidth; c++) {
        // Same mapping along the column axis.
        var fracCol = colScale * (halfPixelCenters ? c + 0.5 : c);
        var srcCol = Math.min(oldWidth - 1, alignCorners ? Math.round(fracCol) : Math.floor(fracCol));
        if (halfPixelCenters) {
          srcCol = Math.max(0, srcCol);
        }
        var colBase = rowBase + srcCol * strides[2];
        for (var d = 0; d < numChannels; d++) {
          output[writePos++] = xValues[colBase + d];
        }
      }
    }
  }
  return backend.makeTensorInfo([batch, newHeight, newWidth, numChannels], images.dtype, output);
}
/** Kernel registration: nearest-neighbor resize on the CPU backend. */
var resizeNearestNeighborConfig = {
  kernelName: tfjsCore.ResizeNearestNeighbor,
  backendName: 'cpu',
  kernelFunc: resizeNearestNeighbor,
};
+
+ // CPU kernel for the gradient of nearest-neighbor resize.
+ // For each input pixel (r, c) it scans a window of dy positions that could
+ // have sampled (r, c) in the forward pass and sums their gradients.
+ // Returns a tensor with the same shape/dtype as `images`.
+ function resizeNearestNeighborGrad(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var images = inputs.images, dy = inputs.dy;
+ var alignCorners = attrs.alignCorners;
+ assertNotComplex([dy, images], 'resizeNearestNeighborGrad');
+ var imagesStrides = tfjsCore.util.computeStrides(images.shape);
+ var dyStrides = tfjsCore.util.computeStrides(dy.shape);
+ var _a = __read(images.shape, 4), batch = _a[0], xHeight = _a[1], xWidth = _a[2], depth = _a[3];
+ var _b = __read(dy.shape, 3), yHeight = _b[1], yWidth = _b[2];
+ // Gradient accumulator laid out like `images`.
+ var output = new Float32Array(batch * xHeight * xWidth * depth);
+ var dyValues = backend.data.get(dy.dataId).values;
+ // In the backwards pass, we want to find the pixels that were generated
+ // for each pixel in the input image the forward pass
+ // With alignCorners the outermost pixels coincide, so the effective
+ // extents exclude one pixel per axis (must match the forward kernel).
+ var effectiveXSize = [
+ (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,
+ (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth
+ ];
+ var effectiveYSize = [
+ (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight,
+ (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth
+ ];
+ var heightScale = effectiveXSize[0] / effectiveYSize[0];
+ var widthScale = effectiveXSize[1] / effectiveYSize[1];
+ var invHeightScale = 1 / heightScale;
+ var invWidthScale = 1 / widthScale;
+ // This defines the size of the window of values around a particular
+ // index in dy that we want to search for contributions to dx.
+ var winHeight = (Math.ceil(invHeightScale) * 2) + 2;
+ var winWidth = (Math.ceil(invWidthScale) * 2) + 2;
+ // Loop over the output space.
+ for (var b = 0; b < batch; b++) {
+ var batchOffset = b * imagesStrides[0];
+ for (var r = 0; r < xHeight; r++) {
+ var rowOffset = batchOffset + r * imagesStrides[1];
+ // Compute bounds for where in dy we will look
+ var startRLerp = Math.floor(r * invHeightScale);
+ var startDyR = Math.floor(startRLerp - (winHeight / 2));
+ for (var c = 0; c < xWidth; c++) {
+ var colOffset = rowOffset + c * imagesStrides[2];
+ // Compute bounds for where in dy we will look
+ var startCLerp = Math.floor(c * invWidthScale);
+ var startDyC = Math.floor(startCLerp - (winWidth / 2));
+ for (var d = 0; d < depth; d++) {
+ var accum = 0;
+ // loop over dy
+ for (var dyRIndex = 0; dyRIndex < winHeight; dyRIndex++) {
+ var dyR = dyRIndex + startDyR;
+ // Guard against the window exceeding the bounds of dy
+ if (dyR < 0 || dyR >= yHeight) {
+ continue;
+ }
+ var dyROffset = batchOffset + dyR * dyStrides[1];
+ var sourceFracRow = dyR * heightScale;
+ // Re-run the forward nearest-row mapping; only dy rows that
+ // actually sampled row r contribute.
+ var sourceNearestRow = Math.min(xHeight - 1, alignCorners ? Math.round(sourceFracRow) :
+ Math.floor(sourceFracRow));
+ if (r !== sourceNearestRow) {
+ continue;
+ }
+ for (var dyCIndex = 0; dyCIndex < winWidth; dyCIndex++) {
+ var dyC = dyCIndex + startDyC;
+ // Guard against the window exceeding the bounds of dy
+ if (dyC < 0 || dyC >= yWidth) {
+ continue;
+ }
+ var dyCOffset = dyROffset + dyC * dyStrides[2];
+ var sourceFracCol = dyC * widthScale;
+ // Same check along the column axis.
+ var sourceNearestCol = Math.min(xWidth - 1, alignCorners ? Math.round(sourceFracCol) :
+ Math.floor(sourceFracCol));
+ if (c === sourceNearestCol) {
+ accum += dyValues[dyCOffset + d];
+ }
+ }
+ }
+ output[colOffset + d] = accum;
+ }
+ }
+ }
+ }
+ return backend.makeTensorInfo(images.shape, images.dtype, output);
+ }
+ // Kernel registration: gradient of nearest-neighbor resize on the CPU backend.
+ var resizeNearestNeighborGradConfig = {
+ kernelName: tfjsCore.ResizeNearestNeighborGrad,
+ backendName: 'cpu',
+ kernelFunc: resizeNearestNeighborGrad
+ };
+
/**
 * CPU kernel for tf.reverse: flips `x` along each axis listed in attrs.dims.
 *
 * Scalars have nothing to flip, so they are returned via the identity kernel.
 * Otherwise every output location maps to the input location with each
 * reversed axis index mirrored (shape[d] - 1 - idx).
 */
function reverse(args) {
  var backend = args.backend;
  var x = args.inputs.x;
  var dims = args.attrs.dims;
  assertNotComplex(x, 'reverse');
  var rank = x.shape.length;
  var axes = tfjsCore.util.parseAxisParam(dims, x.shape);
  if (rank === 0) {
    // Scalar: just hand back a copy.
    return identity({ inputs: { x: x }, backend: backend });
  }
  var outBuf = new tfjsCore.TensorBuffer(x.shape, x.dtype);
  var xBuf = backend.bufferSync(x);
  for (var i = 0; i < outBuf.size; i++) {
    var outLoc = outBuf.indexToLoc(i);
    var inLoc = outLoc.slice();
    for (var j = 0; j < axes.length; j++) {
      var axis = axes[j];
      inLoc[axis] = x.shape[axis] - 1 - inLoc[axis];
    }
    outBuf.set.apply(outBuf, __spread([xBuf.get.apply(xBuf, __spread(inLoc))], outLoc));
  }
  return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);
}
/** Kernel registration: Reverse on the CPU backend. */
var reverseConfig = {
  kernelName: tfjsCore.Reverse,
  backendName: 'cpu',
  kernelFunc: reverse,
};
+
/**
 * Kernel registration: RotateWithOffset on the CPU backend.
 *
 * Rotates each image in the batch by `radians` about `center` using inverse
 * mapping: for every destination pixel the pre-rotation source coordinate is
 * computed and copied if it lies inside the image; otherwise `fillValue` is
 * used (a scalar, or per-channel array where channel 3 is forced opaque).
 */
var rotateWithOffsetConfig = {
  kernelName: tfjsCore.RotateWithOffset,
  backendName: 'cpu',
  kernelFunc: function (_a) {
    var inputs = _a.inputs, attrs = _a.attrs, backend = _a.backend;
    var image = inputs.image;
    var radians = attrs.radians, fillValue = attrs.fillValue, center = attrs.center;
    var cpuBackend = backend;
    var output = tfjsCore.util.getTypedArrayFromDType(image.dtype, tfjsCore.util.sizeFromShape(image.shape));
    var _b = __read(image.shape, 4), batch = _b[0], imageHeight = _b[1], imageWidth = _b[2], numChannels = _b[3];
    var _c = __read(tfjsCore.backend_util.getImageCenter(center, imageHeight, imageWidth), 2), centerX = _c[0], centerY = _c[1];
    // Alpha channel defaults to fully opaque when fillValue is per-channel.
    var fullOpacityValue = 255;
    var sinFactor = Math.sin(radians);
    var cosFactor = Math.cos(radians);
    var imageVals = cpuBackend.data.get(image.dataId).values;
    for (var batchIdx = 0; batchIdx < batch; batchIdx++) {
      var batchOffset = batchIdx * imageWidth * imageHeight * numChannels;
      for (var row = 0; row < imageHeight; row++) {
        var rowOffset = row * (imageWidth * numChannels);
        for (var col = 0; col < imageWidth; col++) {
          var colOffset = col * numChannels;
          for (var channel = 0; channel < numChannels; channel++) {
            // Fix: the original built `coords = [batch, row, col, channel]`
            // (using the batch *count*, not batchIdx) and only ever read
            // x/y out of it; the misleading dead array is removed.
            var x = col;
            var y = row;
            // coordX/coordY are the result of rotating and translating x/y.
            var coordX = (x - centerX) * cosFactor - (y - centerY) * sinFactor;
            var coordY = (x - centerX) * sinFactor + (y - centerY) * cosFactor;
            coordX = Math.round(coordX + centerX);
            coordY = Math.round(coordY + centerY);
            var outputValue = fillValue;
            if (typeof fillValue !== 'number') {
              outputValue = channel === 3 ? fullOpacityValue : fillValue[channel];
            }
            // If the coordinate position falls within the image boundaries,
            // copy the image value at the source position; otherwise keep fill.
            if (coordX >= 0 && coordX < imageWidth && coordY >= 0 &&
                coordY < imageHeight) {
              var rotatedRowOffset = coordY * (imageWidth * numChannels);
              var rotatedColOffset = coordX * numChannels;
              outputValue = imageVals[batchOffset + rotatedRowOffset + rotatedColOffset + channel];
            }
            output[batchOffset + rowOffset + colOffset + channel] = outputValue;
          }
        }
      }
    }
    var dataId = cpuBackend.write(output, image.shape, image.dtype);
    return { dataId: dataId, shape: image.shape, dtype: image.dtype };
  }
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Element-wise round using banker's rounding — ties go to the nearest even
 * integer — matching TensorFlow's Round semantics rather than Math.round.
 */
var round = unaryKernelFunc(tfjsCore.Round, function (xi) {
  var base = Math.floor(xi);
  var frac = xi - base;
  if (frac < 0.5) {
    return base;
  }
  if (frac > 0.5) {
    return base + 1;
  }
  // Exactly halfway: pick the even neighbor.
  return base % 2.0 === 0.0 ? base : base + 1.0;
});
/** Kernel registration: Round on the CPU backend. */
var roundConfig = {
  kernelName: tfjsCore.Round,
  backendName: 'cpu',
  kernelFunc: round,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Shared CPU scatter implementation.
 *
 * Writes `numUpdates` slices of `sliceSize` elements from `updates` into a
 * fresh buffer of `shape` (viewed as [outputSize / sliceSize, sliceSize]),
 * at row positions computed from `indices` and `strides`. Unwritten cells
 * hold `defaultValue`. When `sumDupeIndices` is true duplicate rows
 * accumulate; otherwise later writes win (rank-0 updates broadcast a single
 * value). Throws on out-of-range indices. Returns the TensorBuffer.
 */
function scatterImpl(indices, updates, shape, outputSize, sliceSize, numUpdates, sliceRank, strides, defaultValue, sumDupeIndices) {
  var flattenShape = [outputSize / sliceSize, sliceSize];
  var indicesData = indices.values;
  var updatesData = updates.values;
  if (outputSize === 0) {
    // Empty output: nothing to scatter into.
    return tfjsCore.buffer(shape, updates.dtype);
  }
  var numSlices = outputSize / sliceSize;
  var outBuf = tfjsCore.buffer(flattenShape, updates.dtype);
  outBuf.values.fill(defaultValue);
  for (var i = 0; i < numUpdates; i++) {
    var index = [];
    var flattenIndex = 0;
    for (var j = 0; j < sliceRank; j++) {
      var coord = indicesData[i * sliceRank + j];
      index.push(coord);
      flattenIndex += coord * strides[j];
    }
    if (flattenIndex < 0 || flattenIndex >= numSlices) {
      throw new Error("Invalid indices: " + index + " does not index into " + shape);
    }
    var dstBase = flattenIndex * sliceSize;
    var srcBase = i * sliceSize;
    for (var k = 0; k < sliceSize; k++) {
      if (sumDupeIndices) {
        outBuf.values[dstBase + k] += updatesData[srcBase + k];
      } else {
        outBuf.values[dstBase + k] =
            updates.rank === 0 ? updatesData[0] : updatesData[srcBase + k];
      }
    }
  }
  return outBuf;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU kernel for ScatterNd: scatters `updates` at `indices` into a
 * zero-initialized tensor of attrs.shape; duplicate indices accumulate.
 */
function scatterNd(args) {
  var backend = args.backend;
  var attrs = args.attrs;
  var indices = args.inputs.indices;
  var updates = args.inputs.updates;
  var shape = attrs.shape;
  var _a = tfjsCore.backend_util.calculateShapes(updates, indices, shape), sliceRank = _a.sliceRank, numUpdates = _a.numUpdates, sliceSize = _a.sliceSize, strides = _a.strides, outputSize = _a.outputSize;
  var indicesBuf = backend.bufferSync(indices);
  var updatesBuf = backend.bufferSync(updates);
  // ScatterNd sums contributions from duplicate indices over a zero base.
  var outBuf = scatterImpl(indicesBuf, updatesBuf, shape, outputSize, sliceSize, numUpdates, sliceRank, strides, 0 /* defaultValue */, true /* sumDupeIndices */);
  return backend.makeTensorInfo(shape, outBuf.dtype, outBuf.values);
}
/** Kernel registration: ScatterNd on the CPU backend. */
var scatterNdConfig = {
  kernelName: tfjsCore.ScatterNd,
  backendName: 'cpu',
  kernelFunc: scatterNd,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // CPU kernel for Select (tf.where): picks tValues where condition is 1,
+ // eValues otherwise. Output dtype is the upcast of t and e dtypes.
+ function select(args) {
+ var inputs = args.inputs, backend = args.backend;
+ var condition = inputs.condition, t = inputs.t, e = inputs.e;
+ assertNotComplex([condition, t, e], 'select');
+ var conditionRank = condition.shape.length;
+ var values = backend.data.get(condition.dataId).values;
+ var tValues = backend.data.get(t.dataId).values;
+ var eValues = backend.data.get(e.dataId).values;
+ var resultDtype = tfjsCore.upcastType(t.dtype, e.dtype);
+ var newValues = tfjsCore.util.makeZerosTypedArray(tfjsCore.util.sizeFromShape(t.shape), resultDtype);
+ var index = 0;
+ // offset > 1 only when condition is rank-1 and t has higher rank: each
+ // condition element then covers one whole inner slice of t.
+ var offset = conditionRank === 0 || conditionRank > 1 || t.shape.length === 1 ?
+ 1 :
+ tfjsCore.util.sizeFromShape(t.shape.slice(1));
+ for (var i = 0; i < values.length; i++) {
+ for (var j = 0; j < offset; j++) {
+ // NOTE(review): in the broadcast case (offset > 1) this reads
+ // tValues[i]/eValues[i] rather than the flat position `index`,
+ // replicating a single element across the slice — presumably only
+ // the offset === 1 path is exercised here; confirm against the
+ // upstream Select kernel before relying on rank-1 broadcasting.
+ if (values[i] === 1) {
+ newValues[index++] = tValues[i];
+ }
+ else {
+ newValues[index++] = eValues[i];
+ }
+ }
+ }
+ return backend.makeTensorInfo(t.shape, resultDtype, newValues);
+ }
+ // Kernel registration: Select on the CPU backend.
+ var selectConfig = {
+ kernelName: tfjsCore.Select,
+ backendName: 'cpu',
+ kernelFunc: select
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// SELU constants supplied by backend_util.
var scaleAlpha = tfjsCore.backend_util.SELU_SCALEALPHA;
var scale = tfjsCore.backend_util.SELU_SCALE;
/**
 * Element-wise scaled exponential linear unit:
 * scale * x for x >= 0, scaleAlpha * (exp(x) - 1) otherwise.
 */
var selu = unaryKernelFunc(tfjsCore.Selu, function (xi) {
  return xi >= 0 ? scale * xi : scaleAlpha * (Math.exp(xi) - 1);
});
/** Kernel registration: Selu on the CPU backend. */
var seluConfig = {
  kernelName: tfjsCore.Selu,
  backendName: 'cpu',
  kernelFunc: selu,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Element-wise sign: 1 for positive, -1 for negative, 0 otherwise
 * (zero and NaN both fall through to 0, as in the original).
 */
var sign = unaryKernelFunc(tfjsCore.Sign, function (xi) {
  if (xi > 0) {
    return 1;
  }
  return xi < 0 ? -1 : 0;
});
/** Kernel registration: Sign on the CPU backend. */
var signConfig = {
  kernelName: tfjsCore.Sign,
  backendName: 'cpu',
  kernelFunc: sign,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/** Element-wise sine (radians), delegating to Math.sin. */
var sin = unaryKernelFunc(tfjsCore.Sin, function (xi) {
  return Math.sin(xi);
});
/** Kernel registration: Sin on the CPU backend. */
var sinConfig = {
  kernelName: tfjsCore.Sin,
  backendName: 'cpu',
  kernelFunc: sin,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/** Element-wise hyperbolic sine, delegating to Math.sinh. */
var sinh = unaryKernelFunc(tfjsCore.Sinh, function (xi) {
  return Math.sinh(xi);
});
/** Kernel registration: Sinh on the CPU backend. */
var sinhConfig = {
  kernelName: tfjsCore.Sinh,
  backendName: 'cpu',
  kernelFunc: sinh,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // mirrors the implementation of tf.nn.softplus: https://goo.gl/vkcvwX
+ // epsilon is the difference between 1.0 and the next representable float.
+ // For a single precision 32 bit float this should be 2^-23, see:
+ // https://math.byu.edu/~schow/work/IEEEFloatingPoint.htm
// mirrors the implementation of tf.nn.softplus: https://goo.gl/vkcvwX
// epsilon is the difference between 1.0 and the next representable float
// (2^-23 for single precision); threshold marks where exp(x) stops mattering.
var epsilon = 1.1920928955078125e-7;
var threshold = Math.log(epsilon) + 2.0;
/**
 * Element-wise softplus, log(1 + exp(x)), with guards for the regions where
 * exp(x) would under/overflow but the result is known within machine epsilon.
 */
var softplus = unaryKernelFunc(tfjsCore.Softplus, function (xi) {
  if (xi < threshold) {
    // exp(x) may underflow, but softplus(x) == exp(x) within machine epsilon.
    return Math.exp(xi);
  }
  if (xi > -threshold) {
    // exp(x) may overflow, but softplus(x) == x within machine epsilon.
    return xi;
  }
  return Math.log(1.0 + Math.exp(xi));
});
/** Kernel registration: Softplus on the CPU backend. */
var softplusConfig = {
  kernelName: tfjsCore.Softplus,
  backendName: 'cpu',
  kernelFunc: softplus,
};
+
/**
 * CPU kernel for SpaceToBatchND: zero-pads x per `paddings`, then moves
 * spatial blocks of size `blockShape` into the batch dimension via the
 * standard reshape -> transpose -> reshape pipeline.
 */
function spaceToBatchND(args) {
  var backend = args.backend;
  var x = args.inputs.x;
  var blockShape = args.attrs.blockShape;
  var paddings = args.attrs.paddings;
  assertNotComplex([x], 'spaceToBatchND');
  var prod = tfjsCore.util.sizeFromShape(blockShape);
  // Batch dim is never padded; dims past the block dims get zero padding too.
  var completePaddings = [[0, 0]];
  completePaddings.push.apply(completePaddings, __spread(paddings));
  for (var i = 1 + blockShape.length; i < x.shape.length; ++i) {
    completePaddings.push([0, 0]);
  }
  var paddedX = padV2Config.kernelFunc({
    inputs: { x: x },
    backend: backend,
    attrs: { paddings: completePaddings, constantValue: 0 }
  });
  var reshapedShape = tfjsCore.backend_util.getReshaped(paddedX.shape, blockShape, prod, false);
  var perm = tfjsCore.backend_util.getPermuted(reshapedShape.length, blockShape.length, false);
  var finalShape = tfjsCore.backend_util.getReshapedPermuted(paddedX.shape, blockShape, prod, false);
  var reshaped = reshape({ inputs: { x: paddedX }, backend: backend, attrs: { shape: reshapedShape } });
  var transposed = transpose({ inputs: { x: reshaped }, backend: backend, attrs: { perm: perm } });
  var result = reshape({ inputs: { x: transposed }, backend: backend, attrs: { shape: finalShape } });
  // Release the intermediates; only `result` is handed back to the caller.
  backend.disposeIntermediateTensorInfo(paddedX);
  backend.disposeIntermediateTensorInfo(reshaped);
  backend.disposeIntermediateTensorInfo(transposed);
  return result;
}
/** Kernel registration: SpaceToBatchND on the CPU backend. */
var spaceToBatchNDConfig = {
  kernelName: tfjsCore.SpaceToBatchND,
  backendName: 'cpu',
  kernelFunc: spaceToBatchND,
};
+
/**
 * CPU kernel for SparseFillEmptyRows: fills rows missing from a sparse
 * tensor with `defaultValue`, delegating to sparseFillEmptyRowsImpl.
 * Returns [outputIndices, outputValues, emptyRowIndicator, reverseIndexMap].
 * Throws when the inputs don't have the expected ranks.
 */
function sparseFillEmptyRows(args) {
  var backend = args.backend;
  var _a = args.inputs, indices = _a.indices, values = _a.values, denseShape = _a.denseShape, defaultValue = _a.defaultValue;
  if (denseShape.shape.length !== 1) {
    throw new Error("Dense shape must be a vector, saw:\n " + denseShape.shape);
  }
  if (indices.shape.length !== 2) {
    throw new Error("Indices must be a matrix, saw:\n " + indices.shape);
  }
  if (values.shape.length !== 1) {
    throw new Error("Values must be a vector, saw:\n " + values.shape);
  }
  if (defaultValue.shape.length !== 0) {
    throw new Error("Default value must be a scalar, saw:\n " + defaultValue.shape);
  }
  var $indices = backend.data.get(indices.dataId).values;
  var $values = backend.data.get(values.dataId).values;
  var $denseShape = backend.data.get(denseShape.dataId).values;
  var $defaultValue = backend.data.get(defaultValue.dataId).values[0];
  var result = sparseFillEmptyRowsImpl($indices, indices.shape, indices.dtype, $values, values.dtype, $denseShape, $defaultValue);
  var outputIndices = result[0];
  var outputIndicesShape = result[1];
  var outputValues = result[2];
  var emptyRowIndicator = result[3];
  var reverseIndexMap = result[4];
  return [
    backend.makeTensorInfo(outputIndicesShape, indices.dtype, outputIndices),
    backend.makeTensorInfo([outputIndicesShape[0]], values.dtype, outputValues),
    backend.makeTensorInfo([emptyRowIndicator.length], 'bool', new Uint8Array(emptyRowIndicator.map(function (value) { return Number(value); }))),
    backend.makeTensorInfo([reverseIndexMap.length], indices.dtype, new Int32Array(reverseIndexMap)),
  ];
}
/** Kernel registration: SparseFillEmptyRows on the CPU backend. */
var sparseFillEmptyRowsConfig = {
  kernelName: tfjsCore.SparseFillEmptyRows,
  backendName: 'cpu',
  kernelFunc: sparseFillEmptyRows,
};
+
/**
 * CPU kernel for SparseReshape: recomputes COO indices of a sparse tensor
 * for a new dense shape. Returns [newIndices, resolvedOutputShape].
 * Throws when the inputs don't have the expected ranks.
 */
function sparseReshape(args) {
  var backend = args.backend;
  var _a = args.inputs, inputIndices = _a.inputIndices, inputShape = _a.inputShape, newShape = _a.newShape;
  if (inputIndices.shape.length !== 2) {
    throw new Error("Input indices should be a matrix but received shape\n " + inputIndices.shape);
  }
  if (inputShape.shape.length !== 1) {
    throw new Error("Input shape should be a vector but received shape\n " + inputShape.shape);
  }
  if (newShape.shape.length !== 1) {
    throw new Error("Target shape should be a vector but received shape " + newShape.shape);
  }
  var $inputShape = Array.from(backend.data.get(inputShape.dataId).values);
  var $inputIndices = backend.data.get(inputIndices.dataId).values;
  var targetShape = Array.from(backend.data.get(newShape.dataId).values);
  var result = sparseReshapeImpl($inputIndices, inputIndices.shape, inputIndices.dtype, $inputShape, targetShape);
  var newIndices = result[0];
  var indicesShape = result[1];
  var outputShape = result[2];
  return [
    backend.makeTensorInfo(indicesShape, inputIndices.dtype, newIndices),
    backend.makeTensorInfo([outputShape.length], newShape.dtype, new Int32Array(outputShape)),
  ];
}
/** Kernel registration: SparseReshape on the CPU backend. */
var sparseReshapeConfig = {
  kernelName: tfjsCore.SparseReshape,
  backendName: 'cpu',
  kernelFunc: sparseReshape,
};
+
/**
 * CPU kernel for SparseSegmentMean: averages the rows of `data` selected by
 * `indices` within each segment id, via the shared reduction implementation.
 * Throws when the inputs don't have the expected ranks or matching sizes.
 */
function sparseSegmentMean(args) {
  var backend = args.backend;
  var _a = args.inputs, data = _a.data, indices = _a.indices, segmentIds = _a.segmentIds;
  if (data.shape.length < 1) {
    throw new Error("Data should be at least 1 dimensional but received scalar");
  }
  if (indices.shape.length !== 1) {
    throw new Error("Indices should be a vector but received shape\n " + indices.shape);
  }
  if (segmentIds.shape.length !== 1) {
    throw new Error("Segment ids should be a vector but received shape\n " + segmentIds.shape);
  }
  if (indices.shape[0] !== segmentIds.shape[0]) {
    throw new Error("segmentIds and indices should have same size.");
  }
  var dataValues = backend.data.get(data.dataId).values;
  var indicesValues = backend.data.get(indices.dataId).values;
  var segmentIdsValues = backend.data.get(segmentIds.dataId).values;
  // The trailing `true` selects the mean reduction.
  var result = sparseSegmentReductionImpl(dataValues, data.shape, data.dtype, indicesValues, segmentIdsValues, true);
  return backend.makeTensorInfo(result[1], data.dtype, result[0]);
}
/** Kernel registration: SparseSegmentMean on the CPU backend. */
var sparseSegmentMeanConfig = {
  kernelName: tfjsCore.SparseSegmentMean,
  backendName: 'cpu',
  kernelFunc: sparseSegmentMean,
};
+
/**
 * CPU kernel for SparseSegmentSum: sums the rows of `data` selected by
 * `indices` within each segment id, via the shared reduction implementation
 * (no mean flag, so it defaults to summation).
 * Throws when the inputs don't have the expected ranks or matching sizes.
 */
function sparseSegmentSum(args) {
  var backend = args.backend;
  var _a = args.inputs, data = _a.data, indices = _a.indices, segmentIds = _a.segmentIds;
  if (data.shape.length < 1) {
    throw new Error("Data should be at least 1 dimensional but received scalar");
  }
  if (indices.shape.length !== 1) {
    throw new Error("Indices should be a vector but received shape\n " + indices.shape);
  }
  if (segmentIds.shape.length !== 1) {
    throw new Error("Segment ids should be a vector but received shape\n " + segmentIds.shape);
  }
  if (indices.shape[0] !== segmentIds.shape[0]) {
    throw new Error("segmentIds and indices should have same size.");
  }
  var dataValues = backend.data.get(data.dataId).values;
  var indicesValues = backend.data.get(indices.dataId).values;
  var segmentIdsValues = backend.data.get(segmentIds.dataId).values;
  var result = sparseSegmentReductionImpl(dataValues, data.shape, data.dtype, indicesValues, segmentIdsValues);
  return backend.makeTensorInfo(result[1], data.dtype, result[0]);
}
/** Kernel registration: SparseSegmentSum on the CPU backend. */
var sparseSegmentSumConfig = {
  kernelName: tfjsCore.SparseSegmentSum,
  backendName: 'cpu',
  kernelFunc: sparseSegmentSum,
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // SparseToDense CPU kernel: scatters `sparseValues` at `sparseIndices`
    // into a dense tensor of `attrs.outputShape`; every untouched element is
    // filled with the scalar `defaultValue`.
    function sparseToDense(args) {
        var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
        var sparseIndices = inputs.sparseIndices, sparseValues = inputs.sparseValues, defaultValue = inputs.defaultValue;
        var outputShape = attrs.outputShape;
        var _a = tfjsCore.backend_util.calculateShapes(sparseValues, sparseIndices, outputShape), sliceRank = _a.sliceRank, numUpdates = _a.numUpdates, sliceSize = _a.sliceSize, strides = _a.strides, outputSize = _a.outputSize;
        // Duplicate indices overwrite (last write wins) rather than accumulate.
        var sumDupeIndices = false;
        var indicesBuf = backend.bufferSync(sparseIndices);
        var updatesBuf = backend.bufferSync(sparseValues);
        var $defaultValue = backend.data.get(defaultValue.dataId).values[0];
        var outBuf = scatterImpl(indicesBuf, updatesBuf, outputShape, outputSize, sliceSize, numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices);
        return backend.makeTensorInfo(outputShape, outBuf.dtype, outBuf.values);
    }
    var sparseToDenseConfig = {
        kernelName: tfjsCore.SparseToDense,
        backendName: 'cpu',
        kernelFunc: sparseToDense
    };
+
+ function splitV(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var numOrSizeSplits = attrs.numOrSizeSplits, axis = attrs.axis;
+ var $axis = tfjsCore.util.parseAxisParam(axis, x.shape)[0];
+ var splitSizes = tfjsCore.backend_util.prepareSplitSize(x, numOrSizeSplits, $axis);
+ var begin = new Array(x.shape.length).fill(0);
+ var size = x.shape.slice();
+ return splitSizes.map(function (s) {
+ var sliceSize = __spread(size);
+ sliceSize[$axis] = s;
+ var sliceT = slice({ inputs: { x: x }, backend: backend, attrs: { begin: begin, size: sliceSize } });
+ begin[$axis] += s;
+ return sliceT;
+ });
+ }
+ var splitVConfig = {
+ kernelName: tfjsCore.SplitV,
+ backendName: 'cpu',
+ kernelFunc: splitV
+ };
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var squareConfig = {
+ kernelName: tfjsCore.Square,
+ backendName: 'cpu',
+ kernelFunc: function (_a) {
+ var inputs = _a.inputs, backend = _a.backend;
+ var x = inputs.x;
+ var cpuBackend = backend;
+ assertNotComplex(x, 'square');
+ var values = cpuBackend.data.get(x.dataId).values;
+ var newValues = new Float32Array(values.length);
+ for (var i = 0; i < values.length; ++i) {
+ var value = values[i];
+ newValues[i] = value * value;
+ }
+ var dataId = cpuBackend.write(newValues, x.shape, x.dtype);
+ return { dataId: dataId, shape: x.shape, dtype: x.dtype };
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var step = unaryKernelFunc(tfjsCore.Step, function (xi, attrs) {
+ var stepAttrs = attrs;
+ if (isNaN(xi)) {
+ return NaN;
+ }
+ else {
+ return xi > 0 ? 1 : stepAttrs.alpha;
+ }
+ });
+ var stepConfig = {
+ kernelName: tfjsCore.Step,
+ backendName: 'cpu',
+ kernelFunc: step,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // StridedSlice CPU kernel: implements tf.stridedSlice with begin/end/
    // stride masks. Three paths, mirroring TF's strided_slice_op.cc:
    //   1) identity slices become a plain reshape;
    //   2) contiguous dim-0 / simple slices reuse the fast `slice` kernel;
    //   3) everything else falls back to the generic stridedSliceImpl.
    function stridedSlice(args) {
        var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
        var x = inputs.x;
        var begin = attrs.begin, end = attrs.end, strides = attrs.strides, beginMask = attrs.beginMask, endMask = attrs.endMask, ellipsisMask = attrs.ellipsisMask, newAxisMask = attrs.newAxisMask, shrinkAxisMask = attrs.shrinkAxisMask;
        assertNotComplex(x, 'stridedSlice');
        // sliceInfo resolves all masks into concrete begin/end/strides plus
        // the sparse and final (dense) output shapes.
        var _a = tfjsCore.slice_util.sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask), finalShapeSparse = _a.finalShapeSparse, finalShape = _a.finalShape, isIdentity = _a.isIdentity, sliceDim0 = _a.sliceDim0, isSimpleSlice = _a.isSimpleSlice, $begin = _a.begin, $end = _a.end, $strides = _a.strides;
        var result;
        // ref:
        // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/strided_slice_op.cc
        if (isIdentity) {
            // Optimization #1, slice is a no-op plus reshape
            result = reshape({ inputs: { x: x }, backend: backend, attrs: { shape: finalShape } });
        }
        else if (sliceDim0 || isSimpleSlice) {
            // Optimization #2, slice is memory contiguous (only occurs in dim 0)
            tfjsCore.util.assert(x.shape.length >= 1, function () { return "Input must have rank at least 1, got: " + x.shape.length; });
            var size = tfjsCore.slice_util.computeOutShape($begin, $end, $strides);
            // To tolerate begin[0] > end[0] (a 0-output slice), we min(begin, end).
            var sliced = slice({ inputs: { x: x }, backend: backend, attrs: { begin: $begin, size: size } });
            result =
                reshape({ inputs: { x: sliced }, backend: backend, attrs: { shape: finalShape } });
            // The intermediate slice is no longer needed once reshaped.
            backend.disposeIntermediateTensorInfo(sliced);
        }
        else {
            // General path: gather elements with arbitrary (possibly negative)
            // strides.
            var xBuf = backend.bufferSync(x);
            var outBuf = stridedSliceImpl(finalShapeSparse, xBuf, $strides, $begin);
            result = backend.makeTensorInfo(finalShape, outBuf.dtype, outBuf.values);
        }
        return result;
    }
    var stridedSliceConfig = {
        kernelName: tfjsCore.StridedSlice,
        backendName: 'cpu',
        kernelFunc: stridedSlice
    };
+
    // StringNGrams CPU kernel: builds n-grams from a ragged string tensor
    // (`data` values plus `dataSplits` row offsets), joining tokens with
    // `separator` and padding rows with leftPad/rightPad as configured.
    // Returns [flat n-gram values, row splits for the n-gram output].
    function stringNGrams(args) {
        var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
        var separator = attrs.separator, nGramWidths = attrs.nGramWidths, leftPad = attrs.leftPad, rightPad = attrs.rightPad, padWidth = attrs.padWidth, preserveShortSequences = attrs.preserveShortSequences;
        var data = inputs.data, dataSplits = inputs.dataSplits;
        var $data = backend.data.get(data.dataId).values;
        var $dataSplits = backend.data.get(dataSplits.dataId).values;
        var _a = __read(stringNGramsImpl($data, $dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences), 2), nGrams = _a[0], nGramsSplits = _a[1];
        // Output splits have the same shape as the input row splits.
        return [
            backend.makeTensorInfo([nGrams.length], 'string', nGrams),
            backend.makeTensorInfo(dataSplits.shape, 'int32', nGramsSplits),
        ];
    }
    var stringNGramsConfig = {
        kernelName: tfjsCore.StringNGrams,
        backendName: 'cpu',
        kernelFunc: stringNGrams,
    };
+
+ function stringSplit(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var skipEmpty = attrs.skipEmpty;
+ var input = inputs.input, delimiter = inputs.delimiter;
+ if (input.dtype !== 'string') {
+ throw new Error('Input must be of datatype string');
+ }
+ if (input.shape.length !== 1) {
+ throw new Error("Input must be a vector, got shape: " + input.shape);
+ }
+ if (delimiter.shape.length !== 0) {
+ throw new Error("Delimiter must be a scalar, got shape: " + delimiter.shape);
+ }
+ var $input = backend.data.get(input.dataId).values;
+ var $delimiter = backend.data.get(delimiter.dataId).values[0];
+ var _a = __read(stringSplitImpl($input, $delimiter, skipEmpty), 3), indices = _a[0], values = _a[1], shape = _a[2];
+ var outputSize = values.length;
+ return [
+ backend.makeTensorInfo([outputSize, 2], 'int32', indices),
+ backend.makeTensorInfo([outputSize], 'string', values),
+ backend.makeTensorInfo([2], 'int32', new Int32Array(shape))
+ ];
+ }
+ var stringSplitConfig = {
+ kernelName: tfjsCore.StringSplit,
+ backendName: 'cpu',
+ kernelFunc: stringSplit,
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function stringToHashBucketFast(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var numBuckets = attrs.numBuckets;
+ var input = inputs.input;
+ if (input.dtype !== 'string') {
+ throw new Error('Input must be of datatype string');
+ }
+ if (numBuckets <= 0) {
+ throw new Error("Number of buckets must be at least 1");
+ }
+ var $input = backend.data.get(input.dataId).values;
+ var output = stringToHashBucketFastImpl($input, numBuckets);
+ return backend.makeTensorInfo(input.shape, 'int32', output);
+ }
+ var stringToHashBucketFastConfig = {
+ kernelName: tfjsCore.StringToHashBucketFast,
+ backendName: 'cpu',
+ kernelFunc: stringToHashBucketFast,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var tan = unaryKernelFunc(tfjsCore.Tan, function (xi) { return Math.tan(xi); });
+ var tanConfig = {
+ kernelName: tfjsCore.Tan,
+ backendName: 'cpu',
+ kernelFunc: tan,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var tanh = unaryKernelFunc(tfjsCore.Tanh, function (xi) { return Math.tanh(xi); });
+ var tanhConfig = {
+ kernelName: tfjsCore.Tanh,
+ backendName: 'cpu',
+ kernelFunc: tanh,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function tile(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var reps = attrs.reps;
+ assertNotComplex(x, 'tile');
+ var outBuf = tileImpl(backend.bufferSync(x), reps);
+ return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);
+ }
+ var tileConfig = {
+ kernelName: tfjsCore.Tile,
+ backendName: 'cpu',
+ kernelFunc: tile
+ };
+
+ function topK(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var x = inputs.x;
+ var k = attrs.k, sorted = attrs.sorted;
+ assertNotComplex(x, 'topk');
+ var xVals = backend.data.get(x.dataId).values;
+ var _a = __read(topKImpl(xVals, x.shape, x.dtype, k, sorted), 2), allTopKVals = _a[0], allTopKIndices = _a[1];
+ return [
+ backend.makeTensorInfo(allTopKVals.shape, allTopKVals.dtype, allTopKVals.values),
+ backend.makeTensorInfo(allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values)
+ ];
+ }
+ var topKConfig = {
+ kernelName: tfjsCore.TopK,
+ backendName: 'cpu',
+ kernelFunc: topK
+ };
+
+ function transform(args) {
+ var inputs = args.inputs, attrs = args.attrs, backend = args.backend;
+ var image = inputs.image, transforms = inputs.transforms;
+ var interpolation = attrs.interpolation, fillMode = attrs.fillMode, fillValue = attrs.fillValue, outputShape = attrs.outputShape;
+ var _a = __read(image.shape, 4), batch = _a[0], imageHeight = _a[1], imageWidth = _a[2], numChannels = _a[3];
+ var _b = __read(outputShape != null ? outputShape : [imageHeight, imageWidth], 2), outHeight = _b[0], outWidth = _b[1];
+ var outShape = [batch, outHeight, outWidth, numChannels];
+ var strides = tfjsCore.util.computeStrides(image.shape);
+ var batchStride = strides[0];
+ var rowStride = strides[1];
+ var colStride = strides[2];
+ var outVals = tfjsCore.util.getTypedArrayFromDType(image.dtype, tfjsCore.util.sizeFromShape(outShape));
+ outVals.fill(fillValue);
+ var imageVals = backend.data.get(image.dataId).values;
+ var transformVals = backend.data.get(transforms.dataId).values;
+ // Ref TF implementation:
+ // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/image/image_ops.h
+ for (var b = 0; b < batch; ++b) {
+ var transform_1 = transforms.shape[0] === 1 ?
+ transformVals :
+ transformVals.subarray(b * 8, b * 8 + 8);
+ for (var outY = 0; outY < outHeight; ++outY) {
+ for (var outX = 0; outX < outWidth; ++outX) {
+ for (var channel = 0; channel < numChannels; ++channel) {
+ var val = void 0;
+ var projection = transform_1[6] * outX + transform_1[7] * outY + 1;
+ if (projection === 0) {
+ // Return the fill value for infinite coordinates,
+ // which are outside the input image
+ continue;
+ }
+ var inX = (transform_1[0] * outX + transform_1[1] * outY + transform_1[2]) /
+ projection;
+ var inY = (transform_1[3] * outX + transform_1[4] * outY + transform_1[5]) /
+ projection;
+ var x = mapCoord(inX, imageWidth, fillMode);
+ var y = mapCoord(inY, imageHeight, fillMode);
+ switch (interpolation) {
+ case 'nearest':
+ val = nearestInterpolation(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, b, y, x, channel, fillValue);
+ break;
+ case 'bilinear':
+ val = bilinearInterpolation(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, b, y, x, channel, fillValue);
+ break;
+ default:
+ throw new Error("Error in Transform: Expect 'nearest' or " +
+ ("'bilinear', but got " + interpolation));
+ }
+ var ind = b * batchStride + outY * rowStride + outX * colStride + channel;
+ outVals[ind] = val;
+ }
+ }
+ }
+ return backend.makeTensorInfo(outShape, image.dtype, outVals);
+ }
+ var dataId = backend.write(outVals, outShape, image.dtype);
+ return { dataId: dataId, shape: image.shape, dtype: image.dtype };
+ }
    // Kernel registration for Transform on the CPU backend.
    var transformConfig = {
        kernelName: tfjsCore.Transform,
        backendName: 'cpu',
        kernelFunc: transform
    };
+ function mapCoord(outCoord, len, mode) {
+ switch (mode) {
+ case 'reflect':
+ return mapCoordReflect(outCoord, len);
+ case 'wrap':
+ return mapCoordWrap(outCoord, len);
+ case 'nearest':
+ return mapCoordNearest(outCoord, len);
+ case 'constant':
+ default:
+ return mapCoordConstant(outCoord);
+ }
+ }
    // 'reflect' fill mode: mirrors out-of-range coordinates back into the
    // image, i.e. [abcd] extends as [dcba|abcd|dcba].
    function mapCoordReflect(outCoord, len) {
        // Reflect [abcd] to [dcba|abcd|dcba].
        var inCoord = outCoord;
        if (inCoord < 0) {
            if (len <= 1) {
                inCoord = 0;
            }
            else {
                var sz2 = 2 * len;
                // NOTE(review): this guard is always true when inCoord < 0; it
                // appears to mirror the `in < -sz2` normalization in TF's C++
                // kernel — confirm against upstream before changing.
                if (inCoord < sz2) {
                    inCoord = sz2 * Math.trunc(-inCoord / sz2) + inCoord;
                }
                inCoord = inCoord < -len ? inCoord + sz2 : -inCoord - 1;
            }
        }
        else if (inCoord > len - 1) {
            if (len <= 1) {
                inCoord = 0;
            }
            else {
                var sz2 = 2 * len;
                // Normalize into [0, 2*len), then mirror the upper half.
                inCoord -= sz2 * Math.trunc(inCoord / sz2);
                if (inCoord >= len) {
                    inCoord = sz2 - inCoord - 1;
                }
            }
        }
        // clamp is necessary because when outCoord = 3.5 and len = 4,
        // inCoord = 3.5 and will be rounded to 4 in nearest interpolation.
        return tfjsCore.util.clamp(0, inCoord, len - 1);
    }
    // 'wrap' fill mode: tiles the image, i.e. [abcd] extends as
    // [abcd|abcd|abcd].
    function mapCoordWrap(outCoord, len) {
        // Wrap [abcd] to [abcd|abcd|abcd].
        var inCoord = outCoord;
        if (inCoord < 0) {
            if (len <= 1) {
                inCoord = 0;
            }
            else {
                var sz = len - 1;
                inCoord += len * (Math.trunc(-inCoord / sz) + 1);
            }
        }
        else if (inCoord > len - 1) {
            if (len <= 1) {
                inCoord = 0;
            }
            else {
                var sz = len - 1;
                inCoord -= len * Math.trunc(inCoord / sz);
            }
        }
        // clamp is necessary because when outCoord = -0.5 and len = 4,
        // inCoord = 3.5 and will be rounded to 4 in nearest interpolation.
        return tfjsCore.util.clamp(0, inCoord, len - 1);
    }
    // 'constant' fill mode: leaves the coordinate unchanged; out-of-range
    // reads are later replaced by the fill value. (`len` is unused but kept
    // for a uniform mapCoord* signature.)
    function mapCoordConstant(outCoord, len) {
        return outCoord;
    }
    // 'nearest' fill mode: clamps the coordinate into [0, len - 1].
    function mapCoordNearest(outCoord, len) {
        return tfjsCore.util.clamp(0, outCoord, len - 1);
    }
+ function readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, y, x, channel, fillValue) {
+ var ind = batch * batchStride + y * rowStride + x * colStride + channel;
+ if (0 <= y && y < imageHeight && 0 <= x && x < imageWidth) {
+ return imageVals[ind];
+ }
+ else {
+ return fillValue;
+ }
+ }
+ function nearestInterpolation(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, y, x, channel, fillValue) {
+ var $y = Math.round(y);
+ var $x = Math.round(x);
+ return readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, $y, $x, channel, fillValue);
+ }
+ function bilinearInterpolation(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, y, x, channel, fillValue) {
+ var yFloor = Math.floor(y);
+ var xFloor = Math.floor(x);
+ var yCeil = yFloor + 1;
+ var xCeil = xFloor + 1;
+ // f(x, yFloor) = (xCeil - x) / (xCeil - xFloor) * f(xFloor, yFloor)
+ // + (x - xFloor) / (xCeil - xFloor) * f(xCeil, yFloor)
+ var valueYFloor = (xCeil - x) *
+ readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yFloor, xFloor, channel, fillValue) +
+ (x - xFloor) *
+ readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yFloor, xCeil, channel, fillValue);
+ // f(x, yCeil) = (xCeil - x) / (xCeil - xFloor) * f(xFloor, yCeil)
+ // + (x - xFloor) / (xCeil - xFloor) * f(xCeil, yCeil)
+ var valueYCeil = (xCeil - x) *
+ readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yCeil, xFloor, channel, fillValue) +
+ (x - xFloor) *
+ readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yCeil, xCeil, channel, fillValue);
+ // f(x, y) = (yCeil - y) / (yCeil - yFloor) * f(x, yFloor)
+ // + (y - yFloor) / (yCeil - yFloor) * f(x, yCeil)
+ return (yCeil - y) * valueYFloor + (y - yFloor) * valueYCeil;
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function unique(args) {
+ var inputs = args.inputs, attrs = args.attrs, backend = args.backend;
+ var axis = attrs.axis;
+ var x = inputs.x;
+ assertNotComplex(x, 'unique');
+ var values = backend.data.get(x.dataId).values;
+ var _a = uniqueImpl(values, axis, x.shape, x.dtype), outputValues = _a.outputValues, outputShape = _a.outputShape, indices = _a.indices;
+ return [
+ backend.makeTensorInfo(outputShape, x.dtype, outputValues),
+ backend.makeTensorInfo([indices.length], 'int32', indices),
+ ];
+ }
+ var uniqueConfig = {
+ kernelName: tfjsCore.Unique,
+ backendName: 'cpu',
+ kernelFunc: unique,
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function unpack(args) {
+ var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
+ var value = inputs.value;
+ var axis = attrs.axis;
+ if (axis < 0) {
+ axis += value.shape.length;
+ }
+ var valueRank = value.shape.length;
+ var num = value.shape[axis];
+ var outShape = new Array(valueRank - 1);
+ var outIndex = 0;
+ for (var i = 0; i < valueRank; i++) {
+ if (i !== axis) {
+ outShape[outIndex++] = value.shape[i];
+ }
+ }
+ var begin = new Array(valueRank).fill(0);
+ var size = value.shape.slice();
+ size[axis] = 1;
+ var res = new Array(num);
+ for (var i = 0; i < res.length; i++) {
+ begin[axis] = i;
+ var tempRes = slice({ inputs: { x: value }, backend: backend, attrs: { begin: begin, size: size } });
+ res[i] = reshape({ inputs: { x: tempRes }, backend: backend, attrs: { shape: outShape } });
+ backend.disposeIntermediateTensorInfo(tempRes);
+ }
+ return res;
+ }
+ var unpackConfig = {
+ kernelName: tfjsCore.Unpack,
+ backendName: 'cpu',
+ kernelFunc: unpack
+ };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // UnsortedSegmentSum CPU kernel: for each segment id in [0, numSegments),
    // sums the slices of `x` whose segment id matches, then stacks the
    // per-segment sums along axis 0. Implemented by composing existing kernels
    // (expandDims, equal, cast, multiply, sum, pack); all intermediates are
    // disposed before returning.
    function unsortedSegmentSum(args) {
        var inputs = args.inputs, backend = args.backend, attrs = args.attrs;
        var x = inputs.x, segmentIds = inputs.segmentIds;
        var numSegments = attrs.numSegments;
        assertNotComplex(x, 'unsortedSegmentSum');
        var xRank = x.shape.length;
        var segmentIdsRank = segmentIds.shape.length;
        var res = [];
        var intermediates = [];
        // Reshape the segment id's so that they can be broadcast with
        // x. The new shape should be [segmentIds.shape, 1, ..., 1]
        var numIters = xRank - segmentIdsRank;
        var $segmentIds = segmentIds;
        for (var i = 0; i < numIters; ++i) {
            var expanded = expandDims({ inputs: { input: $segmentIds }, backend: backend, attrs: { dim: i + 1 } });
            $segmentIds = expanded;
            intermediates.push(expanded);
        }
        // For each segment: build a 0/1 mask of matching elements, multiply it
        // into x, and sum over axis 0.
        for (var i = 0; i < numSegments; ++i) {
            var scalarValue = tfjsCore.util.createScalarValue(i, 'int32');
            var segmentId = backend.makeTensorInfo([], 'int32', scalarValue);
            var mask = equal({ inputs: { a: segmentId, b: $segmentIds }, backend: backend });
            var maskCasted = cast({ inputs: { x: mask }, backend: backend, attrs: { dtype: 'float32' } });
            var mul = multiply({ inputs: { a: maskCasted, b: x }, backend: backend });
            var sumTensorInfo = sum({ inputs: { x: mul }, backend: backend, attrs: { axis: 0, keepDims: false } });
            res.push(sumTensorInfo);
            intermediates.push(segmentId);
            intermediates.push(mask);
            intermediates.push(maskCasted);
            intermediates.push(mul);
            intermediates.push(sumTensorInfo);
        }
        // Stack the per-segment results into one output tensor.
        var result = pack({ inputs: res, backend: backend, attrs: { axis: 0 } });
        intermediates.forEach(function (t) { return backend.disposeIntermediateTensorInfo(t); });
        return result;
    }
    var unsortedSegmentSumConfig = {
        kernelName: tfjsCore.UnsortedSegmentSum,
        backendName: 'cpu',
        kernelFunc: unsortedSegmentSum
    };
+
    // Loop helpers for the downleveled for..of below (TypeScript's __values
    // iteration protocol: e_1 records a mid-iteration error, _a holds the
    // iterator's optional `return` method).
    var e_1, _a;
    // List all kernel configs here
    var kernelConfigs = [
        _fusedMatMulConfig,
        absConfig,
        acosConfig,
        acoshConfig,
        addConfig,
        addNConfig,
        allConfig,
        anyConfig,
        argMaxConfig,
        argMinConfig,
        asinConfig,
        asinhConfig,
        atanConfig,
        atan2Config,
        atanhConfig,
        avgPoolConfig,
        avgPool3DConfig,
        avgPool3DGradConfig,
        avgPoolGradConfig,
        batchMatMulConfig,
        batchNormConfig,
        batchToSpaceNDConfig,
        bincountConfig,
        broadcastArgsConfig,
        castConfig,
        ceilConfig,
        clipConfig,
        complexConfig,
        complexAbsConfig,
        concatConfig,
        conv2DBackpropFilterConfig,
        conv2DBackpropInputConfig,
        conv2DConfig,
        conv3DBackpropFilterV2Config,
        conv3DBackpropInputV2Config,
        conv3DConfig,
        cosConfig,
        coshConfig,
        cropAndResizeConfig,
        cumsumConfig,
        denseBincountConfig,
        depthToSpaceConfig,
        depthwiseConv2dNativeConfig,
        depthwiseConv2dNativeBackpropFilterConfig,
        depthwiseConv2dNativeBackpropInputConfig,
        diagConfig,
        dilation2dConfig,
        dilation2dBackpropInputConfig,
        dilation2dBackpropFilterConfig,
        realDivConfig,
        einsumConfig,
        eluConfig,
        eluGradConfig,
        equalConfig,
        erfConfig,
        expConfig,
        expandDimsConfig,
        expm1Config,
        fftConfig,
        fillConfig,
        flipLeftRightConfig,
        floorConfig,
        floorDivConfig,
        fusedConv2DConfig,
        fusedDepthwiseConv2DConfig,
        gatherNdConfig,
        gatherV2Config,
        greaterConfig,
        greaterEqualConfig,
        identityConfig,
        ifftConfig,
        imagConfig,
        isFiniteConfig,
        isInfConfig,
        isNaNConfig,
        leakyReluConfig,
        lessConfig,
        lessEqualConfig,
        linSpaceConfig,
        logConfig,
        log1pConfig,
        logicalAndConfig,
        logicalNotConfig,
        logicalOrConfig,
        lRNConfig,
        lRNGradConfig,
        maximumConfig,
        maxPoolConfig,
        maxPool3DConfig,
        maxPool3DGradConfig,
        maxPoolGradConfig,
        maxPoolWithArgmaxConfig,
        maxConfig,
        meanConfig,
        minConfig,
        minimumConfig,
        mirrorPadConfig,
        modConfig,
        multinomialConfig,
        multiplyConfig,
        negConfig,
        nonMaxSuppressionV3Config,
        nonMaxSuppressionV4Config,
        nonMaxSuppressionV5Config,
        notEqualConfig,
        oneHotConfig,
        onesLikeConfig,
        packConfig,
        padV2Config,
        powConfig,
        preluConfig,
        prodConfig,
        rangeConfig,
        realConfig,
        reciprocalConfig,
        reluConfig,
        relu6Config,
        reshapeConfig,
        resizeBilinearConfig,
        resizeBilinearGradConfig,
        resizeNearestNeighborConfig,
        resizeNearestNeighborGradConfig,
        reverseConfig,
        rotateWithOffsetConfig,
        roundConfig,
        rsqrtConfig,
        scatterNdConfig,
        selectConfig,
        seluConfig,
        sigmoidConfig,
        signConfig,
        sinConfig,
        sinhConfig,
        sliceConfig,
        softmaxConfig,
        softplusConfig,
        spaceToBatchNDConfig,
        sparseFillEmptyRowsConfig,
        sparseReshapeConfig,
        sparseSegmentMeanConfig,
        sparseSegmentSumConfig,
        sparseToDenseConfig,
        splitVConfig,
        sqrtConfig,
        squareConfig,
        squaredDifferenceConfig,
        stepConfig,
        stridedSliceConfig,
        stringNGramsConfig,
        stringSplitConfig,
        stringToHashBucketFastConfig,
        subConfig,
        sumConfig,
        tanConfig,
        tanhConfig,
        tileConfig,
        topKConfig,
        transposeConfig,
        transformConfig,
        uniqueConfig,
        unpackConfig,
        unsortedSegmentSumConfig,
        zerosLikeConfig
    ];
    // Register every config with tfjs-core's kernel registry. The
    // try/catch/finally shape is TypeScript's downleveled for..of: it records
    // an error thrown mid-iteration, closes the iterator via its `return`
    // method if iteration stopped early, then rethrows the recorded error.
    try {
        for (var kernelConfigs_1 = __values(kernelConfigs), kernelConfigs_1_1 = kernelConfigs_1.next(); !kernelConfigs_1_1.done; kernelConfigs_1_1 = kernelConfigs_1.next()) {
            var kernelConfig = kernelConfigs_1_1.value;
            tfjsCore.registerKernel(kernelConfig);
        }
    }
    catch (e_1_1) { e_1 = { error: e_1_1 }; }
    finally {
        try {
            if (kernelConfigs_1_1 && !kernelConfigs_1_1.done && (_a = kernelConfigs_1.return)) _a.call(kernelConfigs_1);
        }
        finally { if (e_1) throw e_1.error; }
    }
+
    // Public module surface: the backend class, the shared kernel impls, and
    // the package version string.
    exports.MathBackendCPU = MathBackendCPU;
    exports.shared = shared;
    exports.version_cpu = version;

    Object.defineProperty(exports, '__esModule', { value: true });
+
+})));
+//# sourceMappingURL=tf-backend-cpu.js.map
diff --git a/js/tf-converter.js b/js/tf-converter.js
new file mode 100644
index 0000000..8f0d976
--- /dev/null
+++ b/js/tf-converter.js
@@ -0,0 +1,29751 @@
+/**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+(function (global, factory) {
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('@tensorflow/tfjs-core')) :
+ typeof define === 'function' && define.amd ? define(['exports', '@tensorflow/tfjs-core'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.tf = global.tf || {}, global.tf));
+}(this, (function (exports, tfc) { 'use strict';
+
+ function _interopNamespace(e) {
+ if (e && e.__esModule) return e;
+ var n = Object.create(null);
+ if (e) {
+ Object.keys(e).forEach(function (k) {
+ if (k !== 'default') {
+ var d = Object.getOwnPropertyDescriptor(e, k);
+ Object.defineProperty(n, k, d.get ? d : {
+ enumerable: true,
+ get: function () {
+ return e[k];
+ }
+ });
+ }
+ });
+ }
+ n['default'] = e;
+ return n;
+ }
+
+ var tfc__namespace = /*#__PURE__*/_interopNamespace(tfc);
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var ENV$1 = tfc.env();
+ /** Whether to keep intermediate tensors. */
+ ENV$1.registerFlag('KEEP_INTERMEDIATE_TENSORS', function () { return false; }, function (debugValue) {
+ if (debugValue) {
+ console.warn('Keep intermediate tensors is ON. This will print the values of all ' +
+ 'intermediate tensors during model inference. Not all models ' +
+ 'support this mode. For details, check e2e/benchmarks/ ' +
+ 'model_config.js. This significantly impacts performance.');
+ }
+ });
+
+ /*! *****************************************************************************
+ Copyright (c) Microsoft Corporation.
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+ ***************************************************************************** */
+ /* global Reflect, Promise */
+ var extendStatics = function (d, b) {
+ extendStatics = Object.setPrototypeOf ||
+ ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
+ function (d, b) { for (var p in b)
+ if (b.hasOwnProperty(p))
+ d[p] = b[p]; };
+ return extendStatics(d, b);
+ };
+ function __extends(d, b) {
+ extendStatics(d, b);
+ function __() { this.constructor = d; }
+ d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
+ }
+ function __awaiter(thisArg, _arguments, P, generator) {
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try {
+ step(generator.next(value));
+ }
+ catch (e) {
+ reject(e);
+ } }
+ function rejected(value) { try {
+ step(generator["throw"](value));
+ }
+ catch (e) {
+ reject(e);
+ } }
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+ }
+ function __generator(thisArg, body) {
+ var _ = { label: 0, sent: function () { if (t[0] & 1)
+ throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
+ return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function () { return this; }), g;
+ function verb(n) { return function (v) { return step([n, v]); }; }
+ function step(op) {
+ if (f)
+ throw new TypeError("Generator is already executing.");
+ while (_)
+ try {
+ if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done)
+ return t;
+ if (y = 0, t)
+ op = [op[0] & 2, t.value];
+ switch (op[0]) {
+ case 0:
+ case 1:
+ t = op;
+ break;
+ case 4:
+ _.label++;
+ return { value: op[1], done: false };
+ case 5:
+ _.label++;
+ y = op[1];
+ op = [0];
+ continue;
+ case 7:
+ op = _.ops.pop();
+ _.trys.pop();
+ continue;
+ default:
+ if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
+ _ = 0;
+ continue;
+ }
+ if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) {
+ _.label = op[1];
+ break;
+ }
+ if (op[0] === 6 && _.label < t[1]) {
+ _.label = t[1];
+ t = op;
+ break;
+ }
+ if (t && _.label < t[2]) {
+ _.label = t[2];
+ _.ops.push(op);
+ break;
+ }
+ if (t[2])
+ _.ops.pop();
+ _.trys.pop();
+ continue;
+ }
+ op = body.call(thisArg, _);
+ }
+ catch (e) {
+ op = [6, e];
+ y = 0;
+ }
+ finally {
+ f = t = 0;
+ }
+ if (op[0] & 5)
+ throw op[1];
+ return { value: op[0] ? op[1] : void 0, done: true };
+ }
+ }
+ function __values(o) {
+ var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
+ if (m)
+ return m.call(o);
+ if (o && typeof o.length === "number")
+ return {
+ next: function () {
+ if (o && i >= o.length)
+ o = void 0;
+ return { value: o && o[i++], done: !o };
+ }
+ };
+ throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
+ }
+ function __read(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m)
+ return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done)
+ ar.push(r.value);
+ }
+ catch (error) {
+ e = { error: error };
+ }
+ finally {
+ try {
+ if (r && !r.done && (m = i["return"]))
+ m.call(i);
+ }
+ finally {
+ if (e)
+ throw e.error;
+ }
+ }
+ return ar;
+ }
+ function __spread() {
+ for (var ar = [], i = 0; i < arguments.length; i++)
+ ar = ar.concat(__read(arguments[i]));
+ return ar;
+ }
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+ /** DataType enum. */
+ var DataType;
+ (function (DataType) {
+ // Not a legal value for DataType. Used to indicate a DataType field
+ // has not been set.
+ DataType[DataType["DT_INVALID"] = 0] = "DT_INVALID";
+ // Data types that all computation devices are expected to be
+ // capable to support.
+ DataType[DataType["DT_FLOAT"] = 1] = "DT_FLOAT";
+ DataType[DataType["DT_DOUBLE"] = 2] = "DT_DOUBLE";
+ DataType[DataType["DT_INT32"] = 3] = "DT_INT32";
+ DataType[DataType["DT_UINT8"] = 4] = "DT_UINT8";
+ DataType[DataType["DT_INT16"] = 5] = "DT_INT16";
+ DataType[DataType["DT_INT8"] = 6] = "DT_INT8";
+ DataType[DataType["DT_STRING"] = 7] = "DT_STRING";
+ DataType[DataType["DT_COMPLEX64"] = 8] = "DT_COMPLEX64";
+ DataType[DataType["DT_INT64"] = 9] = "DT_INT64";
+ DataType[DataType["DT_BOOL"] = 10] = "DT_BOOL";
+ DataType[DataType["DT_QINT8"] = 11] = "DT_QINT8";
+ DataType[DataType["DT_QUINT8"] = 12] = "DT_QUINT8";
+ DataType[DataType["DT_QINT32"] = 13] = "DT_QINT32";
+ DataType[DataType["DT_BFLOAT16"] = 14] = "DT_BFLOAT16";
+ DataType[DataType["DT_QINT16"] = 15] = "DT_QINT16";
+ DataType[DataType["DT_QUINT16"] = 16] = "DT_QUINT16";
+ DataType[DataType["DT_UINT16"] = 17] = "DT_UINT16";
+ DataType[DataType["DT_COMPLEX128"] = 18] = "DT_COMPLEX128";
+ DataType[DataType["DT_HALF"] = 19] = "DT_HALF";
+ DataType[DataType["DT_RESOURCE"] = 20] = "DT_RESOURCE";
+ DataType[DataType["DT_VARIANT"] = 21] = "DT_VARIANT";
+ DataType[DataType["DT_UINT32"] = 22] = "DT_UINT32";
+ DataType[DataType["DT_UINT64"] = 23] = "DT_UINT64";
+ // Do not use! These are only for parameters. Every enum above
+ // should have a corresponding value below (verified by types_test).
+ DataType[DataType["DT_FLOAT_REF"] = 101] = "DT_FLOAT_REF";
+ DataType[DataType["DT_DOUBLE_REF"] = 102] = "DT_DOUBLE_REF";
+ DataType[DataType["DT_INT32_REF"] = 103] = "DT_INT32_REF";
+ DataType[DataType["DT_UINT8_REF"] = 104] = "DT_UINT8_REF";
+ DataType[DataType["DT_INT16_REF"] = 105] = "DT_INT16_REF";
+ DataType[DataType["DT_INT8_REF"] = 106] = "DT_INT8_REF";
+ DataType[DataType["DT_STRING_REF"] = 107] = "DT_STRING_REF";
+ DataType[DataType["DT_COMPLEX64_REF"] = 108] = "DT_COMPLEX64_REF";
+ DataType[DataType["DT_INT64_REF"] = 109] = "DT_INT64_REF";
+ DataType[DataType["DT_BOOL_REF"] = 110] = "DT_BOOL_REF";
+ DataType[DataType["DT_QINT8_REF"] = 111] = "DT_QINT8_REF";
+ DataType[DataType["DT_QUINT8_REF"] = 112] = "DT_QUINT8_REF";
+ DataType[DataType["DT_QINT32_REF"] = 113] = "DT_QINT32_REF";
+ DataType[DataType["DT_BFLOAT16_REF"] = 114] = "DT_BFLOAT16_REF";
+ DataType[DataType["DT_QINT16_REF"] = 115] = "DT_QINT16_REF";
+ DataType[DataType["DT_QUINT16_REF"] = 116] = "DT_QUINT16_REF";
+ DataType[DataType["DT_UINT16_REF"] = 117] = "DT_UINT16_REF";
+ DataType[DataType["DT_COMPLEX128_REF"] = 118] = "DT_COMPLEX128_REF";
+ DataType[DataType["DT_HALF_REF"] = 119] = "DT_HALF_REF";
+ DataType[DataType["DT_RESOURCE_REF"] = 120] = "DT_RESOURCE_REF";
+ DataType[DataType["DT_VARIANT_REF"] = 121] = "DT_VARIANT_REF";
+ DataType[DataType["DT_UINT32_REF"] = 122] = "DT_UINT32_REF";
+ DataType[DataType["DT_UINT64_REF"] = 123] = "DT_UINT64_REF";
+ })(DataType || (DataType = {}));
+ var SaverDef;
+ (function (SaverDef) {
+ (function (CheckpointFormatVersion) {
+ CheckpointFormatVersion[CheckpointFormatVersion["LEGACY"] = 0] = "LEGACY";
+ CheckpointFormatVersion[CheckpointFormatVersion["V1"] = 1] = "V1";
+ CheckpointFormatVersion[CheckpointFormatVersion["V2"] = 2] = "V2";
+ })(SaverDef.CheckpointFormatVersion || (SaverDef.CheckpointFormatVersion = {}));
+ })(SaverDef || (SaverDef = {}));
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var CUSTOM_OPS = {};
+ /**
+ * Register an Op for graph model executor. This allow you to register
+ * TensorFlow custom op or override existing op.
+ *
+ * Here is an example of registering a new MatMul Op.
+ * ```js
+ * const customMatmul = (node) =>
+ * tf.matMul(
+ * node.inputs[0], node.inputs[1],
+ * node.attrs['transpose_a'], node.attrs['transpose_b']);
+ *
+ * tf.registerOp('MatMul', customMatmul);
+ * ```
+ * The inputs and attrs of the node object is based on the TensorFlow op
+ * registry.
+ *
+ * @param name The Tensorflow Op name.
+ * @param opFunc An op function which is called with the current graph node
+ * during execution and needs to return a tensor or a list of tensors. The node
+ * has the following attributes:
+ * - attr: A map from attribute name to its value
+ * - inputs: A list of input tensors
+ *
+ * @doc {heading: 'Models', subheading: 'Op Registry'}
+ */
+ function registerOp(name, opFunc) {
+ var opMapper = {
+ tfOpName: name,
+ category: 'custom',
+ inputs: [],
+ attrs: [],
+ customExecutor: opFunc
+ };
+ CUSTOM_OPS[name] = opMapper;
+ }
+ /**
+ * Retrieve the OpMapper object for the registered op.
+ *
+ * @param name The Tensorflow Op name.
+ *
+ * @doc {heading: 'Models', subheading: 'Op Registry'}
+ */
+ function getRegisteredOp(name) {
+ return CUSTOM_OPS[name];
+ }
+ /**
+ * Deregister the Op for graph model executor.
+ *
+ * @param name The Tensorflow Op name.
+ *
+ * @doc {heading: 'Models', subheading: 'Op Registry'}
+ */
+ function deregisterOp(name) {
+ delete CUSTOM_OPS[name];
+ }
+
+ function getParamValue(paramName, node, tensorMap, context, resourceManager) {
+ var inputParam = node.inputParams[paramName];
+ if (inputParam && inputParam.inputIndexStart !== undefined) {
+ var start = inputParam.inputIndexStart;
+ var end = inputParam.inputIndexEnd === 0 ?
+ undefined :
+ (inputParam.inputIndexEnd === undefined ? start + 1 :
+ inputParam.inputIndexEnd);
+ if (inputParam.type === 'tensor') {
+ return getTensor(node.inputNames[inputParam.inputIndexStart], tensorMap, context, resourceManager);
+ }
+ if (inputParam.type === 'tensors') {
+ var inputs = node.inputNames.slice(start, end);
+ return inputs.map(function (name) { return getTensor(name, tensorMap, context, resourceManager); });
+ }
+ var tensor = getTensor(node.inputNames.slice(start)[0], tensorMap, context, resourceManager);
+ var data = tensor.dataSync();
+ return inputParam.type === 'number' ?
+ data[0] :
+ tfc.util.toNestedArray(tensor.shape, data);
+ }
+ var attrParam = node.attrParams[paramName];
+ return attrParam && attrParam.value;
+ }
+ /**
+ * Retrieve the tensor from tensorsMap based on input name.
+ * @param name Node input name
+ * @param tensorsMap Tensors map keyed by the node
+ * @param context contains tensors and information for running the current node.
+ * @param resourceManager Optional. Contains global resources of the model.
+ */
+ function getTensor(name, tensorsMap, context, resourceManager) {
+ var _a = __read(parseNodeName(name), 2), nodeName = _a[0], index = _a[1];
+ if (resourceManager != null) {
+ var tensor = resourceManager.getHashTableHandleByName(nodeName);
+ if (tensor != null) {
+ return tensor;
+ }
+ }
+ var contextId = context.currentContextIds.find(function (contextId) {
+ return !!tensorsMap[getNodeNameWithContextId(nodeName, contextId)];
+ });
+ return contextId !== undefined ?
+ tensorsMap[getNodeNameWithContextId(nodeName, contextId)][index] :
+ undefined;
+ }
+ /**
+ * Retrieve the tensors based on input name for current context.
+ * @param name Node input name
+ * @param tensorsMap Tensors map keyed by the node
+ */
+ function getTensorsForCurrentContenxt(name, tensorsMap, context) {
+ return tensorsMap[getNodeNameWithContextId(name, context.currentContextId)];
+ }
+ /**
+ * Returns the node name, outputName and index from the Node input name.
+ * @param inputName The input name of the node, in format of
+ * node_name:output_index, i.e. MatMul:0, if the output_index is not set, it is
+ * default to 0.
+ * If the input name contains output name i.e. StringSplit:indices:0, it will
+ * return ['StringSplit', 0, 'indices'].
+ */
+ function getNodeNameAndIndex(inputName, context) {
+ var _a = __read(parseNodeName(inputName), 3), nodeName = _a[0], index = _a[1], outputName = _a[2];
+ return [
+ getNodeNameWithContextId(nodeName, context && context.currentContextId),
+ index, outputName
+ ];
+ }
+ function getNodeNameWithContextId(name, contextId) {
+ return !!contextId ? name + "-" + contextId : name;
+ }
+ function parseNodeName(name) {
+ var parts = name.split(':');
+ if (parts.length === 1) {
+ return [name, 0, undefined];
+ }
+ var nodeName = parts[0];
+ var outputName = parts.length === 3 ? parts[1] : undefined;
+ var index = Number(parts[parts.length - 1]);
+ return [nodeName, index, outputName];
+ }
+ function getPadding(node, tensorMap, context) {
+ var pad = getParamValue('pad', node, tensorMap, context);
+ if (pad === 'explicit') {
+ // This is 1d array, we need to convert it to 2d array
+ pad = getParamValue('explicitPaddings', node, tensorMap, context);
+ var explicitPadding = [[0, 0], [0, 0], [0, 0], [0, 0]];
+ for (var i = 0; i < 4; i++) {
+ explicitPadding[i][0] = pad[i * 2];
+ explicitPadding[i][1] = pad[i * 2 + 1];
+ }
+ return explicitPadding;
+ }
+ return pad;
+ }
+ /**
+ * Reuse the tensor if it is marked as keep, otherwise clone the tensor to
+ * avoid disposal. This is important for TensorArray and TensorList ops, since
+ * internally they use a tensor as the id for TensorArray and TensorList, and
+ * to simplify lookup, they also use Tensor.id as the key to the internal map.
+ * These id tensors have been marked as kept in the backend, we need avoid clone
+ * them in order to create new Tensor.id.
+ * @param tensor
+ */
+ function cloneTensor(tensor) {
+ return tensor.kept ? tensor : tfc.clone(tensor);
+ }
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$i = [
+ {
+ 'tfOpName': 'Add',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'AddV2',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'AddN',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'tensors',
+ 'type': 'tensors'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'BiasAdd',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Sub',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'RealDiv',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Div',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'DivNoNan',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FloorDiv',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Mul',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Maximum',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Minimum',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Pow',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'SquaredDifference',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Mod',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FloorMod',
+ 'category': 'arithmetic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ }
+ ];
+
+ var arithmetic = {
+ __proto__: null,
+ json: json$i
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$h = [
+ {
+ 'tfOpName': 'Abs',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Acos',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Asin',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Atan',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Atan2',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'y',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Ceil',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ClipByValue',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'clipValueMin',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'clipValueMax',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Complex',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'real',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'imag',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ComplexAbs',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Cos',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Cosh',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Elu',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Exp',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Floor',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Log',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Imag',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'Tout',
+ 'name': 'outputType',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Neg',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Real',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'Tout',
+ 'name': 'outputType',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Prelu',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'alpha',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Relu',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Relu6',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Selu',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Sigmoid',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Sin',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Sinh',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Sqrt',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Rsqrt',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Square',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Tan',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Tanh',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Sign',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Round',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Expm1',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Log1p',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Reciprocal',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Softplus',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Asinh',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Acosh',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Atanh',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Erf',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Prod',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axes',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LeakyRelu',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'alpha',
+ 'name': 'alpha',
+ 'type': 'number',
+ 'defaultValue': 0.2
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'IsNan',
+ 'category': 'basic_math',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ }
+ ];
+
+ var basicMath = {
+ __proto__: null,
+ json: json$h
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$g = [
+ {
+ 'tfOpName': 'EmptyTensorList',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ },
+ {
+ 'start': 1,
+ 'name': 'maxNumElements',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LoopCond',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'pred',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Switch',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'data',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'pred',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Merge',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'tensors',
+ 'type': 'tensors'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Enter',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'frame_name',
+ 'name': 'frameName',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'is_constant',
+ 'name': 'isConstant',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Exit',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'NextIteration',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'size',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'element_shape',
+ 'name': 'elementShape',
+ 'type': 'shape'
+ },
+ {
+ 'tfName': 'dynamic_size',
+ 'name': 'dynamicSize',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'clear_after_read',
+ 'name': 'clearAfterRead',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'identical_element_shapes',
+ 'name': 'identicalElementShapes',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'tensor_array_name',
+ 'name': 'name',
+ 'type': 'string'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayWriteV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'index',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'flowIn',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayReadV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'index',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'flowIn',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayGatherV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'flowIn',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'element_shape',
+ 'name': 'elementShape',
+ 'type': 'shape'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayScatterV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'flowIn',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayConcatV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'flowIn',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'element_shape_except0',
+ 'name': 'elementShapeExcept0',
+ 'type': 'shape',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArraySplitV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'lengths',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 3,
+ 'name': 'flowIn',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArraySizeV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'flowIn',
+ 'type': 'number'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayCloseV3',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorArrayId',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'StatelessIf',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'cond',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'end': 0,
+ 'name': 'args',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'then_branch',
+ 'name': 'thenBranch',
+ 'type': 'func'
+ },
+ {
+ 'tfName': 'else_branch',
+ 'name': 'elseBranch',
+ 'type': 'func'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'If',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'cond',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'end': 0,
+ 'name': 'args',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'then_branch',
+ 'name': 'thenBranch',
+ 'type': 'func'
+ },
+ {
+ 'tfName': 'else_branch',
+ 'name': 'elseBranch',
+ 'type': 'func'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'StatelessWhile',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'args',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'cond',
+ 'name': 'cond',
+ 'type': 'func'
+ },
+ {
+ 'tfName': 'body',
+ 'name': 'body',
+ 'type': 'func'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'While',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'args',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'cond',
+ 'name': 'cond',
+ 'type': 'func'
+ },
+ {
+ 'tfName': 'body',
+ 'name': 'body',
+ 'type': 'func'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListScatter',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListScatterV2',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ },
+ {
+ 'start': 3,
+ 'name': 'numElements',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListGather',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorListId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListGetItem',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorListId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'index',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListSetItem',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorListId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'index',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListReserve',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ },
+ {
+ 'start': 1,
+ 'name': 'numElements',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListFromTensor',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListStack',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorListId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'num_elements',
+ 'name': 'numElements',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListSplit',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ },
+ {
+ 'start': 2,
+ 'name': 'lengths',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListConcat',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorListId',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_shape',
+ 'name': 'elementShape',
+ 'type': 'shape'
+ },
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListPopBack',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorListId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'elementShape',
+ 'type': 'shape'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorListPushBack',
+ 'category': 'control',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensorListId',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'element_dtype',
+ 'name': 'elementDType',
+ 'type': 'dtype'
+ }
+ ]
+ }
+ ];
+
+ var control = {
+ __proto__: null,
+ json: json$g
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$f = [
+ {
+ 'tfOpName': 'AvgPool',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'ksize',
+ 'name': 'kernelSize',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'MaxPool',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'ksize',
+ 'name': 'kernelSize',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': [],
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'MaxPoolWithArgmax',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'ksize',
+ 'name': 'kernelSize',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'include_batch_in_index',
+ 'name': 'includeBatchInIndex',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'AvgPool3D',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'ksize',
+ 'name': 'kernelSize',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'MaxPool3D',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'ksize',
+ 'name': 'kernelSize',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Conv1D',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'stride',
+ 'name': 'stride',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NWC'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'dilation',
+ 'name': 'dilation',
+ 'type': 'number',
+ 'defaultValue': 1
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Conv2D',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'useCudnnOnGpu',
+ 'name': 'useCudnnOnGpu',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': '_FusedConv2D',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'end': 0,
+ 'name': 'args',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'num_args',
+ 'name': 'numArgs',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'use_cudnn_on_gpu',
+ 'name': 'useCudnnOnGpu',
+ 'type': 'bool',
+ 'defaultValue': true
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]',
+ 'defaultValue': [
+ 1,
+ 1,
+ 1,
+ 1
+ ]
+ },
+ {
+ 'tfName': 'fused_ops',
+ 'name': 'fusedOps',
+ 'type': 'string[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.0001
+ },
+ {
+ 'tfName': 'leakyrelu_alpha',
+ 'name': 'leakyreluAlpha',
+ 'type': 'number'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Conv2DBackpropInput',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 2,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 0,
+ 'name': 'outputShape',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'DepthwiseConv2d',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'input',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'DepthwiseConv2dNative',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'input',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FusedDepthwiseConv2dNative',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'end': 0,
+ 'name': 'args',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'num_args',
+ 'name': 'numArgs',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]',
+ 'defaultValue': [
+ 1,
+ 1,
+ 1,
+ 1
+ ]
+ },
+ {
+ 'tfName': 'fused_ops',
+ 'name': 'fusedOps',
+ 'type': 'string[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Conv3D',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Dilation2D',
+ 'category': 'convolution',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'filter',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'strides',
+ 'name': 'strides',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'rates',
+ 'name': 'dilations',
+ 'type': 'number[]'
+ },
+ {
+ 'tfName': 'padding',
+ 'name': 'pad',
+ 'type': 'string'
+ }
+ ]
+ }
+ ];
+
+ var convolution = {
+ __proto__: null,
+ json: json$f
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$e = [
+ {
+ 'tfOpName': 'Fill',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'shape',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 1,
+ 'name': 'value',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LinSpace',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'start',
+ 'type': 'number'
+ },
+ {
+ 'start': 1,
+ 'name': 'stop',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'num',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'OneHot',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'indices',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'depth',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'onValue',
+ 'type': 'number',
+ 'defaultValue': 1
+ },
+ {
+ 'start': 3,
+ 'name': 'offValue',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'axis',
+ 'name': 'axis',
+ 'type': 'number',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Ones',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'shape',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'OnesLike',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'RandomUniform',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'shape',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'minval',
+ 'name': 'minval',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'maxval',
+ 'name': 'maxval',
+ 'type': 'number',
+ 'defaultValue': 1
+ },
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'seed',
+ 'name': 'seed',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'seed2',
+ 'name': 'seed2',
+ 'type': 'number',
+ 'defaultValue': 0,
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'T',
+ 'type': 'number',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Range',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'start',
+ 'type': 'number'
+ },
+ {
+ 'start': 1,
+ 'name': 'stop',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'step',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'Tidx',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TruncatedNormal',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'shape',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'means',
+ 'name': 'mean',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'stddev',
+ 'name': 'stdDev',
+ 'type': 'number',
+ 'defaultValue': 1
+ },
+ {
+ 'tfName': 'seed',
+ 'name': 'seed',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'seed2',
+ 'name': 'seed2',
+ 'type': 'number',
+ 'defaultValue': 0,
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'T',
+ 'type': 'number',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Zeros',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'shape',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ZerosLike',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Multinomial',
+ 'category': 'creation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'logits',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'numSamples',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'seed',
+ 'name': 'seed',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'seed2',
+ 'name': 'seed2',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'output_dtype',
+ 'name': 'output_dtype',
+ 'type': 'dtype'
+ }
+ ]
+ }
+ ];
+
+ var creation = {
+ __proto__: null,
+ json: json$e
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$d = [
+ {
+ 'tfOpName': 'NonMaxSuppressionV2',
+ 'category': 'dynamic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'boxes',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'scores',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'maxOutputSize',
+ 'type': 'number'
+ },
+ {
+ 'start': 3,
+ 'name': 'iouThreshold',
+ 'type': 'number'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'NonMaxSuppressionV3',
+ 'category': 'dynamic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'boxes',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'scores',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'maxOutputSize',
+ 'type': 'number'
+ },
+ {
+ 'start': 3,
+ 'name': 'iouThreshold',
+ 'type': 'number'
+ },
+ {
+ 'start': 4,
+ 'name': 'scoreThreshold',
+ 'type': 'number'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'NonMaxSuppressionV4',
+ 'category': 'dynamic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'boxes',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'scores',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'maxOutputSize',
+ 'type': 'number'
+ },
+ {
+ 'start': 3,
+ 'name': 'iouThreshold',
+ 'type': 'number'
+ },
+ {
+ 'start': 4,
+ 'name': 'scoreThreshold',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'T_threshold',
+ 'name': 'threshold',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'pad_to_max_output_size',
+ 'name': 'padToMaxOutputSize',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'NonMaxSuppressionV5',
+ 'category': 'dynamic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'boxes',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'scores',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'maxOutputSize',
+ 'type': 'number'
+ },
+ {
+ 'start': 3,
+ 'name': 'iouThreshold',
+ 'type': 'number'
+ },
+ {
+ 'start': 4,
+ 'name': 'scoreThreshold',
+ 'type': 'number'
+ },
+ {
+ 'start': 5,
+ 'name': 'softNmsSigma',
+ 'type': 'number'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Where',
+ 'category': 'dynamic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'condition',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ListDiff',
+ 'category': 'dynamic',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'y',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ }
+ ];
+
+ var dynamic = {
+ __proto__: null,
+ json: json$d
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$c = [
+ {
+ 'tfOpName': 'TopKV2',
+ 'category': 'evaluation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'k',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'sorted',
+ 'name': 'sorted',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Unique',
+ 'category': 'evaluation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'UniqueV2',
+ 'category': 'evaluation',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number'
+ }
+ ]
+ }
+ ];
+
+ var evaluation = {
+ __proto__: null,
+ json: json$c
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$b = [
+ {
+ 'tfOpName': 'PlaceholderWithDefault',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'default',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'shape',
+ 'name': 'shape',
+ 'type': 'shape'
+ },
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Placeholder',
+ 'category': 'graph',
+ 'attrs': [
+ {
+ 'tfName': 'shape',
+ 'name': 'shape',
+ 'type': 'shape'
+ },
+ {
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Const',
+ 'category': 'graph'
+ },
+ {
+ 'tfOpName': 'Identity',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'IdentityN',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'x',
+ 'type': 'tensors'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Snapshot',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Rank',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Size',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Shape',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ShapeN',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'x',
+ 'type': 'tensors'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Print',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'data',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'message',
+ 'name': 'message',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'first_n',
+ 'name': 'firstN',
+ 'type': 'number',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'summarize',
+ 'name': 'summarize',
+ 'type': 'number',
+ 'defaultValue': 3
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'NoOp',
+ 'category': 'graph',
+ 'inputs': []
+ },
+ {
+ 'tfOpName': 'StopGradient',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FakeQuantWithMinMaxVars',
+ 'category': 'graph',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'min',
+ 'name': 'min',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'max',
+ 'name': 'max',
+ 'type': 'number'
+ }
+ ]
+ }
+ ];
+
+ var graph = {
+ __proto__: null,
+ json: json$b
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$a = [
+ {
+ 'tfOpName': 'HashTable',
+ 'category': 'hash_table',
+ 'inputs': [],
+ 'attrs': [
+ {
+ 'tfName': 'shared_name',
+ 'name': 'sharedName',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'use_node_name_sharing',
+ 'name': 'useNodeNameSharing',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'key_dtype',
+ 'name': 'keyDType',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'value_dtype',
+ 'name': 'valueDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'HashTableV2',
+ 'category': 'hash_table',
+ 'inputs': [],
+ 'attrs': [
+ {
+ 'tfName': 'shared_name',
+ 'name': 'sharedName',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'use_node_name_sharing',
+ 'name': 'useNodeNameSharing',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'key_dtype',
+ 'name': 'keyDType',
+ 'type': 'dtype'
+ },
+ {
+ 'tfName': 'value_dtype',
+ 'name': 'valueDType',
+ 'type': 'dtype'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LookupTableImport',
+ 'category': 'hash_table',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tableHandle',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'keys',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'values',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'Tin',
+ 'name': 'tIn',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'Tout',
+ 'name': 'tOut',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LookupTableImportV2',
+ 'category': 'hash_table',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tableHandle',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'keys',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'values',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'Tin',
+ 'name': 'tIn',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'Tout',
+ 'name': 'tOut',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LookupTableFind',
+ 'category': 'hash_table',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tableHandle',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'keys',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'defaultValue',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'Tin',
+ 'name': 'tIn',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'Tout',
+ 'name': 'tOut',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LookupTableFindV2',
+ 'category': 'hash_table',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tableHandle',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'keys',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'defaultValue',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'Tin',
+ 'name': 'tIn',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'Tout',
+ 'name': 'tOut',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LookupTableSize',
+ 'category': 'hash_table',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tableHandle',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LookupTableSizeV2',
+ 'category': 'hash_table',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tableHandle',
+ 'type': 'tensor'
+ }
+ ]
+ }
+ ];
+
+ var hashTable = {
+ __proto__: null,
+ json: json$a
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$9 = [
+ {
+ 'tfOpName': 'ResizeBilinear',
+ 'category': 'image',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'images',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'size',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'align_corners',
+ 'name': 'alignCorners',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'half_pixel_centers',
+ 'name': 'halfPixelCenters',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ResizeNearestNeighbor',
+ 'category': 'image',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'images',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'size',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'align_corners',
+ 'name': 'alignCorners',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'half_pixel_centers',
+ 'name': 'halfPixelCenters',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'CropAndResize',
+ 'category': 'image',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'image',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'boxes',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'boxInd',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'cropSize',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'method',
+ 'name': 'method',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'extrapolation_value',
+ 'name': 'extrapolationValue',
+ 'type': 'number'
+ }
+ ]
+ }
+ ];
+
+ var image$1 = {
+ __proto__: null,
+ json: json$9
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$8 = [
+ {
+ 'tfOpName': 'Equal',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'NotEqual',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Greater',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'GreaterEqual',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Less',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LessEqual',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LogicalAnd',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LogicalNot',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LogicalOr',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Select',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'condition',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'SelectV2',
+ 'category': 'logical',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'condition',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ }
+ ];
+
+ var logical = {
+ __proto__: null,
+ json: json$8
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$7 = [
+ {
+ 'tfOpName': '_FusedMatMul',
+ 'category': 'matrices',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'end': 0,
+ 'name': 'args',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'num_args',
+ 'name': 'numArgs',
+ 'type': 'number'
+ },
+ {
+ 'tfName': 'fused_ops',
+ 'name': 'fusedOps',
+ 'type': 'string[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.0001
+ },
+ {
+ 'tfName': 'transpose_a',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'transpose_b',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'MatMul',
+ 'category': 'matrices',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'transpose_a',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'transpose_b',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'BatchMatMul',
+ 'category': 'matrices',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'adj_x',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'adj_y',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'BatchMatMulV2',
+ 'category': 'matrices',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'a',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'b',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'adj_x',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'adj_y',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Transpose',
+ 'category': 'matrices',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'perm',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Einsum',
+ 'category': 'matrices',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'tensors',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'equation',
+ 'name': 'equation',
+ 'type': 'string'
+ },
+ {
+ 'tfName': 'N',
+ 'name': 'n',
+ 'type': 'number',
+ 'defaultValue': 2
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype'
+ }
+ ]
+ }
+ ];
+
+ var matrices = {
+ __proto__: null,
+ json: json$7
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$6 = [
+ {
+ 'tfOpName': 'FusedBatchNorm',
+ 'category': 'normalization',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'scale',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'offset',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'mean',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 4,
+ 'name': 'variance',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.001
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FusedBatchNormV2',
+ 'category': 'normalization',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'scale',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'offset',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'mean',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 4,
+ 'name': 'variance',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.001
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FusedBatchNormV3',
+ 'category': 'normalization',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'scale',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'offset',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'mean',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 4,
+ 'name': 'variance',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.001
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LRN',
+ 'category': 'normalization',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'depth_radius',
+ 'name': 'radius',
+ 'type': 'number',
+ 'defaultValue': 5
+ },
+ {
+ 'tfName': 'bias',
+ 'name': 'bias',
+ 'type': 'number',
+ 'defaultValue': 1
+ },
+ {
+ 'tfName': 'alpha',
+ 'name': 'alpha',
+ 'type': 'number',
+ 'defaultValue': 1
+ },
+ {
+ 'tfName': 'beta',
+ 'name': 'beta',
+ 'type': 'number',
+ 'defaultValue': 0.5
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Softmax',
+ 'category': 'normalization',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LogSoftmax',
+ 'category': 'normalization',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'SparseToDense',
+ 'category': 'normalization',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'sparseIndices',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'outputShape',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'sparseValues',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'defaultValue',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'validate_indices',
+ 'name': 'validateIndices',
+ 'type': 'bool',
+ 'defaultValue': true,
+ 'notSupported': true
+ }
+ ]
+ }
+ ];
+
+ var normalization = {
+ __proto__: null,
+ json: json$6
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$5 = [
+ {
+ 'tfOpName': 'Bincount',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'size',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'weights',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'DenseBincount',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'size',
+ 'type': 'number'
+ },
+ {
+ 'start': 2,
+ 'name': 'weights',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'binary_output',
+ 'name': 'binaryOutput',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Max',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Mean',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Min',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Sum',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'All',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Any',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ArgMax',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ArgMin',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Prod',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Cumsum',
+ 'category': 'reduction',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'exclusive',
+ 'name': 'exclusive',
+ 'type': 'bool'
+ },
+ {
+ 'tfName': 'reverse',
+ 'name': 'reverse',
+ 'type': 'bool'
+ }
+ ]
+ }
+ ];
+
+ var reduction = {
+ __proto__: null,
+ json: json$5
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var json$4 = [
+ {
+ 'tfOpName': 'ConcatV2',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': -1,
+ 'name': 'tensors',
+ 'type': 'tensors'
+ },
+ {
+ 'start': -1,
+ 'name': 'axis',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'N',
+ 'name': 'n',
+ 'type': 'number',
+ 'defaultValue': 2
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Concat',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 1,
+ 'end': 0,
+ 'name': 'tensors',
+ 'type': 'tensors'
+ },
+ {
+ 'start': 0,
+ 'name': 'axis',
+ 'type': 'number'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'N',
+ 'name': 'n',
+ 'type': 'number',
+ 'defaultValue': 2
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'GatherV2',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'axis',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'batch_dims',
+ 'name': 'batchDims',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Gather',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'validate_indices',
+ 'name': 'validateIndices',
+ 'type': 'bool',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Reverse',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'dims',
+ 'type': 'bool[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ReverseV2',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'axis',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Slice',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'begin',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'size',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'StridedSlice',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'begin',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'end',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 3,
+ 'name': 'strides',
+ 'type': 'number[]'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'begin_mask',
+ 'name': 'beginMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'end_mask',
+ 'name': 'endMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'new_axis_mask',
+ 'name': 'newAxisMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'ellipsis_mask',
+ 'name': 'ellipsisMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'shrink_axis_mask',
+ 'name': 'shrinkAxisMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Pack',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'end': 0,
+ 'name': 'tensors',
+ 'type': 'tensors'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'axis',
+ 'name': 'axis',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Unpack',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'tensor',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'axis',
+ 'name': 'axis',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'num',
+ 'name': 'num',
+ 'type': 'number',
+ 'defaultValue': 0,
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Tile',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'reps',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Split',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'axis',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'start': 1,
+ 'name': 'x',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'num_split',
+ 'name': 'numOrSizeSplits',
+ 'type': 'number',
+ 'defaultValue': 1
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'SplitV',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'numOrSizeSplits',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'axis',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'ScatterNd',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'indices',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'values',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 2,
+ 'name': 'shape',
+ 'type': 'number[]'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'GatherNd',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'x',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'indices',
+ 'type': 'tensor'
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'SparseToDense',
+ 'category': 'slice_join',
+ 'inputs': [
+ {
+ 'start': 0,
+ 'name': 'sparseIndices',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 1,
+ 'name': 'outputShape',
+ 'type': 'number[]'
+ },
+ {
+ 'start': 2,
+ 'name': 'sparseValues',
+ 'type': 'tensor'
+ },
+ {
+ 'start': 3,
+ 'name': 'defaultValue',
+ 'type': 'tensor'
+ }
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'validate_indices',
+ 'name': 'validateIndices',
+ 'type': 'bool',
+ 'defaultValue': false,
+ 'notSupported': true
+ }
+ ]
+ }
+ ];
+
// Module-shaped export of the slice/join op signatures; the null prototype
// keeps lookups from ever hitting Object.prototype.
var sliceJoin = { __proto__: null, json: json$4 };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Mapper table for TensorFlow sparse-tensor ops: each entry names a TF op,
 * its category, and how its inputs/attrs map onto TensorFlow.js parameters.
 */
var json$3 = [
    {
        'tfOpName': 'SparseFillEmptyRows',
        'category': 'sparse',
        'inputs': [
            { 'start': 0, 'name': 'indices', 'type': 'tensor' },
            { 'start': 1, 'name': 'values', 'type': 'tensor' },
            { 'start': 2, 'name': 'denseShape', 'type': 'tensor' },
            { 'start': 3, 'name': 'defaultValue', 'type': 'tensor' }
        ]
    },
    {
        'tfOpName': 'SparseReshape',
        'category': 'sparse',
        'inputs': [
            { 'start': 0, 'name': 'inputIndices', 'type': 'tensor' },
            { 'start': 1, 'name': 'inputShape', 'type': 'tensor' },
            { 'start': 2, 'name': 'newShape', 'type': 'tensor' }
        ],
        'attrs': [
            { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
        ]
    },
    {
        'tfOpName': 'SparseSegmentMean',
        'category': 'sparse',
        'inputs': [
            { 'start': 0, 'name': 'data', 'type': 'tensor' },
            { 'start': 1, 'name': 'indices', 'type': 'tensor' },
            { 'start': 2, 'name': 'segmentIds', 'type': 'tensor' }
        ]
    },
    {
        'tfOpName': 'SparseSegmentSum',
        'category': 'sparse',
        'inputs': [
            { 'start': 0, 'name': 'data', 'type': 'tensor' },
            { 'start': 1, 'name': 'indices', 'type': 'tensor' },
            { 'start': 2, 'name': 'segmentIds', 'type': 'tensor' }
        ]
    }
];

// Module-shaped export (null prototype avoids accidental inherited keys).
var sparse$1 = { __proto__: null, json: json$3 };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Mapper table for TensorFlow spectral (FFT-family) ops. The fft_length
 * input of RFFT/IRFFT is declared but not supported by the executor.
 */
var json$2 = [
    {
        'tfOpName': 'FFT',
        'category': 'spectral',
        'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
    },
    {
        'tfOpName': 'IFFT',
        'category': 'spectral',
        'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
    },
    {
        'tfOpName': 'RFFT',
        'category': 'spectral',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'fft_length', 'type': 'number', 'notSupported': true }
        ]
    },
    {
        'tfOpName': 'IRFFT',
        'category': 'spectral',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'fft_length', 'type': 'number', 'notSupported': true }
        ]
    }
];

// Module-shaped export (null prototype avoids accidental inherited keys).
var spectral$1 = { __proto__: null, json: json$2 };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Mapper table for TensorFlow string ops. Multi-output ops list their
 * output tensor names under 'outputs'.
 */
var json$1 = [
    {
        'tfOpName': 'StringNGrams',
        'category': 'string',
        'inputs': [
            { 'start': 0, 'name': 'data', 'type': 'tensor' },
            { 'start': 1, 'name': 'dataSplits', 'type': 'tensor' }
        ],
        'attrs': [
            { 'tfName': 'separator', 'name': 'separator', 'type': 'string' },
            { 'tfName': 'ngram_widths', 'name': 'nGramWidths', 'type': 'number[]' },
            { 'tfName': 'left_pad', 'name': 'leftPad', 'type': 'string' },
            { 'tfName': 'right_pad', 'name': 'rightPad', 'type': 'string' },
            { 'tfName': 'pad_width', 'name': 'padWidth', 'type': 'number' },
            { 'tfName': 'preserve_short_sequences', 'name': 'preserveShortSequences', 'type': 'bool' }
        ],
        'outputs': ['ngrams', 'ngrams_splits']
    },
    {
        'tfOpName': 'StringSplit',
        'category': 'string',
        'inputs': [
            { 'start': 0, 'name': 'input', 'type': 'tensor' },
            { 'start': 1, 'name': 'delimiter', 'type': 'tensor' }
        ],
        'attrs': [{ 'tfName': 'skip_empty', 'name': 'skipEmpty', 'type': 'bool' }],
        'outputs': ['indices', 'values', 'shape']
    },
    {
        'tfOpName': 'StringToHashBucketFast',
        'category': 'string',
        'inputs': [{ 'start': 0, 'name': 'input', 'type': 'tensor' }],
        'attrs': [{ 'tfName': 'num_buckets', 'name': 'numBuckets', 'type': 'number' }]
    }
];

// Module-shaped export (null prototype avoids accidental inherited keys).
var string$1 = { __proto__: null, json: json$1 };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Mapper table for TensorFlow shape/layout transformation ops (Cast, pads,
 * reshape, space/batch conversions, broadcasts, ...).
 */
var json = [
    {
        'tfOpName': 'Cast',
        'category': 'transformation',
        'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }],
        'attrs': [
            { 'tfName': 'SrcT', 'name': 'sdtype', 'type': 'dtype', 'notSupported': true },
            { 'tfName': 'DstT', 'name': 'dtype', 'type': 'dtype' }
        ]
    },
    {
        'tfOpName': 'ExpandDims',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'axis', 'type': 'number' }
        ]
    },
    {
        'tfOpName': 'MirrorPad',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'padding', 'type': 'number[]' }
        ],
        'attrs': [{ 'tfName': 'mode', 'name': 'mode', 'type': 'string' }]
    },
    {
        'tfOpName': 'Pad',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'padding', 'type': 'number[]' }
        ],
        'attrs': [
            { 'tfName': 'constant_value', 'name': 'constantValue', 'type': 'number', 'defaultValue': 0 }
        ]
    },
    {
        'tfOpName': 'PadV2',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'padding', 'type': 'number[]' },
            { 'start': 2, 'name': 'constantValue', 'type': 'number', 'defaultValue': 0 }
        ]
    },
    {
        'tfOpName': 'Reshape',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'shape', 'type': 'number[]' }
        ]
    },
    {
        'tfOpName': 'Squeeze',
        'category': 'transformation',
        'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }],
        'attrs': [
            // 'squeeze_dims' is the pre-1.0 TensorFlow name for this attr.
            { 'tfName': 'axis', 'tfDeprecatedName': 'squeeze_dims', 'name': 'axis', 'type': 'number[]' }
        ]
    },
    {
        'tfOpName': 'SpaceToBatchND',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'blockShape', 'type': 'number[]' },
            { 'start': 2, 'name': 'paddings', 'type': 'number[]' }
        ]
    },
    {
        'tfOpName': 'BatchToSpaceND',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'blockShape', 'type': 'number[]' },
            { 'start': 2, 'name': 'crops', 'type': 'number[]' }
        ]
    },
    {
        'tfOpName': 'DepthToSpace',
        'category': 'transformation',
        'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }],
        'attrs': [
            { 'tfName': 'block_size', 'name': 'blockSize', 'type': 'number' },
            { 'tfName': 'data_format', 'name': 'dataFormat', 'type': 'string' }
        ]
    },
    {
        'tfOpName': 'BroadcastTo',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 'x', 'type': 'tensor' },
            { 'start': 1, 'name': 'shape', 'type': 'number[]' }
        ],
        'attrs': []
    },
    {
        'tfOpName': 'BroadcastArgs',
        'category': 'transformation',
        'inputs': [
            { 'start': 0, 'name': 's0', 'type': 'tensor' },
            { 'start': 1, 'name': 's1', 'type': 'tensor' }
        ],
        'attrs': []
    }
];

// Module-shaped export (null prototype avoids accidental inherited keys).
var transformation = { __proto__: null, json: json };
+
/**
 * Singleton that translates a TensorFlow GraphDef (and its function library)
 * into the in-memory graph representation used by the TensorFlow.js graph
 * executor. Op signatures come from the per-category JSON mapper tables
 * merged in the constructor.
 */
var OperationMapper = /** @class */ (function () {
    // Loads the op mapping from the JSON file.
    function OperationMapper() {
        var ops = [
            arithmetic, basicMath, control, convolution, creation, dynamic,
            evaluation, graph, hashTable, image$1, logical, matrices, normalization,
            reduction, sliceJoin, sparse$1, spectral$1, string$1, transformation
        ];
        // Flatten all category tables into one list, then index by TF op name.
        var mappersJson = [].concat.apply([], __spread(ops.map(function (op) { return op.json; })));
        this.opMappers = mappersJson.reduce(function (map, mapper) {
            map[mapper.tfOpName] = mapper;
            return map;
        }, {});
    }
    Object.defineProperty(OperationMapper, "Instance", {
        // Singleton instance for the mapper
        get: function () {
            return this._instance || (this._instance = new this());
        },
        enumerable: true,
        configurable: true
    });
    // Converts the model inference graph from Tensorflow GraphDef to local
    // representation for TensorFlow.js API
    OperationMapper.prototype.transformGraph = function (graph, signature) {
        var _this = this;
        if (signature === void 0) { signature = {}; }
        var tfNodes = graph.node;
        var placeholders = [];
        var weights = [];
        var initNodes = [];
        // Map every GraphDef node, bucketing placeholders, constants and
        // input-less (init) nodes as they are encountered.
        var nodes = tfNodes.reduce(function (map, node) {
            map[node.name] = _this.mapNode(node);
            if (node.op.startsWith('Placeholder')) {
                placeholders.push(map[node.name]);
            }
            else if (node.op === 'Const') {
                weights.push(map[node.name]);
            }
            else if (node.input == null || node.input.length === 0) {
                initNodes.push(map[node.name]);
            }
            return map;
        }, {});
        var inputs = [];
        var outputs = [];
        var inputNodeNameToKey = {};
        var outputNodeNameToKey = {};
        if (signature != null) {
            inputNodeNameToKey = this.mapSignatureEntries(signature.inputs);
            outputNodeNameToKey = this.mapSignatureEntries(signature.outputs);
        }
        var allNodes = Object.keys(nodes);
        // Wire parent/child edges from each node's input names; named outputs
        // are rewritten to their numeric "node:index" form.
        allNodes.forEach(function (key) {
            var node = nodes[key];
            node.inputNames.forEach(function (name, index) {
                var _a = __read(getNodeNameAndIndex(name), 3), nodeName = _a[0], outputName = _a[2];
                var inputNode = nodes[nodeName];
                if (inputNode.outputs != null) {
                    var outputIndex = inputNode.outputs.indexOf(outputName);
                    if (outputIndex !== -1) {
                        var inputName = nodeName + ":" + outputIndex;
                        // update the input name to use the mapped output index directly.
                        node.inputNames[index] = inputName;
                    }
                }
                node.inputs.push(inputNode);
                inputNode.children.push(node);
            });
        });
        // if signature has not outputs set, add any node that does not have
        // outputs.
        if (Object.keys(outputNodeNameToKey).length === 0) {
            allNodes.forEach(function (key) {
                var node = nodes[key];
                if (node.children.length === 0) {
                    outputs.push(node);
                }
            });
        }
        else {
            Object.keys(outputNodeNameToKey).forEach(function (name) {
                var _a = __read(getNodeNameAndIndex(name), 1), nodeName = _a[0];
                var node = nodes[nodeName];
                if (node != null) {
                    node.signatureKey = outputNodeNameToKey[name];
                    outputs.push(node);
                }
            });
        }
        // Inputs come from the signature when present; otherwise fall back to
        // all Placeholder nodes.
        if (Object.keys(inputNodeNameToKey).length > 0) {
            Object.keys(inputNodeNameToKey).forEach(function (name) {
                var _a = __read(getNodeNameAndIndex(name), 1), nodeName = _a[0];
                var node = nodes[nodeName];
                if (node) {
                    node.signatureKey = inputNodeNameToKey[name];
                    inputs.push(node);
                }
            });
        }
        else {
            inputs = placeholders;
        }
        var functions = {};
        if (graph.library != null && graph.library.function != null) {
            // Map each function in the GraphDef library, keyed by signature name.
            functions = graph.library.function.reduce(function (functions, func) {
                functions[func.signature.name] = _this.mapFunction(func);
                return functions;
            }, {});
        }
        var result = { nodes: nodes, inputs: inputs, outputs: outputs, weights: weights, placeholders: placeholders, signature: signature, functions: functions };
        // initNodes is only attached when non-empty so downstream `in` checks
        // stay cheap.
        if (initNodes.length > 0) {
            result.initNodes = initNodes;
        }
        return result;
    };
    // Inverts a signature entry map: tensor name -> signature key.
    OperationMapper.prototype.mapSignatureEntries = function (entries) {
        return Object.keys(entries || {})
            .reduce(function (prev, curr) {
            prev[entries[curr].name] = curr;
            return prev;
        }, {});
    };
    /**
     * Converts a single NodeDef into the local node representation,
     * decoding its attributes according to the registered op mapper.
     */
    OperationMapper.prototype.mapNode = function (node) {
        // Unsupported ops will cause an error at run-time (not parse time), since
        // they may not be used by the actual execution subgraph.
        var mapper = getRegisteredOp(node.op) || this.opMappers[node.op] || {};
        if (node.attr == null) {
            node.attr = {};
        }
        var newNode = {
            name: node.name,
            op: node.op,
            category: mapper.category,
            // A leading '^' marks a control-dependency input; strip it here.
            inputNames: (node.input ||
                []).map(function (input) { return input.startsWith('^') ? input.substr(1) : input; }),
            inputs: [],
            children: [],
            inputParams: {},
            attrParams: {},
            rawAttrs: node.attr,
            outputs: mapper.outputs
        };
        if (mapper.inputs != null) {
            newNode.inputParams =
                mapper.inputs.reduce(function (map, param) {
                    map[param.name] = {
                        type: param.type,
                        inputIndexStart: param.start,
                        inputIndexEnd: param.end
                    };
                    return map;
                }, {});
        }
        if (mapper.attrs != null) {
            newNode.attrParams =
                mapper.attrs.reduce(function (map, param) {
                    var type = param.type;
                    var value = undefined;
                    // Decode by declared type; every case retries under the
                    // deprecated TF attr name when the primary name is absent.
                    switch (param.type) {
                        case 'string':
                            value = getStringParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getStringParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'string[]':
                            value = getStringArrayParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getStringArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'number':
                            value = getNumberParam(node.attr, param.tfName, (param.defaultValue || 0));
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getNumberParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'number[]':
                            value = getNumericArrayParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getNumericArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'bool':
                            value = getBoolParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getBoolParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'bool[]':
                            value = getBoolArrayParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getBoolArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'shape':
                            value = getTensorShapeParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getTensorShapeParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'shape[]':
                            value = getTensorShapeArrayParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getTensorShapeArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'dtype':
                            value = getDtypeParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getDtypeParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'dtype[]':
                            value = getDtypeArrayParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getDtypeArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'func':
                            value = getFuncParam(node.attr, param.tfName, param.defaultValue);
                            if (value === undefined && !!param.tfDeprecatedName) {
                                value = getFuncParam(node.attr, param.tfDeprecatedName, param.defaultValue);
                            }
                            break;
                        case 'tensor':
                        case 'tensors':
                            // Tensor-valued attrs are resolved at execution time.
                            break;
                        default:
                            throw new Error("Unsupported param type: " + param.type + " for op: " + node.op);
                    }
                    map[param.name] = { value: value, type: type };
                    return map;
                }, {});
        }
        return newNode;
    };
    // map the TFunctionDef to TFJS graph object
    OperationMapper.prototype.mapFunction = function (functionDef) {
        var _this = this;
        var tfNodes = functionDef.nodeDef;
        var placeholders = [];
        var weights = [];
        var nodes = {};
        if (tfNodes != null) {
            nodes = tfNodes.reduce(function (map, node) {
                map[node.name] = _this.mapNode(node);
                if (node.op === 'Const') {
                    weights.push(map[node.name]);
                }
                return map;
            }, {});
        }
        var inputs = [];
        var outputs = [];
        // Synthesize a Placeholder node for each declared function argument.
        functionDef.signature.inputArg.forEach(function (arg) {
            var _a = __read(getNodeNameAndIndex(arg.name), 1), nodeName = _a[0];
            var node = {
                name: nodeName,
                op: 'Placeholder',
                inputs: [],
                inputNames: [],
                category: 'graph',
                inputParams: {},
                attrParams: { dtype: { value: parseDtypeParam(arg.type), type: 'dtype' } },
                children: []
            };
            node.signatureKey = arg.name;
            inputs.push(node);
            nodes[nodeName] = node;
        });
        var allNodes = Object.keys(nodes);
        // Same edge-wiring pass as transformGraph, over the function body.
        allNodes.forEach(function (key) {
            var node = nodes[key];
            node.inputNames.forEach(function (name, index) {
                var _a = __read(getNodeNameAndIndex(name), 3), nodeName = _a[0], outputName = _a[2];
                var inputNode = nodes[nodeName];
                if (inputNode.outputs != null) {
                    var outputIndex = inputNode.outputs.indexOf(outputName);
                    if (outputIndex !== -1) {
                        var inputName = nodeName + ":" + outputIndex;
                        // update the input name to use the mapped output index directly.
                        node.inputNames[index] = inputName;
                    }
                }
                node.inputs.push(inputNode);
                inputNode.children.push(node);
            });
        });
        // The function's `ret` map ties declared output args to body nodes.
        var returnNodeMap = functionDef.ret;
        functionDef.signature.outputArg.forEach(function (output) {
            var _a = __read(getNodeNameAndIndex(returnNodeMap[output.name]), 2), nodeName = _a[0], index = _a[1];
            var node = nodes[nodeName];
            if (node != null) {
                node.defaultOutput = index;
                outputs.push(node);
            }
        });
        var signature = this.mapArgsToSignature(functionDef);
        return { nodes: nodes, inputs: inputs, outputs: outputs, weights: weights, placeholders: placeholders, signature: signature };
    };
    // Builds a SignatureDef-shaped object from a function's arg lists.
    OperationMapper.prototype.mapArgsToSignature = function (functionDef) {
        var _this = this;
        return {
            methodName: functionDef.signature.name,
            inputs: functionDef.signature.inputArg.reduce(function (map, arg) {
                map[arg.name] = _this.mapArgToTensorInfo(arg);
                return map;
            }, {}),
            outputs: functionDef.signature.outputArg.reduce(function (map, arg) {
                map[arg.name] = _this.mapArgToTensorInfo(arg, functionDef.ret);
                return map;
            }, {}),
        };
    };
    // Converts one arg to tensor info, optionally renaming via the ret map.
    OperationMapper.prototype.mapArgToTensorInfo = function (arg, nameMap) {
        var name = arg.name;
        if (nameMap != null) {
            name = nameMap[name];
        }
        return { name: name, dtype: arg.type };
    };
    return OperationMapper;
}());
/**
 * Decodes a base64 string, preferring the host's `atob` (browser) and
 * falling back to Node's `Buffer`. Throws when neither facility exists in
 * the current environment.
 */
function decodeBase64(text) {
    var global = tfc.env().global;
    if (typeof global.atob !== 'undefined') {
        return global.atob(text);
    }
    else if (typeof Buffer !== 'undefined') {
        // `new Buffer()` is deprecated (Node DEP0005) and removed in recent
        // Node releases; `Buffer.from` is the supported equivalent.
        return Buffer.from(text, 'base64').toString();
    }
    else {
        throw new Error('Unable to decode base64 in this environment. ' +
            'Missing built-in atob() or Buffer()');
    }
}
/**
 * Decodes a TF string attr value: an array is treated as raw char codes,
 * anything else as a base64 payload. Lowercases unless keepCase is set.
 */
function parseStringParam(s, keepCase) {
    var decoded;
    if (Array.isArray(s)) {
        decoded = String.fromCharCode.apply(null, s);
    }
    else {
        decoded = decodeBase64(s);
    }
    if (keepCase) {
        return decoded;
    }
    return decoded.toLowerCase();
}
/** Reads a string attribute by name, returning `def` when absent. */
function getStringParam(attrs, name, def, keepCase) {
    if (keepCase === void 0) { keepCase = false; }
    var entry = attrs[name];
    if (entry == null) {
        return def;
    }
    return parseStringParam(entry.s, keepCase);
}
/** Reads a boolean attribute by name, returning `def` when absent. */
function getBoolParam(attrs, name, def) {
    var entry = attrs[name];
    if (entry) {
        return entry.b;
    }
    return def;
}
/**
 * Reads a numeric attribute by name; integer ('i') takes precedence over
 * float ('f'). Non-number values are parsed as base-10 integers.
 */
function getNumberParam(attrs, name, def) {
    var entry = attrs[name];
    var raw = def;
    if (entry) {
        if (entry['i'] != null) {
            raw = entry['i'];
        }
        else if (entry['f'] != null) {
            raw = entry['f'];
        }
    }
    return (typeof raw === 'number') ? raw : parseInt(raw, 10);
}
/**
 * Translates a TF DataType enum value (or its string name) into the
 * TensorFlow.js dtype string. Unknown dtypes yield null so the error
 * surfaces at execution time rather than parse time.
 */
function parseDtypeParam(value) {
    if (typeof (value) === 'string') {
        // tslint:disable-next-line:no-any
        value = DataType[value];
    }
    if (value === DataType.DT_FLOAT || value === DataType.DT_HALF ||
        value === DataType.DT_DOUBLE) {
        return 'float32';
    }
    if (value === DataType.DT_INT32 || value === DataType.DT_INT64 ||
        value === DataType.DT_INT8 || value === DataType.DT_UINT8) {
        return 'int32';
    }
    if (value === DataType.DT_BOOL) {
        return 'bool';
    }
    if (value === DataType.DT_STRING) {
        return 'string';
    }
    // Unknown dtype error will happen at runtime (instead of parse time),
    // since these nodes might not be used by the actual subgraph execution.
    return null;
}
/** Reads a function-valued attribute's name, returning `def` when absent. */
function getFuncParam(attrs, name, def) {
    var entry = attrs[name];
    return (entry && entry.func) ? entry.func.name : def;
}
/** Reads a dtype attribute by name, returning `def` when absent. */
function getDtypeParam(attrs, name, def) {
    var entry = attrs[name];
    if (!entry || !entry.type) {
        return def;
    }
    return parseDtypeParam(entry.type);
}
/** Reads a dtype-list attribute by name, returning `def` when absent. */
function getDtypeArrayParam(attrs, name, def) {
    var entry = attrs[name];
    if (!entry || !entry.list || !entry.list.type) {
        return def;
    }
    return entry.list.type.map(function (t) { return parseDtypeParam(t); });
}
/**
 * Converts a TensorShapeProto into a number[] shape. Unknown rank maps to
 * undefined; a missing dim list maps to the scalar shape [].
 */
function parseTensorShapeParam(shape) {
    if (shape.unknownRank) {
        return undefined;
    }
    if (shape.dim == null) {
        return [];
    }
    return shape.dim.map(function (dim) {
        var size = dim.size;
        return (typeof size === 'number') ? size : parseInt(size, 10);
    });
}
/** Reads a shape attribute by name, returning `def` when absent. */
function getTensorShapeParam(attrs, name, def) {
    var entry = attrs[name];
    if (!entry || !entry.shape) {
        return def;
    }
    return parseTensorShapeParam(entry.shape);
}
/**
 * Reads a numeric-list attribute by name: prefers the float list when it is
 * non-empty, otherwise the int list; parses string entries base-10.
 */
function getNumericArrayParam(attrs, name, def) {
    var entry = attrs[name];
    if (!entry) {
        return def;
    }
    var list = (entry.list.f && entry.list.f.length) ? entry.list.f : entry.list.i;
    return (list || []).map(function (v) {
        return (typeof v === 'number') ? v : parseInt(v, 10);
    });
}
/** Reads a string-list attribute by name, returning `def` when absent. */
function getStringArrayParam(attrs, name, def, keepCase) {
    if (keepCase === void 0) { keepCase = false; }
    var entry = attrs[name];
    if (!entry || !entry.list || !entry.list.s) {
        return def;
    }
    return entry.list.s.map(function (v) { return parseStringParam(v, keepCase); });
}
/** Reads a shape-list attribute by name, returning `def` when absent. */
function getTensorShapeArrayParam(attrs, name, def) {
    var entry = attrs[name];
    if (!entry || !entry.list || !entry.list.shape) {
        return def;
    }
    return entry.list.shape.map(function (v) { return parseTensorShapeParam(v); });
}
/** Reads a boolean-list attribute by name, returning `def` when absent. */
function getBoolArrayParam(attrs, name, def) {
    var entry = attrs[name];
    return (entry && entry.list && entry.list.b) ? entry.list.b : def;
}
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Helper class for lookup inputs and params for nodes in the model graph.
+ */
var NodeValueImpl = /** @class */ (function () {
    /**
     * Helper for custom-op executors: resolves a graph node's input tensors
     * and decodes its raw TensorFlow attributes into plain JS values.
     */
    function NodeValueImpl(node, tensorMap, context) {
        var _this = this;
        this.node = node;
        this.tensorMap = tensorMap;
        this.context = context;
        this.inputs = [];
        this.attrs = {};
        this.inputs = node.inputNames.map(function (name) { return _this.getInput(name); });
        if (node.rawAttrs != null) {
            var decoded = {};
            Object.keys(node.rawAttrs).forEach(function (key) {
                decoded[key] = _this.getAttr(key);
            });
            this.attrs = decoded;
        }
    }
    /**
     * Return the value of the attribute or input param.
     * @param name String: name of attribute or input param.
     */
    NodeValueImpl.prototype.getInput = function (name) {
        return getTensor(name, this.tensorMap, this.context);
    };
    /**
     * Decode a raw attribute by inspecting which protobuf field is set,
     * falling back to `defaultValue` when no recognized field is present.
     * @param name String: name of attribute or input param.
     */
    NodeValueImpl.prototype.getAttr = function (name, defaultValue) {
        var rawAttrs = this.node.rawAttrs;
        var value = rawAttrs[name];
        if (value.tensor != null) {
            return getTensor(name, this.tensorMap, this.context);
        }
        if (value.i != null || value.f != null) {
            return getNumberParam(rawAttrs, name, defaultValue);
        }
        if (value.s != null) {
            return getStringParam(rawAttrs, name, defaultValue);
        }
        if (value.b != null) {
            return getBoolParam(rawAttrs, name, defaultValue);
        }
        if (value.shape != null) {
            return getTensorShapeParam(rawAttrs, name, defaultValue);
        }
        if (value.type != null) {
            return getDtypeParam(rawAttrs, name, defaultValue);
        }
        var list = value.list;
        if (list != null) {
            if (list.i != null || list.f != null) {
                return getNumericArrayParam(rawAttrs, name, defaultValue);
            }
            if (list.s != null) {
                return getStringArrayParam(rawAttrs, name, defaultValue);
            }
            if (list.shape != null) {
                return getTensorShapeArrayParam(rawAttrs, name, defaultValue);
            }
            if (list.b != null) {
                return getBoolArrayParam(rawAttrs, name, defaultValue);
            }
            if (list.type != null) {
                return getDtypeArrayParam(rawAttrs, name, defaultValue);
            }
        }
        return defaultValue;
    };
    return NodeValueImpl;
}());
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var EPSILON_FLOAT32 = 1e-7;
+ var EPSILON_FLOAT16 = 1e-4;
+ /**
+ * The interface that defines the kernels that should be implemented when
+ * adding a new backend. New backends don't need to implement every one of the
+ * methods, this can be done gradually (throw an error for unimplemented
+ * methods).
+ */
+ var KernelBackend = /** @class */ (function () {
+ function KernelBackend() {
+ }
+ KernelBackend.prototype.refCount = function (dataId) {
+ return notYetImplemented('refCount');
+ };
+ KernelBackend.prototype.incRef = function (dataId) {
+ return notYetImplemented('incRef');
+ };
+ KernelBackend.prototype.timerAvailable = function () {
+ return true;
+ };
+ KernelBackend.prototype.time = function (f) {
+ return notYetImplemented('time');
+ };
+ KernelBackend.prototype.read = function (dataId) {
+ return notYetImplemented('read');
+ };
+ KernelBackend.prototype.readSync = function (dataId) {
+ return notYetImplemented('readSync');
+ };
+ KernelBackend.prototype.numDataIds = function () {
+ return notYetImplemented('numDataIds');
+ };
+ KernelBackend.prototype.disposeData = function (dataId, force) {
+ return notYetImplemented('disposeData');
+ };
+ KernelBackend.prototype.write = function (values, shape, dtype) {
+ return notYetImplemented('write');
+ };
+ KernelBackend.prototype.move = function (dataId, values, shape, dtype, refCount) {
+ return notYetImplemented('move');
+ };
+ KernelBackend.prototype.memory = function () {
+ return notYetImplemented('memory');
+ };
+ /** Returns the highest precision for floats in bits (e.g. 16 or 32) */
+ KernelBackend.prototype.floatPrecision = function () {
+ return notYetImplemented('floatPrecision');
+ };
+ /** Returns the smallest representable number. */
+ KernelBackend.prototype.epsilon = function () {
+ return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;
+ };
+ KernelBackend.prototype.dispose = function () {
+ return notYetImplemented('dispose');
+ };
+ return KernelBackend;
+ }());
/**
 * Raises a descriptive error for a kernel or backend method that the active
 * backend does not provide.
 * @param kernelName Name of the missing kernel/method.
 * @throws {Error} always.
 */
function notYetImplemented(kernelName) {
    var message = "'" + kernelName + "' not yet implemented or not found in the registry. " +
        "This kernel may not be supported by the tfjs backend you have chosen";
    throw new Error(message);
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Asserts that the expression is true. Otherwise throws an error with the
+ * provided message.
+ *
+ * ```js
+ * const x = 2;
+ * tf.util.assert(x === 2, 'x is not 2');
+ * ```
+ *
+ * @param expr The expression to assert (as a boolean).
+ * @param msg A function that returns the message to report when throwing an
+ * error. We use a function for performance reasons.
+ *
+ * @doc {heading: 'Util', namespace: 'util'}
+ */
+ function assert(expr, msg) {
+ if (!expr) {
+ throw new Error(typeof msg === 'string' ? msg : msg());
+ }
+ }
+ function assertShapesMatch(shapeA, shapeB, errorMessagePrefix) {
+ if (errorMessagePrefix === void 0) { errorMessagePrefix = ''; }
+ assert(arraysEqual(shapeA, shapeB), function () { return errorMessagePrefix + (" Shapes " + shapeA + " and " + shapeB + " must match"); });
+ }
+ function assertNonNull(a) {
+ assert(a != null, function () { return "The input to the tensor constructor must be a non-null value."; });
+ }
+ // NOTE: We explicitly type out what T extends instead of any so that
+ // util.flatten on a nested array of number doesn't try to infer T as a
+ // number[][], causing us to explicitly type util.flatten<number>().
+ /**
+ * Flattens an arbitrarily nested array.
+ *
+ * ```js
+ * const a = [[1, 2], [3, 4], [5, [6, [7]]]];
+ * const flat = tf.util.flatten(a);
+ * console.log(flat);
+ * ```
+ *
+ * @param arr The nested array to flatten.
+ * @param result The destination array which holds the elements.
+ * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults
+ * to false.
+ *
+ * @doc {heading: 'Util', namespace: 'util'}
+ */
+ function flatten(arr, result, skipTypedArray) {
+ if (result === void 0) { result = []; }
+ if (skipTypedArray === void 0) { skipTypedArray = false; }
+ if (result == null) {
+ result = [];
+ }
+ if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) {
+ for (var i = 0; i < arr.length; ++i) {
+ flatten(arr[i], result, skipTypedArray);
+ }
+ }
+ else {
+ result.push(arr);
+ }
+ return result;
+ }
+ /**
+ * Returns the size (number of elements) of the tensor given its shape.
+ *
+ * ```js
+ * const shape = [3, 4, 2];
+ * const size = tf.util.sizeFromShape(shape);
+ * console.log(size);
+ * ```
+ *
+ * @doc {heading: 'Util', namespace: 'util'}
+ */
+ function sizeFromShape(shape) {
+ if (shape.length === 0) {
+ // Scalar.
+ return 1;
+ }
+ var size = shape[0];
+ for (var i = 1; i < shape.length; i++) {
+ size *= shape[i];
+ }
+ return size;
+ }
/**
 * Returns true when the two array-likes are the same reference, or have the
 * same length and strictly-equal elements. Either argument being null or
 * undefined (and not identical to the other) yields false.
 */
function arraysEqual(n1, n2) {
    if (n1 === n2) {
        return true;
    }
    if (n1 == null || n2 == null || n1.length !== n2.length) {
        return false;
    }
    for (var i = 0, len = n1.length; i < len; ++i) {
        if (n1[i] !== n2[i]) {
            return false;
        }
    }
    return true;
}
/** Returns true when `a % 1` is exactly 0 (i.e. an integral value). */
function isInt(a) {
    var remainder = a % 1;
    return remainder === 0;
}
/** Pads `a` on the right with spaces up to `size` characters. */
function rightPad(a, size) {
    var deficit = size - a.length;
    return deficit > 0 ? a + ' '.repeat(deficit) : a;
}
/**
 * Normalizes an axis argument against a shape: null means "all axes", a
 * scalar is wrapped in an array, and negative axes are wrapped to positive
 * indices. Throws when an axis is out of range or non-integer.
 */
function parseAxisParam(axis, shape) {
    var rank = shape.length;
    // Default to every axis; otherwise coerce a scalar axis into an array.
    var axes = axis == null ? shape.map(function (s, i) { return i; }) : [].concat(axis);
    assert(axes.every(function (ax) { return ax >= -rank && ax < rank; }), function () {
        return "All values in axis param must be in range [-" + rank + ", " + rank + ") but " +
            ("got axis " + axes);
    });
    assert(axes.every(function (ax) { return isInt(ax); }), function () {
        return "All values in axis param must be integers but " +
            ("got axis " + axes);
    });
    // Wrap negative axes into [0, rank).
    return axes.map(function (a) { return a < 0 ? rank + a : a; });
}
/** Reduces the shape by removing all dimensions of shape 1. */
function squeezeShape(shape, axis) {
    var newShape = []; // dimensions that survive the squeeze
    var keptDims = []; // original indices of the surviving dimensions
    // An explicitly empty axis array is treated the same as no axis at all:
    // squeeze every size-1 dimension.
    var isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;
    // NOTE(review): numeric axes are sorted with the default (lexicographic)
    // comparator; this is only guaranteed correct for ranks < 10 — confirm
    // against upstream tfjs behavior.
    var axes = (axis == null || isEmptyArray) ?
        null :
        parseAxisParam(axis, shape).sort();
    var j = 0; // index of the next candidate in `axes`
    for (var i = 0; i < shape.length; ++i) {
        if (axes != null) {
            // An explicitly requested axis must actually have size 1.
            if (axes[j] === i && shape[i] !== 1) {
                throw new Error("Can't squeeze axis " + i + " since its dim '" + shape[i] + "' is not 1");
            }
            // Size-1 dims that were NOT requested are preserved.
            if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {
                newShape.push(shape[i]);
                keptDims.push(i);
            }
            // Advance past any requested axis at or before position i.
            if (axes[j] <= i) {
                j++;
            }
        }
        // Dims larger than 1 always survive.
        if (shape[i] !== 1) {
            newShape.push(shape[i]);
            keptDims.push(i);
        }
    }
    return { newShape: newShape, keptDims: keptDims };
}
/**
 * Allocates a zero-filled typed array of `size` elements for a numeric
 * dtype ('float32' when dtype is null/undefined). Throws for any other
 * dtype, including 'string'.
 */
function getTypedArrayFromDType(dtype, size) {
    switch (dtype == null ? 'float32' : dtype) {
        case 'float32':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        default:
            throw new Error("Unknown data type " + dtype);
    }
}
/**
 * Allocates backing storage of `size` elements for a dtype: a typed array
 * for numeric dtypes ('float32' when dtype is null/undefined), a plain
 * Array for 'string'. Throws on any other dtype.
 */
function getArrayFromDType(dtype, size) {
    switch (dtype == null ? 'float32' : dtype) {
        case 'float32':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        case 'string':
            return new Array(size);
        default:
            throw new Error("Unknown data type " + dtype);
    }
}
/**
 * Throws when any value in `vals` is NaN or non-finite, since such values
 * cannot be represented in a numeric tensor upload.
 */
function checkConversionForErrors(vals, dtype) {
    for (var i = 0; i < vals.length; ++i) {
        var num = vals[i];
        if (!isFinite(num) || isNaN(num)) {
            throw Error("A tensor of type " + dtype + " being uploaded contains " + num + ".");
        }
    }
}
/** Returns true if the dtype is valid. */
function isValidDtype(dtype) {
    return ['bool', 'complex64', 'float32', 'int32', 'string'].indexOf(dtype) !== -1;
}
/** Returns true for the typed-array kinds used to store tensor data here. */
function isTypedArray(a) {
    return [Float32Array, Int32Array, Uint8Array, Uint8ClampedArray]
        .some(function (ctor) { return a instanceof ctor; });
}
/**
 * Returns the per-element byte width for a numeric dtype. Throws for
 * unknown dtypes (including 'string', whose width is not fixed).
 */
function bytesPerElement(dtype) {
    switch (dtype) {
        case 'float32':
        case 'int32':
            return 4;
        case 'complex64':
            return 8;
        case 'bool':
            return 1;
        default:
            throw new Error("Unknown dtype " + dtype);
    }
}
+ /**
+ * Returns the approximate number of bytes allocated in the string array - 2
+ * bytes per character. Computing the exact bytes for a native string in JS is
+ * not possible since it depends on the encoding of the html page that serves
+ * the website.
+ */
+ function bytesFromStringArray(arr) {
+ if (arr == null) {
+ return 0;
+ }
+ var bytes = 0;
+ arr.forEach(function (x) { return bytes += x.length; });
+ return bytes;
+ }
/** Returns true if the value is a string. */
function isString(value) {
    return value instanceof String || typeof value === 'string';
}
/** Returns true if the value is a boolean primitive. */
function isBoolean(value) {
    return typeof value === 'boolean';
}
/** Returns true if the value is a number primitive. */
function isNumber(value) {
    return typeof value === 'number';
}
/**
 * Infers a tensor dtype from a (possibly nested) value. Arrays are probed
 * via their first element; unknown values fall back to 'float32'.
 */
function inferDtype(values) {
    if (Array.isArray(values)) {
        // Peek at the first element; assumes a homogeneous nested array.
        return inferDtype(values[0]);
    }
    if (values instanceof Float32Array) {
        return 'float32';
    }
    if (values instanceof Int32Array || values instanceof Uint8Array ||
        values instanceof Uint8ClampedArray) {
        return 'int32';
    }
    if (isNumber(values)) {
        return 'float32';
    }
    if (isString(values)) {
        return 'string';
    }
    if (isBoolean(values)) {
        return 'bool';
    }
    return 'float32';
}
/** Duck-type test for callables: requires constructor, call and apply. */
function isFunction(f) {
    return Boolean(f && f.constructor && f.call && f.apply);
}
/**
 * Computes row-major strides for a shape. The last dimension has an
 * implicit stride of 1, so the result has rank-1 entries; shapes of rank
 * 0 or 1 yield an empty array.
 */
function computeStrides(shape) {
    var rank = shape.length;
    if (rank < 2) {
        return [];
    }
    // Walk from the right, accumulating the running product of dimensions.
    var strides = [];
    var product = 1;
    for (var i = rank - 1; i >= 1; --i) {
        product *= shape[i];
        strides.unshift(product);
    }
    return strides;
}
/**
 * Recursively builds a nested plain array of the given shape from the flat
 * buffer `a`, starting at `offset`. Complex data doubles the innermost
 * dimension (interleaved real/imag pairs).
 */
function createNestedArray(offset, shape, a, isComplex) {
    if (isComplex === void 0) { isComplex = false; }
    var mult = isComplex ? 2 : 1;
    if (shape.length === 1) {
        // Innermost dimension: copy the raw values out of the buffer.
        var count = shape[0] * mult;
        var leaf = new Array();
        for (var i = 0; i < count; ++i) {
            leaf[i] = a[offset + i];
        }
        return leaf;
    }
    var head = shape[0];
    var rest = shape.slice(1);
    // Number of flat elements spanned by one entry along this dimension.
    var stride = rest.reduce(function (acc, c) { return acc * c; }) * mult;
    var out = new Array();
    for (var i = 0; i < head; ++i) {
        out[i] = createNestedArray(offset + i * stride, rest, a, isComplex);
    }
    return out;
}
// Provide a nested array of TypedArray in given shape.
function toNestedArray(shape, a, isComplex) {
    if (isComplex === void 0) { isComplex = false; }
    if (shape.length === 0) {
        // Scalar type should return a single number.
        return a[0];
    }
    var size = shape.reduce(function (acc, c) { return acc * c; }) * (isComplex ? 2 : 1);
    if (size === 0) {
        // A tensor with shape zero should be turned into empty list.
        return [];
    }
    if (size !== a.length) {
        throw new Error("[" + shape + "] does not match the input size " + a.length + (isComplex ? ' for a complex tensor' : '') + ".");
    }
    return createNestedArray(0, shape, a, isComplex);
}
/** Returns a typed array for `dtype` with every element set to 1. */
function makeOnesTypedArray(size, dtype) {
    // TypedArray.prototype.fill returns the array itself.
    return makeZerosTypedArray(size, dtype).fill(1);
}
/**
 * Returns a zero-initialized typed array for `dtype` ('float32' when
 * dtype is null/undefined; 'complex64' also uses Float32Array storage).
 */
function makeZerosTypedArray(size, dtype) {
    switch (dtype == null ? 'float32' : dtype) {
        case 'float32':
        case 'complex64':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        default:
            throw new Error("Unknown data type " + dtype);
    }
}
/** Asserts every dimension of `shape` is a non-negative integer. */
function assertNonNegativeIntegerDimensions(shape) {
    shape.forEach(function (dimSize) {
        var ok = Number.isInteger(dimSize) && dimSize >= 0;
        assert(ok, function () {
            return "Tensor must have a shape comprised of positive integers but got " +
                ("shape [" + shape + "].");
        });
    });
}
+ /**
+ * This method asserts whether an object is a Promise instance.
+ * @param object
+ */
+ // tslint:disable-next-line: no-any
+ function isPromise(object) {
+ // We chose to not use 'obj instanceOf Promise' for two reasons:
+ // 1. It only reliably works for es6 Promise, not other Promise
+ // implementations.
+ // 2. It doesn't work with framework that uses zone.js. zone.js monkey patch
+ // the async calls, so it is possible the obj (patched) is comparing to a
+ // pre-patched Promise.
+ return object && object.then && typeof object.then === 'function';
+ }
+
// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.
// Query-string key checked by Environment.populateURLFlags() below.
var TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';
+ /**
+ * The environment contains evaluated flags as well as the registered platform.
+ * This is always used as a global singleton and can be retrieved with
+ * `tf.env()`.
+ *
+ * @doc {heading: 'Environment'}
+ */
var Environment = /** @class */ (function () {
    // tslint:disable-next-line: no-any
    // `global` is the host's global object (window/global/self), used to read
    // `location.search` for URL flag overrides.
    function Environment(global) {
        this.global = global;
        this.flags = {}; // cache of evaluated flag values
        this.flagRegistry = {}; // flagName -> {evaluationFn, setHook}
        this.urlFlags = {}; // overrides parsed from the page URL
        // Jasmine spies on this in 'environment_test.ts'
        this.getQueryParams = getQueryParams;
        this.populateURLFlags();
    }
    // Records the active platform; warns (outside tests/prod) if one was
    // already set, then overwrites it.
    Environment.prototype.setPlatform = function (platformName, platform) {
        if (this.platform != null) {
            if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {
                console.warn("Platform " + this.platformName + " has already been set. " +
                    ("Overwriting the platform with " + platform + "."));
            }
        }
        this.platformName = platformName;
        this.platform = platform;
    };
    // Registers a flag's evaluation function and optional set-hook, applying
    // any URL override captured before the flag existed.
    Environment.prototype.registerFlag = function (flagName, evaluationFn, setHook) {
        this.flagRegistry[flagName] = { evaluationFn: evaluationFn, setHook: setHook };
        // Override the flag value from the URL. This has to happen here because
        // the environment is initialized before flags get registered.
        if (this.urlFlags[flagName] != null) {
            var flagValue = this.urlFlags[flagName];
            if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {
                console.warn("Setting feature override from URL " + flagName + ": " + flagValue + ".");
            }
            this.set(flagName, flagValue);
        }
    };
    // Async flag lookup (transpiled async/await): returns the cached value or
    // awaits the flag's evaluation function and caches the result.
    Environment.prototype.getAsync = function (flagName) {
        return __awaiter(this, void 0, void 0, function () {
            var _a, _b;
            return __generator(this, function (_c) {
                switch (_c.label) {
                    case 0:
                        if (flagName in this.flags) {
                            return [2 /*return*/, this.flags[flagName]];
                        }
                        _a = this.flags;
                        _b = flagName;
                        return [4 /*yield*/, this.evaluateFlag(flagName)];
                    case 1:
                        _a[_b] = _c.sent();
                        return [2 /*return*/, this.flags[flagName]];
                }
            });
        });
    };
    // Synchronous flag lookup; throws if the flag's evaluation function is
    // asynchronous (callers must use getAsync() instead).
    Environment.prototype.get = function (flagName) {
        if (flagName in this.flags) {
            return this.flags[flagName];
        }
        var flagValue = this.evaluateFlag(flagName);
        if (isPromise(flagValue)) {
            throw new Error("Flag " + flagName + " cannot be synchronously evaluated. " +
                "Please use getAsync() instead.");
        }
        this.flags[flagName] = flagValue;
        return this.flags[flagName];
    };
    // Typed convenience wrappers around get().
    Environment.prototype.getNumber = function (flagName) {
        return this.get(flagName);
    };
    Environment.prototype.getBool = function (flagName) {
        return this.get(flagName);
    };
    // Returns the raw cache of evaluated flags (not a copy).
    Environment.prototype.getFlags = function () {
        return this.flags;
    };
    Object.defineProperty(Environment.prototype, "features", {
        // For backwards compatibility.
        get: function () {
            return this.flags;
        },
        enumerable: true,
        configurable: true
    });
    // Sets a registered flag's value and fires its setHook, if any.
    Environment.prototype.set = function (flagName, value) {
        if (this.flagRegistry[flagName] == null) {
            throw new Error("Cannot set flag " + flagName + " as it has not been registered.");
        }
        this.flags[flagName] = value;
        if (this.flagRegistry[flagName].setHook != null) {
            this.flagRegistry[flagName].setHook(value);
        }
    };
    // Runs the registered evaluation function; may return a value or Promise.
    Environment.prototype.evaluateFlag = function (flagName) {
        if (this.flagRegistry[flagName] == null) {
            throw new Error("Cannot evaluate flag '" + flagName + "': no evaluation function found.");
        }
        return this.flagRegistry[flagName].evaluationFn();
    };
    // Replaces the whole flag cache with a shallow copy of `flags`.
    Environment.prototype.setFlags = function (flags) {
        this.flags = Object.assign({}, flags);
    };
    // Clears all cached flags and re-reads URL overrides.
    Environment.prototype.reset = function () {
        this.flags = {};
        this.urlFlags = {};
        this.populateURLFlags();
    };
    // Parses ?tfjsflags=K1:V1,K2:V2 from the global location, if present,
    // into this.urlFlags.
    Environment.prototype.populateURLFlags = function () {
        var _this = this;
        if (typeof this.global === 'undefined' ||
            typeof this.global.location === 'undefined' ||
            typeof this.global.location.search === 'undefined') {
            return;
        }
        var urlParams = this.getQueryParams(this.global.location.search);
        if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {
            var keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');
            keyValues.forEach(function (keyValue) {
                var _a = __read(keyValue.split(':'), 2), key = _a[0], value = _a[1];
                _this.urlFlags[key] = parseValue(key, value);
            });
        }
    };
    return Environment;
}());
/**
 * Parses a URL query string ("?a=1&b=2") into a plain object of decoded
 * key/value pairs. Keys without a value map to the empty string.
 */
function getQueryParams(queryString) {
    var params = {};
    // The replace callback receives (match, key, value, offset, whole); the
    // replacement result itself is discarded — we only use it to iterate.
    queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, function (s) {
        var groups = Array.prototype.slice.call(arguments, 1);
        decodeParam(params, groups[0], groups[1]);
        return groups.join('=');
    });
    return params;
}
/** Stores one decoded key/value pair; a missing value becomes ''. */
function decodeParam(params, name, value) {
    var key = decodeURIComponent(name);
    params[key] = decodeURIComponent(value || '');
}
/**
 * Parses a URL flag value (case-insensitive) into a boolean or number;
 * throws for anything else.
 */
function parseValue(flagName, value) {
    var lowered = value.toLowerCase();
    if (lowered === 'true') {
        return true;
    }
    if (lowered === 'false') {
        return false;
    }
    // Accept only strings that round-trip through Number unchanged.
    if ("" + +lowered === lowered) {
        return +lowered;
    }
    throw new Error("Could not parse value flag value " + lowered + " for flag " + flagName + ".");
}
+ /**
+ * Returns the current environment (a global singleton).
+ *
+ * The environment object contains the evaluated feature values as well as the
+ * active platform.
+ *
+ * @doc {heading: 'Environment'}
+ */
+ function env() {
+ return ENV;
+ }
+ var ENV = null;
+ function setEnvironmentGlobal(environment) {
+ ENV = environment;
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Note that the identifier globalNameSpace is scoped to this module, but will
// always resolve to the same global object regardless of how the module is
// resolved.
// tslint:disable-next-line:no-any
var globalNameSpace;
// tslint:disable-next-line:no-any
/** Lazily resolves and caches the host environment's global object. */
function getGlobalNamespace() {
    if (globalNameSpace != null) {
        return globalNameSpace;
    }
    // Probe, in order: browser window, Node global, Node process, worker self.
    var ns;
    if (typeof (window) !== 'undefined') {
        ns = window;
    }
    else if (typeof (global) !== 'undefined') {
        ns = global;
    }
    else if (typeof (process) !== 'undefined') {
        ns = process;
    }
    else if (typeof (self) !== 'undefined') {
        ns = self;
    }
    else {
        throw new Error('Could not find a global object');
    }
    globalNameSpace = ns;
    return globalNameSpace;
}
// tslint:disable-next-line:no-any
/** Returns the shared `_tfGlobals` Map hung off the global object. */
function getGlobalMap() {
    var ns = getGlobalNamespace();
    if (ns._tfGlobals == null) {
        ns._tfGlobals = new Map();
    }
    return ns._tfGlobals;
}
/**
 * Returns a globally accessible 'singleton' object.
 *
 * @param key the name of the object
 * @param init a function used to initialize this object the first time it
 *     is fetched.
 */
function getGlobal(key, init) {
    var globalMap = getGlobalMap();
    if (!globalMap.has(key)) {
        globalMap.set(key, init());
    }
    return globalMap.get(key);
}
+
// Canonical kernel-name constants. Each value is the string key under which
// a kernel implementation is registered/looked up in the kernel registry.
var Abs = 'Abs';
var Acos = 'Acos';
var Acosh = 'Acosh';
var Add = 'Add';
var AddN = 'AddN';
var All = 'All';
var Any = 'Any';
var ArgMax = 'ArgMax';
var ArgMin = 'ArgMin';
var Asin = 'Asin';
var Asinh = 'Asinh';
var Atan = 'Atan';
var Atanh = 'Atanh';
var Atan2 = 'Atan2';
var AvgPool = 'AvgPool';
var AvgPool3D = 'AvgPool3D';
var BatchMatMul = 'BatchMatMul';
var BatchToSpaceND = 'BatchToSpaceND';
var Bincount = 'Bincount';
var BroadcastArgs = 'BroadcastArgs';
var Cast = 'Cast';
var Ceil = 'Ceil';
var ClipByValue = 'ClipByValue';
var Complex = 'Complex';
var ComplexAbs = 'ComplexAbs';
var Concat = 'Concat';
var Conv2D = 'Conv2D';
var Conv2DBackpropFilter = 'Conv2DBackpropFilter';
var Conv2DBackpropInput = 'Conv2DBackpropInput';
var Conv3D = 'Conv3D';
var Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';
var Cos = 'Cos';
var Cosh = 'Cosh';
var Cumsum = 'Cumsum';
var CropAndResize = 'CropAndResize';
var DenseBincount = 'DenseBincount';
var DepthToSpace = 'DepthToSpace';
var DepthwiseConv2dNative = 'DepthwiseConv2dNative';
var DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter';
var DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput';
var Diag = 'Diag';
var Dilation2D = 'Dilation2D';
var RealDiv = 'RealDiv';
var Einsum = 'Einsum';
var Elu = 'Elu';
var Erf = 'Erf';
var Equal = 'Equal';
var Exp = 'Exp';
var ExpandDims = 'ExpandDims';
var Expm1 = 'Expm1';
var FFT = 'FFT';
var Fill = 'Fill';
var FlipLeftRight = 'FlipLeftRight';
var Floor = 'Floor';
var FloorDiv = 'FloorDiv';
var FusedBatchNorm = 'FusedBatchNorm';
var GatherV2 = 'GatherV2';
var GatherNd = 'GatherNd';
var Greater = 'Greater';
var GreaterEqual = 'GreaterEqual';
var Identity = 'Identity';
var IFFT = 'IFFT';
var Imag = 'Imag';
var IsFinite = 'IsFinite';
var IsInf = 'IsInf';
var IsNan = 'IsNan';
var LeakyRelu = 'LeakyRelu';
var Less = 'Less';
var LessEqual = 'LessEqual';
var LinSpace = 'LinSpace';
var Log = 'Log';
var Log1p = 'Log1p';
var LogicalAnd = 'LogicalAnd';
var LogicalNot = 'LogicalNot';
var LogicalOr = 'LogicalOr';
var LRN = 'LRN';
var Max = 'Max';
var Maximum = 'Maximum';
var MaxPool = 'MaxPool';
var MaxPool3D = 'MaxPool3D';
var MaxPoolWithArgmax = 'MaxPoolWithArgmax';
var Mean = 'Mean';
var Min = 'Min';
var Minimum = 'Minimum';
var MirrorPad = 'MirrorPad';
var Mod = 'Mod';
var Multinomial = 'Multinomial';
var Multiply = 'Multiply';
var Neg = 'Neg';
var NotEqual = 'NotEqual';
var NonMaxSuppressionV3 = 'NonMaxSuppressionV3';
var NonMaxSuppressionV4 = 'NonMaxSuppressionV4';
var NonMaxSuppressionV5 = 'NonMaxSuppressionV5';
var OnesLike = 'OnesLike';
var OneHot = 'OneHot';
var Pack = 'Pack';
var PadV2 = 'PadV2';
var Pow = 'Pow';
var Prelu = 'Prelu';
var Prod = 'Prod';
var Range = 'Range';
var Real = 'Real';
var Reciprocal = 'Reciprocal';
var Relu = 'Relu';
var Reshape = 'Reshape';
var ResizeNearestNeighbor = 'ResizeNearestNeighbor';
var ResizeBilinear = 'ResizeBilinear';
var Relu6 = 'Relu6';
var Reverse = 'Reverse';
var Round = 'Round';
var Rsqrt = 'Rsqrt';
var ScatterNd = 'ScatterNd';
var Select = 'Select';
var Selu = 'Selu';
var Slice = 'Slice';
var Sin = 'Sin';
var Sinh = 'Sinh';
var Sign = 'Sign';
var Sigmoid = 'Sigmoid';
var Softplus = 'Softplus';
var Sqrt = 'Sqrt';
var Sum = 'Sum';
var SpaceToBatchND = 'SpaceToBatchND';
var SplitV = 'SplitV';
var Softmax = 'Softmax';
var SparseFillEmptyRows = 'SparseFillEmptyRows';
var SparseReshape = 'SparseReshape';
var SparseSegmentMean = 'SparseSegmentMean';
var SparseSegmentSum = 'SparseSegmentSum';
var SparseToDense = 'SparseToDense';
var SquaredDifference = 'SquaredDifference';
var StridedSlice = 'StridedSlice';
var StringNGrams = 'StringNGrams';
var StringSplit = 'StringSplit';
var StringToHashBucketFast = 'StringToHashBucketFast';
var Sub = 'Sub';
var Tan = 'Tan';
var Tanh = 'Tanh';
var Tile = 'Tile';
var TopK = 'TopK';
var Transform = 'Transform';
var Transpose = 'Transpose';
var Unique = 'Unique';
var Unpack = 'Unpack';
var UnsortedSegmentSum = 'UnsortedSegmentSum';
var ZerosLike = 'ZerosLike';
/**
 * TensorFlow.js-only kernels
 */
var Step = 'Step';
var RotateWithOffset = 'RotateWithOffset';
var _FusedMatMul = '_FusedMatMul';
var FusedConv2D = 'FusedConv2D';
var FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';
+
/** console.warn passthrough, silenced when IS_TEST or PROD flags are set. */
function warn() {
    var msg = [];
    for (var _i = 0; _i < arguments.length; _i++) {
        msg[_i] = arguments[_i];
    }
    var silenced = env().getBool('IS_TEST') || env().getBool('PROD');
    if (silenced) {
        return;
    }
    console.warn.apply(console, __spread(msg));
}
+
// Global maps of registered kernels and gradients, shared across bundles
// via the getGlobal singleton mechanism.
var kernelRegistry = getGlobal('kernelRegistry', function () { return new Map(); });
var gradRegistry = getGlobal('gradRegistry', function () { return new Map(); });
/**
 * Returns the kernel function (code) associated with the provided names.
 *
 * @param kernelName The official name of the kernel.
 * @param backendName The official name of the backend.
 */
function getKernel(kernelName, backendName) {
    return kernelRegistry.get(makeKey(kernelName, backendName));
}
/**
 * Returns the registered gradient info associated with the provided kernel.
 * @param kernelName The official TF kernel name.
 */
function getGradient(kernelName) {
    return gradRegistry.get(kernelName);
}
/** Collects every kernel config registered for `backendName`. */
function getKernelsForBackend(backendName) {
    var result = [];
    var it = kernelRegistry.entries();
    for (var step = it.next(); !step.done; step = it.next()) {
        var entry = __read(step.value, 2);
        var key = entry[0];
        var config = entry[1];
        // Registry keys are "<backend>_<kernel>"; the segment before the
        // first '_' is the backend name.
        if (key.split('_')[0] === backendName) {
            result.push(config);
        }
    }
    return result;
}
/** Builds the registry key for a (kernel, backend) pair. */
function makeKey(kernelName, backendName) {
    return backendName + "_" + kernelName;
}
+
// Alias used by the rest of the bundle for the Long constructor below.
var long = Long$1;
/**
 * wasm optimizations, to do native i64 multiplication and divide
 */
// Exports of a tiny precompiled wasm module (mul, div_s, div_u, rem_s,
// rem_u, get_high) used for fast 64-bit arithmetic; null when the host has
// no WebAssembly support. The byte array below IS the wasm binary.
var wasm = null;
try {
    wasm = new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([
        0, 97, 115, 109, 1, 0, 0, 0, 1, 13, 2, 96, 0, 1, 127, 96, 4, 127, 127, 127, 127, 1, 127, 3, 7, 6, 0, 1, 1, 1, 1, 1, 6, 6, 1, 127, 1, 65, 0, 11, 7, 50, 6, 3, 109, 117, 108, 0, 1, 5, 100, 105, 118, 95, 115, 0, 2, 5, 100, 105, 118, 95, 117, 0, 3, 5, 114, 101, 109, 95, 115, 0, 4, 5, 114, 101, 109, 95, 117, 0, 5, 8, 103, 101, 116, 95, 104, 105, 103, 104, 0, 0, 10, 191, 1, 6, 4, 0, 35, 0, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 126, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 127, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 128, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 129, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 130, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11
    ])), {}).exports;
}
catch (e) {
    // no wasm support :(
}
+ /**
+ * Constructs a 64 bit two's-complement integer, given its low and high 32 bit values as *signed* integers.
+ * See the from* functions below for more convenient ways of constructing Longs.
+ * @exports Long
+ * @class A Long class for representing a 64 bit two's-complement integer value.
+ * @param {number} low The low (signed) 32 bits of the long
+ * @param {number} high The high (signed) 32 bits of the long
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @constructor
+ */
+ function Long$1(low, high, unsigned) {
+ /**
+ * The low 32 bits as a signed value.
+ * @type {number}
+ */
+ this.low = low | 0;
+ /**
+ * The high 32 bits as a signed value.
+ * @type {number}
+ */
+ this.high = high | 0;
+ /**
+ * Whether unsigned or not.
+ * @type {boolean}
+ */
+ this.unsigned = !!unsigned;
+ }
+ // The internal representation of a long is the two given signed, 32-bit values.
+ // We use 32-bit pieces because these are the size of integers on which
+ // Javascript performs bit-operations. For operations like addition and
+ // multiplication, we split each number into 16 bit pieces, which can easily be
+ // multiplied within Javascript's floating-point representation without overflow
+ // or change in sign.
+ //
+ // In the algorithms below, we frequently reduce the negative case to the
+ // positive case by negating the input(s) and then post-processing the result.
+ // Note that we must ALWAYS check specially whether those values are MIN_VALUE
+ // (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as
+ // a positive number, it overflows back into a negative). Not handling this
+ // case would often result in infinite recursion.
+ //
+ // Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the from*
+ // methods on which they depend.
+ /**
+ * An indicator used to reliably determine if an object is a Long or not.
+ * @type {boolean}
+ * @const
+ * @private
+ */
+ Long$1.prototype.__isLong__;
+ Object.defineProperty(Long$1.prototype, "__isLong__", { value: true });
+ /**
+ * @function
+ * @param {*} obj Object
+ * @returns {boolean}
+ * @inner
+ */
+ function isLong(obj) {
+ return (obj && obj["__isLong__"]) === true;
+ }
+ /**
+ * Tests if the specified object is a Long.
+ * @function
+ * @param {*} obj Object
+ * @returns {boolean}
+ */
+ Long$1.isLong = isLong;
+ /**
+ * A cache of the Long representations of small integer values.
+ * @type {!Object}
+ * @inner
+ */
+ var INT_CACHE = {};
+ /**
+ * A cache of the Long representations of small unsigned integer values.
+ * @type {!Object}
+ * @inner
+ */
+ var UINT_CACHE = {};
+ /**
+ * @param {number} value
+ * @param {boolean=} unsigned
+ * @returns {!Long}
+ * @inner
+ */
+ function fromInt(value, unsigned) {
+ var obj, cachedObj, cache;
+ if (unsigned) {
+ value >>>= 0;
+ if (cache = (0 <= value && value < 256)) {
+ cachedObj = UINT_CACHE[value];
+ if (cachedObj)
+ return cachedObj;
+ }
+ obj = fromBits(value, (value | 0) < 0 ? -1 : 0, true);
+ if (cache)
+ UINT_CACHE[value] = obj;
+ return obj;
+ }
+ else {
+ value |= 0;
+ if (cache = (-128 <= value && value < 128)) {
+ cachedObj = INT_CACHE[value];
+ if (cachedObj)
+ return cachedObj;
+ }
+ obj = fromBits(value, value < 0 ? -1 : 0, false);
+ if (cache)
+ INT_CACHE[value] = obj;
+ return obj;
+ }
+ }
+ /**
+ * Returns a Long representing the given 32 bit integer value.
+ * @function
+ * @param {number} value The 32 bit integer in question
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @returns {!Long} The corresponding Long value
+ */
+ Long$1.fromInt = fromInt;
/**
 * Builds a Long from an arbitrary double, clamping to the representable range
 * and mapping NaN to zero.
 * @param {number} value The number in question
 * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
 * @returns {!Long} The corresponding Long value
 * @inner
 */
function fromNumber(value, unsigned) {
    if (isNaN(value))
        return unsigned ? UZERO : ZERO;
    // Clamp values that fall outside the 64 bit range for the requested signedness.
    if (unsigned && value < 0)
        return UZERO;
    if (unsigned && value >= TWO_PWR_64_DBL)
        return MAX_UNSIGNED_VALUE;
    if (!unsigned && value <= -TWO_PWR_63_DBL)
        return MIN_VALUE;
    if (!unsigned && value + 1 >= TWO_PWR_63_DBL)
        return MAX_VALUE;
    if (value < 0)
        return fromNumber(-value, unsigned).neg();
    var lowWord = (value % TWO_PWR_32_DBL) | 0;
    var highWord = (value / TWO_PWR_32_DBL) | 0;
    return fromBits(lowWord, highWord, unsigned);
}
/**
 * Returns a Long representing the given value, provided that it is a finite number. Otherwise, zero is returned.
 * @function
 * @param {number} value The number in question
 * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
 * @returns {!Long} The corresponding Long value
 */
Long$1.fromNumber = fromNumber;
/**
 * Wraps a (low, high) pair of 32 bit words in a new Long instance.
 * @param {number} lowBits
 * @param {number} highBits
 * @param {boolean=} unsigned
 * @returns {!Long}
 * @inner
 */
function fromBits(lowBits, highBits, unsigned) {
    return new Long$1(lowBits, highBits, unsigned);
}
/**
 * Returns a Long representing the 64 bit integer that comes by concatenating the given low and high bits. Each is
 * assumed to use 32 bits.
 * @function
 * @param {number} lowBits The low 32 bits
 * @param {number} highBits The high 32 bits
 * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
 * @returns {!Long} The corresponding Long value
 */
Long$1.fromBits = fromBits;
/**
 * Local alias for Math.pow, kept to ease minification.
 * @function
 * @param {number} base
 * @param {number} exponent
 * @returns {number}
 * @inner
 */
var pow_dbl = Math.pow; // Used 4 times (4*8 to 15+4)
/**
 * Parses a string in the given radix into a Long.
 * @param {string} str The textual representation of the Long
 * @param {(boolean|number)=} unsigned Whether unsigned or not, defaults to signed;
 *        a number here is treated as the radix for goog.math.Long compatibility
 * @param {number=} radix The radix in which the text is written (2-36), defaults to 10
 * @returns {!Long}
 * @throws {Error} If `str` is empty or contains an interior hyphen
 * @throws {RangeError} If `radix` is out of range
 * @inner
 */
function fromString(str, unsigned, radix) {
    if (str.length === 0)
        throw Error('empty string');
    if (typeof unsigned === 'number') {
        // For goog.math.long compatibility: fromString(str, radix) call shape.
        radix = unsigned;
        unsigned = false;
    }
    else {
        unsigned = !!unsigned;
    }
    // Fix: resolve `unsigned` before handling the special textual values, so an
    // unsigned request yields UZERO instead of the signed ZERO (matches the
    // upstream long.js correction of this function).
    if (str === "NaN" || str === "Infinity" || str === "+Infinity" || str === "-Infinity")
        return unsigned ? UZERO : ZERO;
    radix = radix || 10;
    if (radix < 2 || 36 < radix)
        throw RangeError('radix');
    var p;
    if ((p = str.indexOf('-')) > 0)
        throw Error('interior hyphen');
    else if (p === 0) {
        return fromString(str.substring(1), unsigned, radix).neg();
    }
    // Do several (8) digits each time through the loop, so as to
    // minimize the calls to the very expensive emulated div.
    var radixToPower = fromNumber(pow_dbl(radix, 8));
    var result = ZERO;
    for (var i = 0; i < str.length; i += 8) {
        var size = Math.min(8, str.length - i), value = parseInt(str.substring(i, i + size), radix);
        if (size < 8) {
            var power = fromNumber(pow_dbl(radix, size));
            result = result.mul(power).add(fromNumber(value));
        }
        else {
            result = result.mul(radixToPower);
            result = result.add(fromNumber(value));
        }
    }
    result.unsigned = unsigned;
    return result;
}
/**
 * Returns a Long representation of the given string, written using the specified radix.
 * @function
 * @param {string} str The textual representation of the Long
 * @param {(boolean|number)=} unsigned Whether unsigned or not, defaults to signed
 * @param {number=} radix The radix in which the text is written (2-36), defaults to 10
 * @returns {!Long} The corresponding Long value
 */
Long$1.fromString = fromString;
/**
 * Dispatches to the appropriate from* constructor for the value's type.
 * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val
 * @param {boolean=} unsigned
 * @returns {!Long}
 * @inner
 */
function fromValue(val, unsigned) {
    switch (typeof val) {
        case 'number':
            return fromNumber(val, unsigned);
        case 'string':
            return fromString(val, unsigned);
    }
    // Throws for non-objects, converts non-instanceof Long:
    return fromBits(val.low, val.high, typeof unsigned === 'boolean' ? unsigned : val.unsigned);
}
/**
 * Converts the specified value to a Long using the appropriate from* function for its type.
 * @function
 * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val Value
 * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
 * @returns {!Long}
 */
Long$1.fromValue = fromValue;
// NOTE: the compiler should inline these constant values below and then remove these variables, so there should be
// no runtime penalty for these.
/**
 * 2^16 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_16_DBL = 1 << 16;
/**
 * 2^24 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_24_DBL = 1 << 24;
/**
 * 2^32 as a double (too large for a 32 bit shift, hence the multiply).
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_32_DBL = TWO_PWR_16_DBL * TWO_PWR_16_DBL;
/**
 * 2^64 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_64_DBL = TWO_PWR_32_DBL * TWO_PWR_32_DBL;
/**
 * 2^63 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_63_DBL = TWO_PWR_64_DBL / 2;
/**
 * 2^24 as a Long; used by multiply() as the "small enough for doubles" threshold.
 * @type {!Long}
 * @const
 * @inner
 */
var TWO_PWR_24 = fromInt(TWO_PWR_24_DBL);
/**
 * @type {!Long}
 * @inner
 */
var ZERO = fromInt(0);
/**
 * Signed zero.
 * @type {!Long}
 */
Long$1.ZERO = ZERO;
/**
 * @type {!Long}
 * @inner
 */
var UZERO = fromInt(0, true);
/**
 * Unsigned zero.
 * @type {!Long}
 */
Long$1.UZERO = UZERO;
/**
 * @type {!Long}
 * @inner
 */
var ONE = fromInt(1);
/**
 * Signed one.
 * @type {!Long}
 */
Long$1.ONE = ONE;
/**
 * @type {!Long}
 * @inner
 */
var UONE = fromInt(1, true);
/**
 * Unsigned one.
 * @type {!Long}
 */
Long$1.UONE = UONE;
/**
 * @type {!Long}
 * @inner
 */
var NEG_ONE = fromInt(-1);
/**
 * Signed negative one.
 * @type {!Long}
 */
Long$1.NEG_ONE = NEG_ONE;
/**
 * 2^63 - 1, i.e. all bits set except the sign bit.
 * @type {!Long}
 * @inner
 */
var MAX_VALUE = fromBits(0xFFFFFFFF | 0, 0x7FFFFFFF | 0, false);
/**
 * Maximum signed value.
 * @type {!Long}
 */
Long$1.MAX_VALUE = MAX_VALUE;
/**
 * 2^64 - 1, i.e. all 64 bits set.
 * @type {!Long}
 * @inner
 */
var MAX_UNSIGNED_VALUE = fromBits(0xFFFFFFFF | 0, 0xFFFFFFFF | 0, true);
/**
 * Maximum unsigned value.
 * @type {!Long}
 */
Long$1.MAX_UNSIGNED_VALUE = MAX_UNSIGNED_VALUE;
/**
 * -2^63, i.e. only the sign bit set. Note that -MIN_VALUE == MIN_VALUE.
 * @type {!Long}
 * @inner
 */
var MIN_VALUE = fromBits(0, 0x80000000 | 0, false);
/**
 * Minimum signed value.
 * @type {!Long}
 */
Long$1.MIN_VALUE = MIN_VALUE;
/**
 * Shorthand for the prototype all instance methods below are attached to.
 * @alias Long.prototype
 * @inner
 */
var LongPrototype = Long$1.prototype;
/**
 * Converts the Long to a 32 bit integer, assuming it is a 32 bit integer.
 * @returns {number}
 */
LongPrototype.toInt = function toInt() {
    if (this.unsigned)
        return this.low >>> 0;
    return this.low;
};
/**
 * Converts the Long to the nearest floating-point representation of this value (double, 53 bit mantissa).
 * @returns {number}
 */
LongPrototype.toNumber = function toNumber() {
    // For unsigned Longs the high word must be read as an unsigned quantity.
    var highPart = this.unsigned ? this.high >>> 0 : this.high;
    return highPart * TWO_PWR_32_DBL + (this.low >>> 0);
};
/**
 * Converts the Long to a string written in the specified radix.
 * @param {number=} radix Radix (2-36), defaults to 10
 * @returns {string}
 * @override
 * @throws {RangeError} If `radix` is out of range
 */
LongPrototype.toString = function toString(radix) {
    radix = radix || 10;
    if (radix < 2 || 36 < radix)
        throw RangeError('radix');
    if (this.isZero())
        return '0';
    if (this.isNegative()) { // Unsigned Longs are never negative
        if (this.eq(MIN_VALUE)) {
            // We need to change the Long value before it can be negated, so we remove
            // the bottom-most digit in this base and then recurse to do the rest.
            var radixLong = fromNumber(radix), div = this.div(radixLong), rem1 = div.mul(radixLong).sub(this);
            return div.toString(radix) + rem1.toInt().toString(radix);
        }
        else
            return '-' + this.neg().toString(radix);
    }
    // Do several (6) digits each time through the loop, so as to
    // minimize the calls to the very expensive emulated div.
    var radixToPower = fromNumber(pow_dbl(radix, 6), this.unsigned), rem = this;
    var result = '';
    while (true) {
        // Peel off the lowest 6 digits; `intval` always fits in a double.
        var remDiv = rem.div(radixToPower), intval = rem.sub(remDiv.mul(radixToPower)).toInt() >>> 0, digits = intval.toString(radix);
        rem = remDiv;
        if (rem.isZero())
            return digits + result;
        else {
            // Interior digit groups must be zero-padded to exactly 6 characters.
            while (digits.length < 6)
                digits = '0' + digits;
            result = '' + digits + result;
        }
    }
};
/**
 * Gets the high 32 bits as a signed integer.
 * @returns {number} Signed high bits
 */
LongPrototype.getHighBits = function getHighBits() {
    return this.high;
};
/**
 * Gets the high 32 bits as an unsigned integer.
 * @returns {number} Unsigned high bits
 */
LongPrototype.getHighBitsUnsigned = function getHighBitsUnsigned() {
    return this.high >>> 0;
};
/**
 * Gets the low 32 bits as a signed integer.
 * @returns {number} Signed low bits
 */
LongPrototype.getLowBits = function getLowBits() {
    return this.low;
};
/**
 * Gets the low 32 bits as an unsigned integer.
 * @returns {number} Unsigned low bits
 */
LongPrototype.getLowBitsUnsigned = function getLowBitsUnsigned() {
    return this.low >>> 0;
};
/**
 * Gets the number of bits needed to represent the absolute value of this Long.
 * @returns {number}
 */
LongPrototype.getNumBitsAbs = function getNumBitsAbs() {
    if (this.isNegative()) // Unsigned Longs are never negative
        return this.eq(MIN_VALUE) ? 64 : this.neg().getNumBitsAbs();
    // Scan the most significant non-zero word for its highest set bit.
    var word = this.high !== 0 ? this.high : this.low;
    var bit;
    for (bit = 31; bit > 0; bit--) {
        if ((word & (1 << bit)) !== 0)
            break;
    }
    return this.high !== 0 ? bit + 33 : bit + 1;
};
/**
 * Tests if this Long's value equals zero.
 * @returns {boolean}
 */
LongPrototype.isZero = function isZero() {
    return this.low === 0 && this.high === 0;
};
/**
 * Tests if this Long's value equals zero. This is an alias of {@link Long#isZero}.
 * @returns {boolean}
 */
LongPrototype.eqz = LongPrototype.isZero;
/**
 * Tests if this Long's value is negative.
 * @returns {boolean}
 */
LongPrototype.isNegative = function isNegative() {
    // Unsigned Longs are never negative; otherwise the sign lives in the high word.
    return this.unsigned ? false : this.high < 0;
};
/**
 * Tests if this Long's value is positive.
 * @returns {boolean}
 */
LongPrototype.isPositive = function isPositive() {
    return this.unsigned ? true : this.high >= 0;
};
/**
 * Tests if this Long's value is odd.
 * @returns {boolean}
 */
LongPrototype.isOdd = function isOdd() {
    return (this.low & 1) !== 0;
};
/**
 * Tests if this Long's value is even.
 * @returns {boolean}
 */
LongPrototype.isEven = function isEven() {
    return (this.low & 1) !== 1;
};
/**
 * Tests if this Long's value equals the specified's.
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.equals = function equals(other) {
    if (!isLong(other))
        other = fromValue(other);
    // When signedness differs and both have the top bit set, the two values
    // represent different numbers even though the raw words match.
    var bothHighBitSet = (this.high >>> 31) === 1 && (other.high >>> 31) === 1;
    if (this.unsigned !== other.unsigned && bothHighBitSet)
        return false;
    return this.high === other.high && this.low === other.low;
};
/**
 * Tests if this Long's value equals the specified's. This is an alias of {@link Long#equals}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.eq = LongPrototype.equals;
/**
 * Tests if this Long's value differs from the specified's.
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.notEquals = function notEquals(other) {
    return !this.eq(/* validates */ other);
};
/**
 * Tests if this Long's value differs from the specified's. This is an alias of {@link Long#notEquals}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.neq = LongPrototype.notEquals;
/**
 * Tests if this Long's value differs from the specified's. This is an alias of {@link Long#notEquals}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.ne = LongPrototype.notEquals;
/**
 * Tests if this Long's value is less than the specified's.
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.lessThan = function lessThan(other) {
    var order = this.comp(/* validates */ other);
    return order < 0;
};
/**
 * Tests if this Long's value is less than the specified's. This is an alias of {@link Long#lessThan}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.lt = LongPrototype.lessThan;
/**
 * Tests if this Long's value is less than or equal the specified's.
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.lessThanOrEqual = function lessThanOrEqual(other) {
    var order = this.comp(/* validates */ other);
    return order <= 0;
};
/**
 * Tests if this Long's value is less than or equal the specified's. This is an alias of {@link Long#lessThanOrEqual}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.lte = LongPrototype.lessThanOrEqual;
/**
 * Tests if this Long's value is less than or equal the specified's. This is an alias of {@link Long#lessThanOrEqual}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.le = LongPrototype.lessThanOrEqual;
/**
 * Tests if this Long's value is greater than the specified's.
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.greaterThan = function greaterThan(other) {
    var order = this.comp(/* validates */ other);
    return order > 0;
};
/**
 * Tests if this Long's value is greater than the specified's. This is an alias of {@link Long#greaterThan}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.gt = LongPrototype.greaterThan;
/**
 * Tests if this Long's value is greater than or equal the specified's.
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.greaterThanOrEqual = function greaterThanOrEqual(other) {
    var order = this.comp(/* validates */ other);
    return order >= 0;
};
/**
 * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.gte = LongPrototype.greaterThanOrEqual;
/**
 * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {boolean}
 */
LongPrototype.ge = LongPrototype.greaterThanOrEqual;
/**
 * Compares this Long's value with the specified's.
 * @param {!Long|number|string} other Other value
 * @returns {number} 0 if they are the same, 1 if the this is greater and -1
 *  if the given one is greater
 */
LongPrototype.compare = function compare(other) {
    if (!isLong(other))
        other = fromValue(other);
    if (this.eq(other))
        return 0;
    var thisNeg = this.isNegative();
    var otherNeg = other.isNegative();
    // Differing signs decide the order immediately.
    if (thisNeg !== otherNeg)
        return thisNeg ? -1 : 1;
    // Same sign: for signed Longs the difference's sign is reliable.
    if (!this.unsigned)
        return this.sub(other).isNegative() ? -1 : 1;
    // Both are positive if at least one is unsigned: compare word pairs unsigned.
    var thisHigh = this.high >>> 0;
    var otherHigh = other.high >>> 0;
    if (otherHigh > thisHigh)
        return -1;
    if (otherHigh < thisHigh)
        return 1;
    return (other.low >>> 0) > (this.low >>> 0) ? -1 : 1;
};
/**
 * Compares this Long's value with the specified's. This is an alias of {@link Long#compare}.
 * @function
 * @param {!Long|number|string} other Other value
 * @returns {number} 0 if they are the same, 1 if the this is greater and -1
 *  if the given one is greater
 */
LongPrototype.comp = LongPrototype.compare;
/**
 * Negates this Long's value (two's complement: ~x + 1).
 * @returns {!Long} Negated Long
 */
LongPrototype.negate = function negate() {
    // MIN_VALUE has no positive counterpart; it is its own negation.
    return (!this.unsigned && this.eq(MIN_VALUE)) ? MIN_VALUE : this.not().add(ONE);
};
/**
 * Negates this Long's value. This is an alias of {@link Long#negate}.
 * @function
 * @returns {!Long} Negated Long
 */
LongPrototype.neg = LongPrototype.negate;
/**
 * Returns the sum of this and the specified Long.
 * @param {!Long|number|string} addend Addend
 * @returns {!Long} Sum
 */
LongPrototype.add = function add(addend) {
    if (!isLong(addend))
        addend = fromValue(addend);
    // Divide each number into 4 chunks of 16 bits, and then sum the chunks.
    var a48 = this.high >>> 16;
    var a32 = this.high & 0xFFFF;
    var a16 = this.low >>> 16;
    var a00 = this.low & 0xFFFF;
    var b48 = addend.high >>> 16;
    var b32 = addend.high & 0xFFFF;
    var b16 = addend.low >>> 16;
    var b00 = addend.low & 0xFFFF;
    // Ripple the carry upward one 16 bit chunk at a time; order is significant.
    var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
    c00 += a00 + b00;
    c16 += c00 >>> 16;
    c00 &= 0xFFFF;
    c16 += a16 + b16;
    c32 += c16 >>> 16;
    c16 &= 0xFFFF;
    c32 += a32 + b32;
    c48 += c32 >>> 16;
    c32 &= 0xFFFF;
    c48 += a48 + b48;
    c48 &= 0xFFFF; // overflow past bit 63 wraps, matching 64 bit arithmetic
    return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned);
};
/**
 * Returns the difference of this and the specified Long.
 * @param {!Long|number|string} subtrahend Subtrahend
 * @returns {!Long} Difference
 */
LongPrototype.subtract = function subtract(subtrahend) {
    if (!isLong(subtrahend))
        subtrahend = fromValue(subtrahend);
    // a - b == a + (-b) in two's complement.
    return this.add(subtrahend.neg());
};
/**
 * Returns the difference of this and the specified Long. This is an alias of {@link Long#subtract}.
 * @function
 * @param {!Long|number|string} subtrahend Subtrahend
 * @returns {!Long} Difference
 */
LongPrototype.sub = LongPrototype.subtract;
/**
 * Returns the product of this and the specified Long.
 * @param {!Long|number|string} multiplier Multiplier
 * @returns {!Long} Product
 */
LongPrototype.multiply = function multiply(multiplier) {
    if (this.isZero())
        return ZERO;
    if (!isLong(multiplier))
        multiplier = fromValue(multiplier);
    // use wasm support if present
    if (wasm) {
        var low = wasm.mul(this.low, this.high, multiplier.low, multiplier.high);
        return fromBits(low, wasm.get_high(), this.unsigned);
    }
    if (multiplier.isZero())
        return ZERO;
    // MIN_VALUE * x keeps only the lowest bit of x in the sign position.
    if (this.eq(MIN_VALUE))
        return multiplier.isOdd() ? MIN_VALUE : ZERO;
    if (multiplier.eq(MIN_VALUE))
        return this.isOdd() ? MIN_VALUE : ZERO;
    // Normalize both operands to non-negative, fixing the sign afterwards.
    if (this.isNegative()) {
        if (multiplier.isNegative())
            return this.neg().mul(multiplier.neg());
        else
            return this.neg().mul(multiplier).neg();
    }
    else if (multiplier.isNegative())
        return this.mul(multiplier.neg()).neg();
    // If both longs are small, use float multiplication
    if (this.lt(TWO_PWR_24) && multiplier.lt(TWO_PWR_24))
        return fromNumber(this.toNumber() * multiplier.toNumber(), this.unsigned);
    // Divide each long into 4 chunks of 16 bits, and then add up 4x4 products.
    // We can skip products that would overflow.
    var a48 = this.high >>> 16;
    var a32 = this.high & 0xFFFF;
    var a16 = this.low >>> 16;
    var a00 = this.low & 0xFFFF;
    var b48 = multiplier.high >>> 16;
    var b32 = multiplier.high & 0xFFFF;
    var b16 = multiplier.low >>> 16;
    var b00 = multiplier.low & 0xFFFF;
    // Schoolbook multiplication over 16 bit chunks with carry propagation;
    // the statement order below is significant.
    var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
    c00 += a00 * b00;
    c16 += c00 >>> 16;
    c00 &= 0xFFFF;
    c16 += a16 * b00;
    c32 += c16 >>> 16;
    c16 &= 0xFFFF;
    c16 += a00 * b16;
    c32 += c16 >>> 16;
    c16 &= 0xFFFF;
    c32 += a32 * b00;
    c48 += c32 >>> 16;
    c32 &= 0xFFFF;
    c32 += a16 * b16;
    c48 += c32 >>> 16;
    c32 &= 0xFFFF;
    c32 += a00 * b32;
    c48 += c32 >>> 16;
    c32 &= 0xFFFF;
    c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;
    c48 &= 0xFFFF; // anything above bit 63 wraps away
    return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned);
};
/**
 * Returns the product of this and the specified Long. This is an alias of {@link Long#multiply}.
 * @function
 * @param {!Long|number|string} multiplier Multiplier
 * @returns {!Long} Product
 */
LongPrototype.mul = LongPrototype.multiply;
/**
 * Returns this Long divided by the specified. The result is signed if this Long is signed or
 * unsigned if this Long is unsigned.
 * @param {!Long|number|string} divisor Divisor
 * @returns {!Long} Quotient
 * @throws {Error} If `divisor` is zero
 */
LongPrototype.divide = function divide(divisor) {
    if (!isLong(divisor))
        divisor = fromValue(divisor);
    if (divisor.isZero())
        throw Error('division by zero');
    // use wasm support if present
    if (wasm) {
        // guard against signed division overflow: the largest
        // negative number / -1 would be 1 larger than the largest
        // positive number, due to two's complement.
        if (!this.unsigned &&
            this.high === -0x80000000 &&
            divisor.low === -1 && divisor.high === -1) {
            // be consistent with non-wasm code path
            return this;
        }
        var low = (this.unsigned ? wasm.div_u : wasm.div_s)(this.low, this.high, divisor.low, divisor.high);
        return fromBits(low, wasm.get_high(), this.unsigned);
    }
    if (this.isZero())
        return this.unsigned ? UZERO : ZERO;
    var approx, rem, res;
    if (!this.unsigned) {
        // This section is only relevant for signed longs and is derived from the
        // closure library as a whole.
        if (this.eq(MIN_VALUE)) {
            if (divisor.eq(ONE) || divisor.eq(NEG_ONE))
                return MIN_VALUE; // recall that -MIN_VALUE == MIN_VALUE
            else if (divisor.eq(MIN_VALUE))
                return ONE;
            else {
                // At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|.
                var halfThis = this.shr(1);
                approx = halfThis.div(divisor).shl(1);
                if (approx.eq(ZERO)) {
                    return divisor.isNegative() ? ONE : NEG_ONE;
                }
                else {
                    rem = this.sub(divisor.mul(approx));
                    res = approx.add(rem.div(divisor));
                    return res;
                }
            }
        }
        else if (divisor.eq(MIN_VALUE))
            return this.unsigned ? UZERO : ZERO;
        // Reduce to non-negative operands, restoring the sign afterwards.
        if (this.isNegative()) {
            if (divisor.isNegative())
                return this.neg().div(divisor.neg());
            return this.neg().div(divisor).neg();
        }
        else if (divisor.isNegative())
            return this.div(divisor.neg()).neg();
        res = ZERO;
    }
    else {
        // The algorithm below has not been made for unsigned longs. It's therefore
        // required to take special care of the MSB prior to running it.
        if (!divisor.unsigned)
            divisor = divisor.toUnsigned();
        if (divisor.gt(this))
            return UZERO;
        if (divisor.gt(this.shru(1))) // 15 >>> 1 = 7 ; with divisor = 8 ; true
            return UONE;
        res = UZERO;
    }
    // Repeat the following until the remainder is less than other: find a
    // floating-point that approximates remainder / other *from below*, add this
    // into the result, and subtract it from the remainder. It is critical that
    // the approximate value is less than or equal to the real value so that the
    // remainder never becomes negative.
    rem = this;
    while (rem.gte(divisor)) {
        // Approximate the result of division. This may be a little greater or
        // smaller than the actual value.
        approx = Math.max(1, Math.floor(rem.toNumber() / divisor.toNumber()));
        // We will tweak the approximate result by changing it in the 48-th digit or
        // the smallest non-fractional digit, whichever is larger.
        var log2 = Math.ceil(Math.log(approx) / Math.LN2), delta = (log2 <= 48) ? 1 : pow_dbl(2, log2 - 48),
        // Decrease the approximation until it is smaller than the remainder. Note
        // that if it is too large, the product overflows and is negative.
        approxRes = fromNumber(approx), approxRem = approxRes.mul(divisor);
        while (approxRem.isNegative() || approxRem.gt(rem)) {
            approx -= delta;
            approxRes = fromNumber(approx, this.unsigned);
            approxRem = approxRes.mul(divisor);
        }
        // We know the answer can't be zero... and actually, zero would cause
        // infinite recursion since we would make no progress.
        if (approxRes.isZero())
            approxRes = ONE;
        res = res.add(approxRes);
        rem = rem.sub(approxRem);
    }
    return res;
};
/**
 * Returns this Long divided by the specified. This is an alias of {@link Long#divide}.
 * @function
 * @param {!Long|number|string} divisor Divisor
 * @returns {!Long} Quotient
 */
LongPrototype.div = LongPrototype.divide;
/**
 * Returns this Long modulo the specified.
 * @param {!Long|number|string} divisor Divisor
 * @returns {!Long} Remainder
 */
LongPrototype.modulo = function modulo(divisor) {
    if (!isLong(divisor))
        divisor = fromValue(divisor);
    // use wasm support if present
    if (wasm) {
        var remLow = this.unsigned
            ? wasm.rem_u(this.low, this.high, divisor.low, divisor.high)
            : wasm.rem_s(this.low, this.high, divisor.low, divisor.high);
        return fromBits(remLow, wasm.get_high(), this.unsigned);
    }
    // a mod b == a - (a / b) * b
    return this.sub(this.div(divisor).mul(divisor));
};
/**
 * Returns this Long modulo the specified. This is an alias of {@link Long#modulo}.
 * @function
 * @param {!Long|number|string} divisor Divisor
 * @returns {!Long} Remainder
 */
LongPrototype.mod = LongPrototype.modulo;
/**
 * Returns this Long modulo the specified. This is an alias of {@link Long#modulo}.
 * @function
 * @param {!Long|number|string} divisor Divisor
 * @returns {!Long} Remainder
 */
LongPrototype.rem = LongPrototype.modulo;
/**
 * Returns the bitwise NOT of this Long.
 * @returns {!Long}
 */
LongPrototype.not = function not() {
    return fromBits(~this.low, ~this.high, this.unsigned);
};
/**
 * Returns the bitwise AND of this Long and the specified.
 * @param {!Long|number|string} other Other Long
 * @returns {!Long}
 */
LongPrototype.and = function and(other) {
    var rhs = isLong(other) ? other : fromValue(other);
    return fromBits(this.low & rhs.low, this.high & rhs.high, this.unsigned);
};
/**
 * Returns the bitwise OR of this Long and the specified.
 * @param {!Long|number|string} other Other Long
 * @returns {!Long}
 */
LongPrototype.or = function or(other) {
    var rhs = isLong(other) ? other : fromValue(other);
    return fromBits(this.low | rhs.low, this.high | rhs.high, this.unsigned);
};
/**
 * Returns the bitwise XOR of this Long and the given one.
 * @param {!Long|number|string} other Other Long
 * @returns {!Long}
 */
LongPrototype.xor = function xor(other) {
    var rhs = isLong(other) ? other : fromValue(other);
    return fromBits(this.low ^ rhs.low, this.high ^ rhs.high, this.unsigned);
};
/**
 * Returns this Long with bits shifted to the left by the given amount.
 * @param {number|!Long} numBits Number of bits
 * @returns {!Long} Shifted Long
 */
LongPrototype.shiftLeft = function shiftLeft(numBits) {
    if (isLong(numBits))
        numBits = numBits.toInt();
    numBits &= 63; // shifts are modulo 64, matching native 64 bit semantics
    if (numBits === 0)
        return this;
    if (numBits < 32) {
        // Bits leaving the low word carry into the high word.
        var high = (this.high << numBits) | (this.low >>> (32 - numBits));
        return fromBits(this.low << numBits, high, this.unsigned);
    }
    // Shifting by >= 32 moves the low word entirely into the high word.
    return fromBits(0, this.low << (numBits - 32), this.unsigned);
};
/**
 * Returns this Long with bits shifted to the left by the given amount. This is an alias of {@link Long#shiftLeft}.
 * @function
 * @param {number|!Long} numBits Number of bits
 * @returns {!Long} Shifted Long
 */
LongPrototype.shl = LongPrototype.shiftLeft;
/**
 * Returns this Long with bits arithmetically shifted to the right by the given amount.
 * @param {number|!Long} numBits Number of bits
 * @returns {!Long} Shifted Long
 */
LongPrototype.shiftRight = function shiftRight(numBits) {
    if (isLong(numBits))
        numBits = numBits.toInt();
    numBits &= 63;
    if (numBits === 0)
        return this;
    if (numBits < 32) {
        var low = (this.low >>> numBits) | (this.high << (32 - numBits));
        return fromBits(low, this.high >> numBits, this.unsigned);
    }
    // For >= 32 the high word fills with copies of the sign bit.
    return fromBits(this.high >> (numBits - 32), this.high >= 0 ? 0 : -1, this.unsigned);
};
/**
 * Returns this Long with bits arithmetically shifted to the right by the given amount. This is an alias of {@link Long#shiftRight}.
 * @function
 * @param {number|!Long} numBits Number of bits
 * @returns {!Long} Shifted Long
 */
LongPrototype.shr = LongPrototype.shiftRight;
/**
 * Returns this Long with bits logically shifted to the right by the given amount.
 * @param {number|!Long} numBits Number of bits
 * @returns {!Long} Shifted Long
 */
LongPrototype.shiftRightUnsigned = function shiftRightUnsigned(numBits) {
    if (isLong(numBits))
        numBits = numBits.toInt();
    numBits &= 63; // shifts are modulo 64
    if (numBits === 0)
        return this;
    if (numBits < 32) {
        // Bits leaving the high word carry into the low word; high fills with zeros.
        var low = (this.low >>> numBits) | (this.high << (32 - numBits));
        return fromBits(low, this.high >>> numBits, this.unsigned);
    }
    if (numBits === 32)
        return fromBits(this.high, 0, this.unsigned);
    return fromBits(this.high >>> (numBits - 32), 0, this.unsigned);
};
/**
 * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}.
 * @function
 * @param {number|!Long} numBits Number of bits
 * @returns {!Long} Shifted Long
 */
LongPrototype.shru = LongPrototype.shiftRightUnsigned;
/**
 * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}.
 * @function
 * @param {number|!Long} numBits Number of bits
 * @returns {!Long} Shifted Long
 */
LongPrototype.shr_u = LongPrototype.shiftRightUnsigned;
/**
 * Converts this Long to signed (same bit pattern, reinterpreted).
 * @returns {!Long} Signed long
 */
LongPrototype.toSigned = function toSigned() {
    return this.unsigned ? fromBits(this.low, this.high, false) : this;
};
/**
 * Converts this Long to unsigned (same bit pattern, reinterpreted).
 * @returns {!Long} Unsigned long
 */
LongPrototype.toUnsigned = function toUnsigned() {
    return this.unsigned ? this : fromBits(this.low, this.high, true);
};
/**
 * Converts this Long to its byte representation.
 * @param {boolean=} le Whether little or big endian, defaults to big endian
 * @returns {!Array.<number>} Byte representation
 */
LongPrototype.toBytes = function toBytes(le) {
    if (le)
        return this.toBytesLE();
    return this.toBytesBE();
};
/**
 * Converts this Long to its little endian byte representation
 * (low word first, least significant byte first).
 * @returns {!Array.<number>} Little endian byte representation
 */
LongPrototype.toBytesLE = function toBytesLE() {
    var lo = this.low, hi = this.high;
    var bytes = [];
    for (var shift = 0; shift < 32; shift += 8)
        bytes.push((lo >>> shift) & 0xff);
    for (shift = 0; shift < 32; shift += 8)
        bytes.push((hi >>> shift) & 0xff);
    return bytes;
};
/**
 * Converts this Long to its big endian byte representation
 * (high word first, most significant byte first).
 * @returns {!Array.<number>} Big endian byte representation
 */
LongPrototype.toBytesBE = function toBytesBE() {
    var hi = this.high, lo = this.low;
    var bytes = [];
    for (var shift = 24; shift >= 0; shift -= 8)
        bytes.push((hi >>> shift) & 0xff);
    for (shift = 24; shift >= 0; shift -= 8)
        bytes.push((lo >>> shift) & 0xff);
    return bytes;
};
/**
 * Creates a Long from its byte representation.
 * @param {!Array.<number>} bytes Byte representation
 * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
 * @param {boolean=} le Whether little or big endian, defaults to big endian
 * @returns {Long} The corresponding Long value
 */
Long$1.fromBytes = function fromBytes(bytes, unsigned, le) {
    if (le)
        return Long$1.fromBytesLE(bytes, unsigned);
    return Long$1.fromBytesBE(bytes, unsigned);
};
/**
 * Creates a Long from its little endian byte representation
 * (bytes[0] is the least significant byte of the low word).
 * @param {!Array.<number>} bytes Little endian byte representation
 * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
 * @returns {Long} The corresponding Long value
 */
Long$1.fromBytesLE = function fromBytesLE(bytes, unsigned) {
    var lo = bytes[0] | (bytes[1] << 8) | (bytes[2] << 16) | (bytes[3] << 24);
    var hi = bytes[4] | (bytes[5] << 8) | (bytes[6] << 16) | (bytes[7] << 24);
    return new Long$1(lo, hi, unsigned);
};
/**
 * Creates a Long from its big endian byte representation
 * (bytes[0] is the most significant byte of the high word).
 * @param {!Array.<number>} bytes Big endian byte representation
 * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
 * @returns {Long} The corresponding Long value
 */
Long$1.fromBytesBE = function fromBytesBE(bytes, unsigned) {
    var hi = (bytes[0] << 24) | (bytes[1] << 16) | (bytes[2] << 8) | bytes[3];
    var lo = (bytes[4] << 24) | (bytes[5] << 16) | (bytes[6] << 8) | bytes[7];
    return new Long$1(lo, hi, unsigned);
};
+
// Interop shim: a prototype-less namespace object mirroring the `long` module,
// used as a fallback when the module object itself is not directly usable.
var LongExports = /*#__PURE__*/Object.assign(/*#__PURE__*/Object.create(null), long, {
    'default': long
});

// tslint:disable-next-line
var Long =
// tslint:disable-next-line
long || LongExports;
/**
 * Parses a hexadecimal string into an unsigned 64 bit Long.
 * @param {string} hex Hex digits without a 0x prefix
 * @returns {!Long}
 */
function hexToLong(hex) {
    return Long.fromString(hex, true, 16);
}
// Some primes between 2^63 and 2^64 for various uses.
// These calls only warm up parsing here; their results are discarded.
// Hex 0xc3a5c85c97cb3127
hexToLong('c3a5c85c97cb3127');
// Hex 0xb492b66fbe98f273
hexToLong('b492b66fbe98f273');
// Hex 0x9ae16a3b2f90404f
hexToLong('9ae16a3b2f90404f');
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns true when `a` is already a typed array whose constructor matches
 * the storage type used for the requested dtype, so it can be handed back
 * without copying.
 */
function noConversionNeeded(a, dtype) {
    switch (dtype) {
        case 'float32':
            return a instanceof Float32Array;
        case 'int32':
            return a instanceof Int32Array;
        case 'bool':
            return a instanceof Uint8Array;
        default:
            return false;
    }
}
/**
 * Converts a flat array or nested array of numbers into the typed array
 * backing the given dtype ('float32'/'complex64' -> Float32Array,
 * 'int32' -> Int32Array, 'bool' -> Uint8Array). A null/undefined dtype is
 * treated as float32. Throws for 'string' and unknown dtypes.
 */
function toTypedArray(a, dtype) {
    // String tensors are stored as encoded bytes and cannot be converted here.
    if (dtype === 'string') {
        throw new Error('Cannot convert a string[] to a TypedArray');
    }
    var values = Array.isArray(a) ? flatten(a) : a;
    if (env().getBool('DEBUG')) {
        checkConversionForErrors(values, dtype);
    }
    // Fast path: already a typed array of the right kind.
    if (noConversionNeeded(values, dtype)) {
        return values;
    }
    if (dtype == null || dtype === 'float32' || dtype === 'complex64') {
        return new Float32Array(values);
    }
    if (dtype === 'int32') {
        return new Int32Array(values);
    }
    if (dtype === 'bool') {
        // Booleans are stored one byte per element; anything that rounds to a
        // non-zero integer becomes 1.
        var bool = new Uint8Array(values.length);
        for (var i = 0; i < bool.length; ++i) {
            if (Math.round(values[i]) !== 0) {
                bool[i] = 1;
            }
        }
        return bool;
    }
    throw new Error("Unknown data type " + dtype);
}
+ /**
+ * Returns the current high-resolution time in milliseconds relative to an
+ * arbitrary time in the past. It works across different platforms (node.js,
+ * browsers).
+ *
+ * ```js
+ * console.log(tf.util.now());
+ * ```
+ *
+ * @doc {heading: 'Util', namespace: 'util'}
+ */
function now() {
    // Delegate to the platform abstraction registered on the environment so
    // the same code path works across runtimes.
    return env().platform.now();
}
+ /**
+ * Encodes the provided string into bytes using the provided encoding scheme.
+ *
+ * @param s The string to encode.
+ * @param encoding The encoding scheme. Defaults to utf-8.
+ *
+ * @doc {heading: 'Util'}
+ */
function encodeString(s, encoding) {
    // Any falsy encoding (undefined, null, '') falls back to utf-8 — this
    // matches the combined effect of the default parameter plus `||` check
    // in the transpiled original.
    return env().platform.encode(s, encoding || 'utf-8');
}
+ /**
+ * Decodes the provided bytes into a string using the provided encoding scheme.
+ * @param bytes The bytes to decode.
+ *
+ * @param encoding The encoding scheme. Defaults to utf-8.
+ *
+ * @doc {heading: 'Util'}
+ */
function decodeString(bytes, encoding) {
    // Mirror encodeString: any falsy encoding falls back to utf-8 before
    // delegating to the platform decoder.
    return env().platform.decode(bytes, encoding || 'utf-8');
}
+
var Profiler = /** @class */ (function () {
    /**
     * Times kernel executions using a backend-provided timer when available,
     * falling back to wall-clock timing otherwise.
     * @param backendTimer Object exposing `timerAvailable()` and `time(fn)`.
     * @param logger Optional sink for formatted profile rows; a default
     *     `Logger` is created when none is supplied.
     */
    function Profiler(backendTimer, logger) {
        this.backendTimer = backendTimer;
        this.logger = logger;
        if (logger == null) {
            this.logger = new Logger();
        }
    }
    /**
     * Runs `f` (which must return the kernel's output tensors), times it, and
     * returns a profile object whose `timeMs`/`extraInfo` fields are promises,
     * since backend timing may complete asynchronously.
     */
    Profiler.prototype.profileKernel = function (kernelName, inputs, f) {
        var e_1, _a;
        var outputs;
        var holdResultWrapperFn = function () {
            outputs = f();
        };
        var timer;
        var start = now();
        if (this.backendTimer.timerAvailable()) {
            timer = this.backendTimer.time(holdResultWrapperFn);
        }
        else {
            // No backend timer: run the kernel, force every output to complete
            // by synchronously downloading it, then time the whole span here.
            // (The try/finally below is the transpiled for..of iterator
            // protocol, including the mandatory iterator.return() cleanup.)
            holdResultWrapperFn();
            try {
                for (var outputs_1 = __values(outputs), outputs_1_1 = outputs_1.next(); !outputs_1_1.done; outputs_1_1 = outputs_1.next()) {
                    var output = outputs_1_1.value;
                    output.dataSync();
                }
            }
            catch (e_1_1) { e_1 = { error: e_1_1 }; }
            finally {
                try {
                    if (outputs_1_1 && !outputs_1_1.done && (_a = outputs_1.return)) _a.call(outputs_1);
                }
                finally { if (e_1) throw e_1.error; }
            }
            timer = Promise.resolve({ kernelMs: now() - start });
        }
        if (env().getBool('CHECK_COMPUTATION_FOR_ERRORS')) {
            var _loop_1 = function (i) {
                var output = outputs[i];
                // Dangling promise here because we don't want to propagate up
                // asynchronicity.
                output.data().then(function (tensorVals) {
                    checkComputationForErrors(tensorVals, output.dtype, kernelName);
                });
            };
            for (var i = 0; i < outputs.length; i++) {
                _loop_1(i);
            }
        }
        var kernelProfile = {
            kernelName: kernelName,
            outputs: outputs,
            inputs: inputs,
            timeMs: timer.then(function (timing) { return timing.kernelMs; }),
            // Backends may attach getExtraProfileInfo(); default to ''.
            extraInfo: timer.then(function (timing) { return timing.getExtraProfileInfo != null ?
                timing.getExtraProfileInfo() :
                ''; })
        };
        return kernelProfile;
    };
    /**
     * Logs one row per output tensor once that tensor's values and the two
     * timing promises have all resolved.
     */
    Profiler.prototype.logKernelProfile = function (kernelProfile) {
        var _this = this;
        var kernelName = kernelProfile.kernelName, outputs = kernelProfile.outputs, timeMs = kernelProfile.timeMs, inputs = kernelProfile.inputs, extraInfo = kernelProfile.extraInfo;
        outputs.forEach(function (result) {
            Promise.all([result.data(), timeMs, extraInfo]).then(function (valueContainer) {
                _this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]);
            });
        });
    };
    return Profiler;
}());
/**
 * Scans kernel output values for NaN/Infinity and warns to the console when
 * one is found. Only float32 results are inspected.
 * @returns true when a bad value was found (so behavior is testable).
 */
function checkComputationForErrors(vals, dtype, kernelName) {
    if (dtype !== 'float32') {
        // Only floating point computations will generate NaN values
        return false;
    }
    for (var idx = 0; idx < vals.length; idx++) {
        var value = vals[idx];
        if (!isFinite(value) || isNaN(value)) {
            console.warn("Found " + value + " in the result of '" + kernelName + "'");
            return true;
        }
    }
    return false;
}
var Logger = /** @class */ (function () {
    /** Formats kernel profile rows and prints them to the console. */
    function Logger() {
    }
    /**
     * Prints one styled console row: kernel name, time, output rank/shape/
     * size, and a summary of each input's rank and shape.
     */
    Logger.prototype.logKernelProfile = function (name, result, vals, timeMs, inputs, extraInfo) {
        var time = typeof timeMs === 'number' ? rightPad(timeMs + "ms", 9) :
            timeMs['error'];
        var paddedName = rightPad(name, 25);
        var rank = result.rank;
        var size = result.size;
        var shape = rightPad(result.shape.toString(), 14);
        var inputShapesDescription = '';
        for (var inputName in inputs) {
            var input = inputs[inputName];
            if (input == null) {
                continue;
            }
            // The input might be a non-tensor (e.g HTMLImageElement), in which case
            // we claim the output shape as input shape.
            var inputShape = input.shape || result.shape;
            var inputRank = inputShape.length;
            inputShapesDescription +=
                inputName + ": " + inputRank + "D " + (inputRank > 0 ? inputShape : '') + " ";
        }
        console.log("%c" + paddedName + "\t%c" + time + "\t%c" + rank + "D " + shape + "\t%c" + size + "\t%c" + inputShapesDescription + "\t%c" + extraInfo, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue');
    };
    return Logger;
}());
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes a list of TapeNodes that connect x to y, filtering everything else
+ * out and preserving the order of the original tape elements.
+ *
+ * @param tape The tape elements to filter.
+ * @param xs The input Tensors.
+ * @param y The output Tensor.
+ */
function getFilteredNodesXToY(tape, xs, y) {
    // Forward pass to compute all the nodes and Tensors that are transitively a
    // function of x.
    var tensorsFromX = {};
    var nodesFromX = {};
    for (var i = 0; i < xs.length; i++) {
        tensorsFromX[xs[i].id] = true;
    }
    for (var i = 0; i < tape.length; i++) {
        var node = tape[i];
        var nodeInputs = node.inputs;
        for (var inputName in nodeInputs) {
            var input = nodeInputs[inputName];
            var anyInputFromX = false;
            // NOTE(review): `j` is never read inside this loop — every
            // iteration re-tests the same `input.id`, so it either hits on the
            // first pass or not at all. Kept as-is to mirror the upstream
            // implementation; the result is unaffected.
            for (var j = 0; j < xs.length; j++) {
                if (tensorsFromX[input.id]) {
                    // This node consumes a tensor derived from x, so every one
                    // of its outputs is also derived from x.
                    node.outputs.forEach(function (output) { return tensorsFromX[output.id] = true; });
                    anyInputFromX = true;
                    nodesFromX[node.id] = true;
                    break;
                }
            }
            if (anyInputFromX) {
                break;
            }
        }
    }
    // Backward pass to find all of the nodes and Tensors that lead to y.
    var tensorsLeadToY = {};
    tensorsLeadToY[y.id] = true;
    var nodesToY = {};
    for (var i = tape.length - 1; i >= 0; i--) {
        var node = tape[i];
        var nodeInputs = node.inputs;
        // If any of the outputs lead to y, mark all of the inputs as leading to y.
        for (var j = 0; j < node.outputs.length; j++) {
            if (tensorsLeadToY[node.outputs[j].id]) {
                for (var inputName in nodeInputs) {
                    tensorsLeadToY[nodeInputs[inputName].id] = true;
                    nodesToY[node.id] = true;
                }
                break;
            }
        }
    }
    // Return the paths that come from x and lead to y.
    var filteredTape = [];
    for (var i = 0; i < tape.length; i++) {
        var node = tape[i];
        if (nodesFromX[node.id] && nodesToY[node.id]) {
            // Prune the inputs from the node that aren't a function of x.
            var prunedInputs = {};
            for (var inputName in node.inputs) {
                var nodeInput = node.inputs[inputName];
                if (tensorsFromX[nodeInput.id]) {
                    prunedInputs[inputName] = nodeInput;
                }
            }
            // Copy the node and overwrite inputsAndArgs to the pruned version.
            var prunedNode = Object.assign({}, node);
            prunedNode.inputs = prunedInputs;
            prunedNode.outputs = node.outputs;
            filteredTape.push(prunedNode);
        }
    }
    return filteredTape;
}
+ /**
+ * Backpropagate gradients through the filtered TapeNodes.
+ *
+ * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map
+ * is mutated by this method.
+ * @param filteredTape The filtered TapeNodes to backprop through.
+ */
// `tidy` wraps each gradient-function call so intermediate tensors are
// cleaned up; `add` accumulates two gradient tensors into one.
function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) {
    var _loop_1 = function (i) {
        var node = filteredTape[i];
        // Collect dy for each output; null when the output is outside the
        // backprop subgraph.
        var dys = [];
        node.outputs.forEach(function (o) {
            var gradTensor = tensorAccumulatedGradientMap[o.id];
            if (gradTensor != null) {
                dys.push(gradTensor);
            }
            else {
                // This particular output is not in the back-propagation subgraph, so it
                // does not affect the final output, thus we put null for its dy.
                dys.push(null);
            }
        });
        if (node.gradient == null) {
            throw new Error("Cannot compute gradient: gradient function not found " +
                ("for " + node.kernelName + "."));
        }
        // Backprop dy through this node and accumulate gradients over the inputs.
        var inputGradients = node.gradient(dys);
        var _loop_2 = function (inputName) {
            if (!(inputName in inputGradients)) {
                throw new Error("Cannot backprop through input " + inputName + ". " +
                    ("Available gradients found: " + Object.keys(inputGradients) + "."));
            }
            // Call the gradient function.
            var dx = tidy(function () { return inputGradients[inputName](); });
            // Gradients must be float32 and shape-compatible with their input.
            if (dx.dtype !== 'float32') {
                throw new Error("Error in gradient for op " + node.kernelName + ". The gradient of input " +
                    (inputName + " must have 'float32' dtype, but has '" + dx.dtype + "'"));
            }
            var x = node.inputs[inputName];
            if (!arraysEqual(dx.shape, x.shape)) {
                throw new Error("Error in gradient for op " + node.kernelName + ". The gradient of input " +
                    ("'" + inputName + "' has shape '" + dx.shape + "', which does not match ") +
                    ("the shape of the input '" + x.shape + "'"));
            }
            if (tensorAccumulatedGradientMap[x.id] == null) {
                tensorAccumulatedGradientMap[x.id] = dx;
            }
            else {
                // Accumulate, then dispose the tensor that held the previous
                // partial sum so it does not leak.
                var curGradient = tensorAccumulatedGradientMap[x.id];
                tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);
                curGradient.dispose();
            }
        };
        for (var inputName in node.inputs) {
            _loop_2(inputName);
        }
    };
    // Walk the tape backward and keep a map of Tensor to its gradient.
    for (var i = filteredTape.length - 1; i >= 0; i--) {
        _loop_1(i);
    }
}
+
// Formatting knobs shared by tensorToString / subTensorToString below.
// Maximum number of values before we decide to show ellipsis.
var FORMAT_LIMIT_NUM_VALS = 20;
// Number of first and last values to show when displaying a, b,...,y, z.
var FORMAT_NUM_FIRST_LAST_VALS = 3;
// Number of significant digits to show.
var FORMAT_NUM_SIG_DIGITS = 7;
/**
 * Renders flat tensor values as a human-readable multi-line string. When
 * `verbose` is set, dtype/rank/shape header lines precede the values.
 */
function tensorToString(vals, shape, dtype, verbose) {
    var strides = computeStrides(shape);
    var padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);
    var rank = shape.length;
    var valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);
    var lines = ['Tensor'];
    if (verbose) {
        lines.push(" dtype: " + dtype, " rank: " + rank, " shape: [" + shape + "]", " values:");
    }
    var indented = valsLines.map(function (l) { return ' ' + l; });
    lines.push(indented.join('\n'));
    return lines.join('\n');
}
/**
 * Computes, for each column of the innermost dimension, the widest string
 * rendering of any value in that column (used to align printed output).
 * Rank 0/1 tensors get all-zero padding.
 */
function computeMaxSizePerColumn(vals, shape, dtype, strides) {
    var n = sizeFromShape(shape);
    var numCols = strides[strides.length - 1];
    var padPerCol = new Array(numCols).fill(0);
    if (shape.length > 1) {
        var valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals;
        var numRows = n / numCols;
        for (var row = 0; row < numRows; row++) {
            var offset = row * numCols;
            for (var col = 0; col < numCols; col++) {
                var width = valToString(valuesOrTuples[offset + col], 0, dtype).length;
                if (width > padPerCol[col]) {
                    padPerCol[col] = width;
                }
            }
        }
    }
    return padPerCol;
}
/**
 * Renders one value (complex tuple, string, bool, or number) right-padded to
 * `pad` characters. Numbers are rounded to FORMAT_NUM_SIG_DIGITS significant
 * decimals with trailing zeros dropped by the parseFloat round-trip.
 */
function valToString(val, pad, dtype) {
    var valStr;
    if (Array.isArray(val)) {
        // Complex value stored as a [real, imaginary] tuple.
        var re = parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS));
        var im = parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS));
        valStr = re + " + " + (im + "j");
    }
    else if (isString(val)) {
        valStr = "'" + val + "'";
    }
    else if (dtype === 'bool') {
        valStr = boolNumToString(val);
    }
    else {
        valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();
    }
    return rightPad(valStr, pad);
}
/** Maps a stored bool value to its display string: 0 -> 'false', else 'true'. */
function boolNumToString(v) {
    if (v === 0) {
        return 'false';
    }
    return 'true';
}
// Recursively renders one (sub-)tensor as an array of display lines.
// `isLast` controls the trailing separator after this sub-tensor's closing
// bracket when it is embedded in a parent.
function subTensorToString(vals, shape, dtype, strides, padPerCol, isLast) {
    if (isLast === void 0) { isLast = true; }
    // complex64 stores two numbers (re, im) per logical element, so slicing
    // indices below are scaled by this factor.
    var storagePerElement = dtype === 'complex64' ? 2 : 1;
    var size = shape[0];
    var rank = shape.length;
    // Rank 0: a single scalar value.
    if (rank === 0) {
        if (dtype === 'complex64') {
            var complexTuple = createComplexTuples(vals);
            return [valToString(complexTuple[0], 0, dtype)];
        }
        if (dtype === 'bool') {
            return [boolNumToString(vals[0])];
        }
        return [vals[0].toString()];
    }
    // Rank 1: one bracketed row, elided in the middle when too long.
    if (rank === 1) {
        if (size > FORMAT_LIMIT_NUM_VALS) {
            var firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;
            var firstVals = Array.from(vals.slice(0, firstValsSize));
            var lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement));
            if (dtype === 'complex64') {
                firstVals = createComplexTuples(firstVals);
                lastVals = createComplexTuples(lastVals);
            }
            return [
                '[' +
                    firstVals.map(function (x, i) { return valToString(x, padPerCol[i], dtype); })
                        .join(', ') +
                    ', ..., ' +
                    lastVals
                        .map(function (x, i) { return valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype); })
                        .join(', ') +
                    ']'
            ];
        }
        var displayVals = dtype === 'complex64' ? createComplexTuples(vals) :
            Array.from(vals);
        return [
            '[' +
                displayVals.map(function (x, i) { return valToString(x, padPerCol[i], dtype); })
                    .join(', ') +
                ']'
        ];
    }
    // The array is rank 2 or more.
    var subshape = shape.slice(1);
    var substrides = strides.slice(1);
    var stride = strides[0] * storagePerElement;
    var lines = [];
    if (size > FORMAT_LIMIT_NUM_VALS) {
        // Recurse into only the leading and trailing sub-tensors, with a '...'
        // line between them.
        for (var i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {
            var start = i * stride;
            var end = start + stride;
            lines.push.apply(lines, __spread(subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */)));
        }
        lines.push('...');
        for (var i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {
            var start = i * stride;
            var end = start + stride;
            lines.push.apply(lines, __spread(subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */)));
        }
    }
    else {
        for (var i = 0; i < size; i++) {
            var start = i * stride;
            var end = start + stride;
            lines.push.apply(lines, __spread(subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */)));
        }
    }
    // Wrap the collected lines in brackets; only rank-2 rows are
    // comma-separated on every line.
    var sep = rank === 2 ? ',' : '';
    lines[0] = '[' + lines[0] + sep;
    for (var i = 1; i < lines.length - 1; i++) {
        lines[i] = ' ' + lines[i] + sep;
    }
    // Higher ranks get extra blank lines after each closed sub-tensor.
    var newLineSep = ',\n';
    for (var i = 2; i < rank; i++) {
        newLineSep += '\n';
    }
    lines[lines.length - 1] =
        ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);
    return lines;
}
/**
 * Pairs up a flat interleaved array [re0, im0, re1, im1, ...] into a list of
 * [re, im] tuples. An odd-length input yields `undefined` as the final
 * imaginary component, matching the original behavior.
 */
function createComplexTuples(vals) {
    var complexTuples = [];
    var i = 0;
    while (i < vals.length) {
        complexTuples.push([vals[i], vals[i + 1]]);
        i += 2;
    }
    return complexTuples;
}
+
+ /**
+ * A mutable object, similar to `tf.Tensor`, that allows users to set values
+ * at locations before converting to an immutable `tf.Tensor`.
+ *
+ * See `tf.buffer` for creating a tensor buffer.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Classes'}
+ */
var TensorBuffer = /** @class */ (function () {
    /**
     * @param shape The buffer's shape; copied defensively.
     * @param dtype Data type; 'complex64' is rejected (see error below).
     * @param values Optional backing storage; its length must equal the size
     *     implied by `shape`. When omitted, zero-filled storage is allocated.
     */
    function TensorBuffer(shape, dtype, values) {
        var _this = this;
        this.dtype = dtype;
        this.shape = shape.slice();
        this.size = sizeFromShape(shape);
        if (values != null) {
            var n_1 = values.length;
            assert(n_1 === this.size, function () { return "Length of values '" + n_1 + "' does not match the size " +
                ("inferred by the shape '" + _this.size + "'."); });
        }
        if (dtype === 'complex64') {
            throw new Error("complex64 dtype TensorBuffers are not supported. Please create " +
                "a TensorBuffer for the real and imaginary parts separately and " +
                "call tf.complex(real, imag).");
        }
        this.values = values || getArrayFromDType(dtype, this.size);
        this.strides = computeStrides(shape);
    }
    /**
     * Sets a value in the buffer at a given location.
     *
     * @param value The value to set.
     * @param locs The location indices.
     *
     * @doc {heading: 'Tensors', subheading: 'Creation'}
     */
    TensorBuffer.prototype.set = function (value) {
        var _this = this;
        // Transpiled rest parameter: gather coordinates after `value`.
        var locs = [];
        for (var _i = 1; _i < arguments.length; _i++) {
            locs[_i - 1] = arguments[_i];
        }
        if (locs.length === 0) {
            // Scalar buffers are addressed as index [0].
            locs = [0];
        }
        assert(locs.length === this.rank, function () { return "The number of provided coordinates (" + locs.length + ") must " +
            ("match the rank (" + _this.rank + ")"); });
        var index = this.locToIndex(locs);
        this.values[index] = value;
    };
    /**
     * Returns the value in the buffer at the provided location.
     *
     * @param locs The location indices.
     *
     * @doc {heading: 'Tensors', subheading: 'Creation'}
     */
    TensorBuffer.prototype.get = function () {
        var e_1, _b;
        var locs = [];
        for (var _i = 0; _i < arguments.length; _i++) {
            locs[_i] = arguments[_i];
        }
        if (locs.length === 0) {
            locs = [0];
        }
        var i = 0;
        try {
            // Bounds-check each coordinate against its dimension (transpiled
            // for..of iterator protocol).
            for (var locs_1 = __values(locs), locs_1_1 = locs_1.next(); !locs_1_1.done; locs_1_1 = locs_1.next()) {
                var loc = locs_1_1.value;
                if (loc < 0 || loc >= this.shape[i]) {
                    var msg = "Requested out of range element at " + locs + ". " +
                        (" Buffer shape=" + this.shape);
                    throw new Error(msg);
                }
                i++;
            }
        }
        catch (e_1_1) { e_1 = { error: e_1_1 }; }
        finally {
            try {
                if (locs_1_1 && !locs_1_1.done && (_b = locs_1.return)) _b.call(locs_1);
            }
            finally { if (e_1) throw e_1.error; }
        }
        // NOTE(review): this duplicates locToIndex's stride arithmetic inline
        // rather than calling it.
        var index = locs[locs.length - 1];
        for (var i_1 = 0; i_1 < locs.length - 1; ++i_1) {
            index += this.strides[i_1] * locs[i_1];
        }
        return this.values[index];
    };
    // Converts an N-D coordinate into a flat index into `values` using the
    // row-major strides computed in the constructor.
    TensorBuffer.prototype.locToIndex = function (locs) {
        if (this.rank === 0) {
            return 0;
        }
        else if (this.rank === 1) {
            return locs[0];
        }
        var index = locs[locs.length - 1];
        for (var i = 0; i < locs.length - 1; ++i) {
            index += this.strides[i] * locs[i];
        }
        return index;
    };
    // Inverse of locToIndex: converts a flat index back to an N-D coordinate.
    TensorBuffer.prototype.indexToLoc = function (index) {
        if (this.rank === 0) {
            return [];
        }
        else if (this.rank === 1) {
            return [index];
        }
        var locs = new Array(this.shape.length);
        for (var i = 0; i < locs.length - 1; ++i) {
            locs[i] = Math.floor(index / this.strides[i]);
            index -= locs[i] * this.strides[i];
        }
        locs[locs.length - 1] = index;
        return locs;
    };
    Object.defineProperty(TensorBuffer.prototype, "rank", {
        // Rank is derived from the shape rather than stored.
        get: function () {
            return this.shape.length;
        },
        enumerable: true,
        configurable: true
    });
    /**
     * Creates an immutable `tf.Tensor` object from the buffer.
     *
     * @doc {heading: 'Tensors', subheading: 'Creation'}
     */
    TensorBuffer.prototype.toTensor = function () {
        return trackerFn().makeTensor(this.values, this.shape, this.dtype);
    };
    return TensorBuffer;
}());
// For tracking tensor creation and disposal.
var trackerFn = null;
// Used by chaining methods to call into ops.
// NOTE(review): opHandler is only read in this section; it is presumably
// assigned by a setter elsewhere in the file (a counterpart to
// setTensorTracker) — verify before relying on it being non-null.
var opHandler = null;
/**
 * An external consumer can register itself as the tensor tracker. This way
 * the Tensor class can notify the tracker for every tensor created and
 * disposed.
 */
function setTensorTracker(fn) {
    trackerFn = fn;
}
+ /**
+ * A `tf.Tensor` object represents an immutable, multidimensional array of
+ * numbers that has a shape and a data type.
+ *
+ * For performance reasons, functions that create tensors do not necessarily
+ * perform a copy of the data passed to them (e.g. if the data is passed as a
+ * `Float32Array`), and changes to the data will change the tensor. This is not
+ * a feature and is not supported. To avoid this behavior, use the tensor before
+ * changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.
+ *
+ * See `tf.tensor` for details on how to create a `tf.Tensor`.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Classes'}
+ */
var Tensor = /** @class */ (function () {
    /**
     * @param shape Tensor dimensions; copied defensively.
     * @param dtype Data type; defaults to 'float32' when falsy.
     * @param dataId Opaque handle to the backing data held by the tracker.
     * @param id Unique numeric identifier assigned by the engine.
     */
    function Tensor(shape, dtype, dataId, id) {
        /** Whether this tensor has been globally kept. */
        this.kept = false;
        this.isDisposedInternal = false;
        this.shape = shape.slice();
        this.dtype = dtype || 'float32';
        this.size = sizeFromShape(shape);
        this.strides = computeStrides(shape);
        this.dataId = dataId;
        this.id = id;
        // Ranks 0-4 are stringified; anything above is lumped into 'higher'.
        this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher');
    }
    Object.defineProperty(Tensor.prototype, "rank", {
        get: function () {
            return this.shape.length;
        },
        enumerable: true,
        configurable: true
    });
    /**
     * Returns a promise of `tf.TensorBuffer` that holds the underlying data.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.buffer = function () {
        // Transpiled async method: await data(), then wrap it in a buffer.
        return __awaiter(this, void 0, void 0, function () {
            var vals;
            return __generator(this, function (_b) {
                switch (_b.label) {
                    case 0: return [4 /*yield*/, this.data()];
                    case 1:
                        vals = _b.sent();
                        return [2 /*return*/, opHandler.buffer(this.shape, this.dtype, vals)];
                }
            });
        });
    };
    /**
     * Returns a `tf.TensorBuffer` that holds the underlying data.
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.bufferSync = function () {
        return opHandler.buffer(this.shape, this.dtype, this.dataSync());
    };
    /**
     * Returns the tensor data as a nested array. The transfer of data is done
     * asynchronously.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.array = function () {
        return __awaiter(this, void 0, void 0, function () {
            var vals;
            return __generator(this, function (_b) {
                switch (_b.label) {
                    case 0: return [4 /*yield*/, this.data()];
                    case 1:
                        vals = _b.sent();
                        return [2 /*return*/, toNestedArray(this.shape, vals, this.dtype === 'complex64')];
                }
            });
        });
    };
    /**
     * Returns the tensor data as a nested array. The transfer of data is done
     * synchronously.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.arraySync = function () {
        return toNestedArray(this.shape, this.dataSync(), this.dtype === 'complex64');
    };
    /**
     * Asynchronously downloads the values from the `tf.Tensor`. Returns a
     * promise of `TypedArray` that resolves when the computation has finished.
     *
     * String tensors are additionally decoded from bytes to utf-8 strings.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.data = function () {
        return __awaiter(this, void 0, void 0, function () {
            var data, bytes;
            return __generator(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        this.throwIfDisposed();
                        data = trackerFn().read(this.dataId);
                        if (!(this.dtype === 'string')) return [3 /*break*/, 2];
                        return [4 /*yield*/, data];
                    case 1:
                        bytes = _b.sent();
                        try {
                            return [2 /*return*/, bytes.map(function (b) { return decodeString(b); })];
                        }
                        catch (_a) {
                            throw new Error('Failed to decode the string bytes into utf-8. ' +
                                'To get the original bytes, call tensor.bytes().');
                        }
                        _b.label = 2;
                    case 2: return [2 /*return*/, data];
                }
            });
        });
    };
    /**
     * Synchronously downloads the values from the `tf.Tensor`. This blocks the
     * UI thread until the values are ready, which can cause performance issues.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.dataSync = function () {
        this.throwIfDisposed();
        var data = trackerFn().readSync(this.dataId);
        if (this.dtype === 'string') {
            try {
                return data.map(function (b) { return decodeString(b); });
            }
            catch (_a) {
                throw new Error('Failed to decode the string bytes into utf-8. ' +
                    'To get the original bytes, call tensor.bytes().');
            }
        }
        return data;
    };
    /** Returns the underlying bytes of the tensor's data. */
    Tensor.prototype.bytes = function () {
        return __awaiter(this, void 0, void 0, function () {
            var data;
            return __generator(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        this.throwIfDisposed();
                        return [4 /*yield*/, trackerFn().read(this.dataId)];
                    case 1:
                        data = _b.sent();
                        if (this.dtype === 'string') {
                            // String data is already stored as byte arrays.
                            return [2 /*return*/, data];
                        }
                        else {
                            // Reinterpret the typed array's backing buffer as raw bytes.
                            return [2 /*return*/, new Uint8Array(data.buffer)];
                        }
                }
            });
        });
    };
    /**
     * Disposes `tf.Tensor` from memory. Safe to call more than once.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.dispose = function () {
        if (this.isDisposed) {
            return;
        }
        trackerFn().disposeTensor(this);
        this.isDisposedInternal = true;
    };
    Object.defineProperty(Tensor.prototype, "isDisposed", {
        get: function () {
            return this.isDisposedInternal;
        },
        enumerable: true,
        configurable: true
    });
    // Guard used by all data-access methods.
    Tensor.prototype.throwIfDisposed = function () {
        if (this.isDisposed) {
            throw new Error("Tensor is disposed.");
        }
    };
    /**
     * Prints the `tf.Tensor`. See `tf.print` for details.
     *
     * @param verbose Whether to print verbose information about the tensor,
     * including dtype and size.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.print = function (verbose) {
        if (verbose === void 0) { verbose = false; }
        return opHandler.print(this, verbose);
    };
    /**
     * Returns a copy of the tensor. See `tf.clone` for details.
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.clone = function () {
        this.throwIfDisposed();
        return opHandler.clone(this);
    };
    /**
     * Returns a human-readable description of the tensor. Useful for logging.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Tensor.prototype.toString = function (verbose) {
        if (verbose === void 0) { verbose = false; }
        var vals = this.dataSync();
        return tensorToString(vals, this.shape, this.dtype, verbose);
    };
    /** Casts this tensor to a new dtype via the registered op handler. */
    Tensor.prototype.cast = function (dtype) {
        this.throwIfDisposed();
        return opHandler.cast(this, dtype);
    };
    /**
     * Wraps this tensor in a `Variable` via the tracker.
     * @param trainable Whether the variable is trainable; defaults to true.
     */
    Tensor.prototype.variable = function (trainable, name, dtype) {
        if (trainable === void 0) { trainable = true; }
        this.throwIfDisposed();
        return trackerFn().makeVariable(this, trainable, name, dtype);
    };
    return Tensor;
}());
Object.defineProperty(Tensor, Symbol.hasInstance, {
    value: function (instance) {
        // Implementation note: we should use properties of the object that will be
        // defined before the constructor body has finished executing (methods).
        // This is because when this code is transpiled by babel, babel will call
        // classCallCheck before the constructor body is run.
        // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.
        return !!instance && instance.data != null && instance.dataSync != null &&
            instance.throwIfDisposed != null;
    }
});
/**
 * Returns the single shared Tensor class for this process.
 *
 * Use getGlobal so that we can augment the Tensor class across package
 * boundaries becase the node resolution alg may result in different modules
 * being returned for this file depending on the path they are loaded from.
 */
function getGlobalTensorClass() {
    var provideTensor = function () {
        return Tensor;
    };
    return getGlobal('Tensor', provideTensor);
}
// Global side effect. Cache global reference to Tensor class
getGlobalTensorClass();
+ /**
+ * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Classes'}
+ */
var Variable = /** @class */ (function (_super) {
    __extends(Variable, _super);
    /**
     * @param initialValue Tensor providing the shape, dtype, and initial data
     *     handle; the variable aliases its dataId rather than copying.
     * @param trainable Whether optimizers should update this variable.
     * @param name Identifier for the variable.
     * @param tensorId Unique id assigned by the engine.
     */
    function Variable(initialValue, trainable, name, tensorId) {
        var _this = _super.call(this, initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId) || this;
        _this.trainable = trainable;
        _this.name = name;
        return _this;
    }
    /**
     * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have
     * the same shape and dtype as the old `tf.Tensor`.
     *
     * @param newValue New tensor to be assigned to this variable.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    Variable.prototype.assign = function (newValue) {
        if (newValue.dtype !== this.dtype) {
            throw new Error("dtype of the new value (" + newValue.dtype + ") and " +
                ("previous value (" + this.dtype + ") must match"));
        }
        if (!arraysEqual(newValue.shape, this.shape)) {
            throw new Error("shape of the new value (" + newValue.shape + ") and " +
                ("previous value (" + this.shape + ") must match"));
        }
        // Release the old backing data, then alias the new tensor's data
        // (no copy) and bump its reference count.
        trackerFn().disposeTensor(this);
        this.dataId = newValue.dataId;
        trackerFn().incRef(this, null /* backend */);
    };
    Variable.prototype.dispose = function () {
        trackerFn().disposeVariable(this);
        this.isDisposedInternal = true;
    };
    return Variable;
}(Tensor));
Object.defineProperty(Variable, Symbol.hasInstance, {
    // Duck-typed instanceof, consistent with the Tensor override above.
    value: function (instance) {
        return instance instanceof Tensor && instance.assign != null &&
            instance.assign instanceof Function;
    }
});
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/** Tensor rank labels R0 (scalar) through R6. */
var Rank = {
    R0: 'R0',
    R1: 'R1',
    R2: 'R2',
    R3: 'R3',
    R4: 'R4',
    R5: 'R5',
    R6: 'R6'
};
// Looks for upcasting types. Used, for example, in operations with mixed dtype
// inputs. Each map answers: "given my dtype (outer key) and the other
// operand's dtype (inner key), what is the result dtype?"
var UpcastInt32AndMap = {
    'float32': 'float32',
    'int32': 'int32',
    'bool': 'int32',
    'complex64': 'complex64'
};
var UpcastBoolAndMap = {
    'float32': 'float32',
    'int32': 'int32',
    'bool': 'bool',
    'complex64': 'complex64'
};
var UpcastFloat32AndMap = {
    'float32': 'float32',
    'int32': 'float32',
    'bool': 'float32',
    'complex64': 'complex64'
};
var UpcastComplex64AndMap = {
    'float32': 'complex64',
    'int32': 'complex64',
    'bool': 'complex64',
    'complex64': 'complex64'
};
var upcastTypeMap = {
    'float32': UpcastFloat32AndMap,
    'int32': UpcastInt32AndMap,
    'bool': UpcastBoolAndMap,
    'complex64': UpcastComplex64AndMap
};
/**
 * Returns the dtype that both operand dtypes can be safely promoted to.
 * Strings only combine with strings; mixing a string with any numeric dtype
 * throws.
 */
function upcastType(typeA, typeB) {
    var aIsString = typeA === 'string';
    var bIsString = typeB === 'string';
    if (aIsString && bIsString) {
        return 'string';
    }
    if (aIsString || bIsString) {
        throw new Error("Can not upcast " + typeA + " with " + typeB);
    }
    return upcastTypeMap[typeA][typeB];
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Casts both tensors to their common (upcast) dtype when they differ;
 * returns the pair unchanged when the dtypes already match.
 */
function makeTypesMatch(a, b) {
    if (a.dtype !== b.dtype) {
        var common = upcastType(a.dtype, b.dtype);
        return [a.cast(common), b.cast(common)];
    }
    return [a, b];
}
/** Asserts that two tensors share the same dtype. */
function assertTypesMatch(a, b) {
    assert(a.dtype === b.dtype, function () {
        return "The dtypes of the first(" + a.dtype + ") and" +
            " second(" + b.dtype + ") input must match";
    });
}
/**
 * Extracts any `Tensor`s found within the provided object.
 *
 * @param result an object that may be a `Tensor` or may directly contain
 *   `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it
 *   is safe to pass any object here, except that `Promise`s are not
 *   supported.
 * @returns An array of `Tensors` found within the passed object. If the
 *   argument is simply a `Tensor`, a list containing that `Tensor` is
 *   returned. If the object is not a `Tensor` or does not
 *   contain `Tensors`, an empty list is returned.
 */
function getTensorsInContainer(result) {
    var found = [];
    walkTensorContainer(result, found, new Set());
    return found;
}
/**
 * Recursively collects Tensors from `container` into `list`, using `seen`
 * to guard against cycles and repeated sub-objects.
 */
function walkTensorContainer(container, list, seen) {
    if (container == null) {
        return;
    }
    if (container instanceof Tensor) {
        list.push(container);
        return;
    }
    if (!isIterable(container)) {
        return;
    }
    // Iteration over keys works also for arrays.
    var keyed = container;
    for (var key in keyed) {
        var child = keyed[key];
        if (seen.has(child)) {
            continue;
        }
        seen.add(child);
        walkTensorContainer(child, list, seen);
    }
}
// tslint:disable-next-line:no-any
/**
 * Returns true when `obj` is a container we can walk (array or plain
 * object). Fixed: `typeof null === 'object'`, so the original reported
 * null as iterable and relied on callers to pre-filter null/undefined.
 */
function isIterable(obj) {
    return obj != null && (Array.isArray(obj) || typeof obj === 'object');
}
+
/**
 * True when the invocation names a registered kernel (has a non-null
 * `kernelName`), as opposed to a custom-gradient forward function.
 */
function isRegisteredKernelInvocation(kernelInvocation) {
    var kernelName = kernelInvocation.kernelName;
    return kernelName !== undefined && kernelName !== null;
}
var EngineState = /** @class */ (function () {
    /** Mutable bookkeeping shared by the Engine: counters, tape and scopes. */
    function EngineState() {
        // Public since optimizers will use it.
        this.registeredVariables = {};
        this.nextTapeNodeId = 0;
        // Running memory-tracking totals.
        this.numBytes = 0;
        this.numTensors = 0;
        this.numStringTensors = 0;
        this.numDataBuffers = 0;
        // Nesting depth of tf.grad() calls (1 = first-order, 2 = second-order
        // gradients). Used to track if the tape should be removed after a
        // backprop.
        this.gradientDepth = 0;
        // Nesting depth of kernel calls. When greater than 0 the tape is off.
        this.kernelDepth = 0;
        this.scopeStack = [];
        // Per-kernel counts of data moves; a stack because kernels can call
        // other kernels, recursively.
        this.numDataMovesStack = [];
        this.nextScopeId = 0;
        this.tensorInfo = new WeakMap();
        this.profiling = false;
        this.activeProfile = {
            newBytes: 0,
            newTensors: 0,
            peakBytes: 0,
            kernels: [],
            result: null,
            get kernelNames() {
                var names = this.kernels.map(function (k) { return k.name; });
                return Array.from(new Set(names));
            }
        };
    }
    /** Disposes every registered variable. */
    EngineState.prototype.dispose = function () {
        var _this = this;
        Object.keys(this.registeredVariables).forEach(function (variableName) {
            _this.registeredVariables[variableName].dispose();
        });
    };
    return EngineState;
}());
var Engine = /** @class */ (function () {
    /**
     * Central execution engine: owns backend registration/initialization,
     * tensor and memory bookkeeping, the gradient tape, and profiling state.
     * @param ENV environment used for feature flags (e.g. 'DEBUG', 'IS_TEST').
     */
    function Engine(ENV) {
        this.ENV = ENV;
        // backendName -> initialized backend instance.
        this.registry = {};
        // backendName -> {factory, priority}.
        this.registryFactory = {};
        // Incremented to invalidate outdated async backend-init promises.
        this.pendingBackendInitId = 0;
        this.state = new EngineState();
    }
    /**
     * Resolves once some backend is ready: awaits a pending async backend
     * init if one exists; otherwise tries registered backends in priority
     * order and sets the first one that initializes successfully. Throws
     * when every registered backend fails.
     * (Compiled TypeScript async/await state machine — left verbatim.)
     */
    Engine.prototype.ready = function () {
        return __awaiter(this, void 0, void 0, function () {
            var sortedBackends, i, backendName, success;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        if (this.pendingBackendInit != null) {
                            return [2 /*return*/, this.pendingBackendInit.then(function () { })];
                        }
                        if (this.backendInstance != null) {
                            return [2 /*return*/];
                        }
                        sortedBackends = this.getSortedBackends();
                        i = 0;
                        _a.label = 1;
                    case 1:
                        if (!(i < sortedBackends.length)) return [3 /*break*/, 5];
                        backendName = sortedBackends[i];
                        return [4 /*yield*/, this.initializeBackend(backendName).success];
                    case 2:
                        success = _a.sent();
                        if (!success) return [3 /*break*/, 4];
                        return [4 /*yield*/, this.setBackend(backendName)];
                    case 3:
                        _a.sent();
                        return [2 /*return*/];
                    case 4:
                        i++;
                        return [3 /*break*/, 1];
                    case 5: throw new Error("Could not initialize any backends, all backend initializations " +
                        "failed.");
                }
            });
        });
    };
    Object.defineProperty(Engine.prototype, "backend", {
        /**
         * The active backend instance. Throws while an async backend init is
         * pending; when no backend is set yet it lazily initializes the best
         * synchronously-initializable backend as a side effect.
         */
        get: function () {
            if (this.pendingBackendInit != null) {
                throw new Error("Backend '" + this.backendName + "' has not yet been initialized. Make " +
                    "sure to await tf.ready() or await tf.setBackend() before calling " +
                    "other methods");
            }
            if (this.backendInstance == null) {
                var _a = this.initializeBackendsAndReturnBest(), name = _a.name, asyncInit = _a.asyncInit;
                if (asyncInit) {
                    throw new Error("The highest priority backend '" + name + "' has not yet been " +
                        "initialized. Make sure to await tf.ready() or " +
                        "await tf.setBackend() before calling other methods");
                }
                this.setBackend(name);
            }
            return this.backendInstance;
        },
        enumerable: true,
        configurable: true
    });
+ Engine.prototype.backendNames = function () {
+ return Object.keys(this.registryFactory);
+ };
+ Engine.prototype.findBackend = function (backendName) {
+ if (!(backendName in this.registry)) {
+ // If the backend hasn't been initialized but we have a registry entry for
+ // it, initialize it and return it.
+ if (backendName in this.registryFactory) {
+ var asyncInit = this.initializeBackend(backendName).asyncInit;
+ if (asyncInit) {
+ // Backend is not ready yet.
+ return null;
+ }
+ }
+ else {
+ return null;
+ }
+ }
+ return this.registry[backendName];
+ };
+ Engine.prototype.findBackendFactory = function (backendName) {
+ if (!(backendName in this.registryFactory)) {
+ return null;
+ }
+ return this.registryFactory[backendName].factory;
+ };
+ Engine.prototype.registerBackend = function (backendName, factory, priority) {
+ if (priority === void 0) { priority = 1; }
+ if (backendName in this.registryFactory) {
+ warn(backendName + " backend was already registered. " +
+ "Reusing existing backend factory.");
+ return false;
+ }
+ this.registryFactory[backendName] = { factory: factory, priority: priority };
+ return true;
+ };
    /**
     * Makes `backendName` the active backend, initializing it (possibly
     * asynchronously) on first use. Resolves to false when initialization
     * fails; throws when the name was never registered. On success it also
     * runs kernel setup functions and rebuilds the profiler.
     * (Compiled TypeScript async/await state machine — left verbatim.)
     */
    Engine.prototype.setBackend = function (backendName) {
        return __awaiter(this, void 0, void 0, function () {
            var _a, success, asyncInit, result, _b;
            return __generator(this, function (_c) {
                switch (_c.label) {
                    case 0:
                        if (this.registryFactory[backendName] == null) {
                            throw new Error("Backend name '" + backendName + "' not found in registry");
                        }
                        this.backendName = backendName;
                        if (!(this.registry[backendName] == null)) return [3 /*break*/, 4];
                        this.backendInstance = null;
                        _a = this.initializeBackend(backendName), success = _a.success, asyncInit = _a.asyncInit;
                        if (!asyncInit) return [3 /*break*/, 2];
                        return [4 /*yield*/, success];
                    case 1:
                        _b = _c.sent();
                        return [3 /*break*/, 3];
                    case 2:
                        _b = success;
                        _c.label = 3;
                    case 3:
                        result = _b;
                        if (!result) {
                            return [2 /*return*/, false];
                        }
                        _c.label = 4;
                    case 4:
                        this.backendInstance = this.registry[backendName];
                        this.setupRegisteredKernels();
                        // Reset the profiler.
                        this.profiler = new Profiler(this.backendInstance);
                        return [2 /*return*/, true];
                }
            });
        });
    };
+ Engine.prototype.setupRegisteredKernels = function () {
+ var _this = this;
+ var kernels = getKernelsForBackend(this.backendName);
+ kernels.forEach(function (kernel) {
+ if (kernel.setupFunc != null) {
+ kernel.setupFunc(_this.backendInstance);
+ }
+ });
+ };
+ Engine.prototype.disposeRegisteredKernels = function (backendName) {
+ var _this = this;
+ var kernels = getKernelsForBackend(backendName);
+ kernels.forEach(function (kernel) {
+ if (kernel.disposeFunc != null) {
+ kernel.disposeFunc(_this.registry[backendName]);
+ }
+ });
+ };
    /**
     * Initializes a backend by looking up the backend name in the factory
     * registry and calling the factory method. Returns whether initialization
     * succeeded; `asyncInit` is true when the factory returned a promise, in
     * which case `success` is a promise of the eventual outcome. Throws an
     * error if there is no backend in the factory registry.
     */
    Engine.prototype.initializeBackend = function (backendName) {
        var _this = this;
        var registryFactoryEntry = this.registryFactory[backendName];
        if (registryFactoryEntry == null) {
            throw new Error("Cannot initialize backend " + backendName + ", no registration found.");
        }
        try {
            var backend = registryFactoryEntry.factory();
            /* Test if the factory returns a promise.
            Done in a more liberal way than
            previous 'Promise.resolve(backend)===backend'
            as we needed to account for custom Promise
            implementations (e.g. Angular) */
            if (backend && !(backend instanceof KernelBackend) &&
                typeof backend.then === 'function') {
                // Invalidate any earlier pending init by bumping the id.
                var promiseId_1 = ++this.pendingBackendInitId;
                var success = backend
                    .then(function (backendInstance) {
                    // Outdated promise. Another backend was set in the meantime.
                    if (promiseId_1 < _this.pendingBackendInitId) {
                        return false;
                    }
                    _this.registry[backendName] = backendInstance;
                    _this.pendingBackendInit = null;
                    return true;
                })
                    .catch(function (err) {
                    // Outdated promise. Another backend was set in the meantime.
                    if (promiseId_1 < _this.pendingBackendInitId) {
                        return false;
                    }
                    _this.pendingBackendInit = null;
                    warn("Initialization of backend " + backendName + " failed");
                    warn(err.stack || err.message);
                    return false;
                });
                this.pendingBackendInit = success;
                return { success: success, asyncInit: true };
            }
            else {
                this.registry[backendName] = backend;
                return { success: true, asyncInit: false };
            }
        }
        catch (err) {
            warn("Initialization of backend " + backendName + " failed");
            warn(err.stack || err.message);
            return { success: false, asyncInit: false };
        }
    };
    /**
     * Unregisters `backendName`: invalidates any pending async init for it,
     * disposes its kernels and instance if initialized, and clears the active
     * backend when it was the one removed. Throws for unknown names.
     */
    Engine.prototype.removeBackend = function (backendName) {
        if (!(backendName in this.registryFactory)) {
            throw new Error(backendName + " backend not found in registry");
        }
        if (this.backendName === backendName && this.pendingBackendInit != null) {
            // There is a pending promise of the backend we want to remove. Make it
            // obsolete.
            this.pendingBackendInitId++;
        }
        if (backendName in this.registry) {
            this.disposeRegisteredKernels(backendName);
            this.registry[backendName].dispose();
            delete this.registry[backendName];
        }
        delete this.registryFactory[backendName];
        // Unset the backend if it is active.
        if (this.backendName === backendName) {
            this.pendingBackendInit = null;
            this.backendName = null;
            this.backendInstance = null;
        }
    };
+ Engine.prototype.getSortedBackends = function () {
+ var _this = this;
+ if (Object.keys(this.registryFactory).length === 0) {
+ throw new Error('No backend found in registry.');
+ }
+ return Object.keys(this.registryFactory).sort(function (a, b) {
+ // Highest priority comes first.
+ return _this.registryFactory[b].priority -
+ _this.registryFactory[a].priority;
+ });
+ };
+ Engine.prototype.initializeBackendsAndReturnBest = function () {
+ var sortedBackends = this.getSortedBackends();
+ for (var i = 0; i < sortedBackends.length; i++) {
+ var backendName = sortedBackends[i];
+ var _a = this.initializeBackend(backendName), success = _a.success, asyncInit = _a.asyncInit;
+ if (asyncInit || success) {
+ return { name: backendName, asyncInit: asyncInit };
+ }
+ }
+ throw new Error("Could not initialize any backends, all backend initializations " +
+ "failed.");
+ };
    /**
     * Moves the data behind `dataId` from its current backend to `backend`:
     * reads the values synchronously, disposes them on the source backend and
     * writes them to the destination, preserving the ref count.
     */
    Engine.prototype.moveData = function (backend, dataId) {
        var info = this.state.tensorInfo.get(dataId);
        var srcBackend = info.backend;
        var values = this.readSync(dataId);
        var refCount = srcBackend.refCount(dataId);
        // Delete the tensor from the old backend and move it to the new
        // backend.
        srcBackend.disposeData(dataId, true);
        info.backend = backend;
        backend.move(dataId, values, info.shape, info.dtype, refCount);
        if (this.shouldCheckForMemLeaks()) {
            // Track the number of moves during a kernel execution to correctly
            // detect memory leaks.
            this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;
        }
    };
    /**
     * Runs `fn` inside a fresh scope and disposes all tensors created in it
     * except the ones it returns. Accepts `(fn)` or `(name, fn)`; validates
     * the argument shapes and logs an error for async functions (a Promise
     * cannot be tracked by the scope).
     */
    Engine.prototype.tidy = function (nameOrFn, fn) {
        var _this = this;
        var name = null;
        if (fn == null) {
            // Called with only 1 argument.
            if (typeof nameOrFn !== 'function') {
                throw new Error('Please provide a function to tidy()');
            }
            fn = nameOrFn;
        }
        else {
            // Called with 2 arguments.
            if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {
                throw new Error('When calling with two arguments, the first argument ' +
                    'to tidy() must be a string');
            }
            if (typeof fn !== 'function') {
                throw new Error('When calling with two arguments, the 2nd argument ' +
                    'to tidy() must be a function');
            }
            name = nameOrFn;
            // TODO(nsthorat,smilkov): Do operation logging and performance
            // profiling.
        }
        var result;
        // endScope receives `result` so returned tensors survive disposal.
        return this.scopedRun(function () { return _this.startScope(name); }, function () { return _this.endScope(result); }, function () {
            result = fn();
            if (result instanceof Promise) {
                console.error('Cannot return a Promise inside of tidy.');
            }
            return result;
        });
    };
+ Engine.prototype.scopedRun = function (start, end, f) {
+ start();
+ try {
+ var res = f();
+ end();
+ return res;
+ }
+ catch (ex) {
+ end();
+ throw ex;
+ }
+ };
+ Engine.prototype.nextTensorId = function () {
+ return Engine.nextTensorId++;
+ };
+ Engine.prototype.nextVariableId = function () {
+ return Engine.nextVariableId++;
+ };
    /**
     * This method is called instead of the public-facing tensor.clone() when
     * saving a tensor for backwards pass. It makes sure to add the clone
     * operation to the tape regardless of being called inside a kernel
     * execution.
     */
    Engine.prototype.clone = function (x) {
        var y = ENGINE.runKernel(Identity, { x: x });
        var inputs = { x: x };
        // Gradient of clone: cast the incoming gradient to float32.
        var grad = function (dy) { return ({
            x: function () {
                var dtype = 'float32';
                var gradInputs = { x: dy };
                var attrs = { dtype: dtype };
                return ENGINE.runKernel(Cast, gradInputs,
                // tslint:disable-next-line: no-unnecessary-type-assertion
                attrs);
            }
        }); };
        var saved = [];
        this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});
        return y;
    };
    /**
     * Execute a kernel with the given name and return the output tensor.
     *
     * @param kernelName The name of the kernel to execute.
     * @param inputs A map of input names to tensors.
     * @param attrs A map of attribute names to their values. An attribute is a
     *     primitive (non-tensor) input to the kernel.
     * @throws when the kernel is not registered for the active backend.
     */
    Engine.prototype.runKernel = function (kernelName, inputs, attrs) {
        if (this.backendName == null) {
            // backend has not been initialized yet (backend initialization is lazy
            // can be deferred until an op/ kernel is run).
            // The below getter has side effects that will try to initialize the
            // backend and set properties like this.backendName
            // tslint:disable-next-line: no-unused-expression
            this.backend;
        }
        var hasKernel = getKernel(kernelName, this.backendName) != null;
        if (!hasKernel) {
            throw new Error("Kernel '" + kernelName + "' not registered for backend '" + this.backendName + "'");
        }
        return this.runKernelFunc({ kernelName: kernelName, inputs: inputs, attrs: attrs });
    };
+ Engine.prototype.shouldCheckForMemLeaks = function () {
+ return this.ENV.getBool('IS_TEST');
+ };
+ Engine.prototype.checkKernelForMemLeak = function (kernelName, numDataIdsBefore, outInfos) {
+ var numDataIdsAfter = this.backend.numDataIds();
+ // Count the number of data ids associated with the result of the kernel.
+ var numOutputDataIds = 0;
+ outInfos.forEach(function (info) {
+ // Complex numbers allocate 3 data ids, one for 'real', one for
+ // 'imaginary', and one for the container that holds the former two.
+ numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);
+ });
+ // Account for the number of moves during kernel execution. A "data move"
+ // can happen in the middle of a kernel execution, placing a new (key,value)
+ // pair in the data storage. Since data moves have net zero effect (we
+ // always remove the data from the old backend), we have to cancel them out
+ // when detecting memory leaks.
+ var numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];
+ var dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;
+ if (dataIdsLeaked > 0) {
+ throw new Error("Backend '" + this.backendName + "' has an internal memory leak " +
+ ("(" + dataIdsLeaked + " data ids) after running '" + kernelName + "'"));
+ }
+ };
    /**
     * Internal helper method to execute a kernel Func
     *
     * Use `runKernel` to execute kernels from outside of engine.
     * Wraps either a registered kernel or a custom-grad forward function with
     * bookkeeping: memory-leak accounting, tape recording, and profiling.
     */
    Engine.prototype.runKernelFunc = function (kernelParams) {
        var _this = this;
        var outputs;
        var saved = [];
        var isTapeOn = this.isTapeOn();
        var startingBytecount = this.state.numBytes;
        var startingNumTensors = this.state.numTensors;
        if (this.shouldCheckForMemLeaks()) {
            this.state.numDataMovesStack.push(0);
        }
        var kernelFunc;
        if (this.backendName == null) {
            // backend has not been initialized yet (backend initialization is lazy
            // can be deferred until an op/ kernel is run).
            // The below getter has side effects that will try to initialize the
            // backend and set properties like this.backendName
            // tslint:disable-next-line: no-unused-expression
            this.backend;
        }
        var out;
        var kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ?
            kernelParams.kernelName :
            this.state.activeScope != null ? this.state.activeScope.name : '';
        // Create the kernelFunc from either a registered kernel OR passed in
        // forward/backward functions (used by custom grad). In this context a
        // kernelFunc wraps a kernel implementation with some bookkeeping.
        if (isRegisteredKernelInvocation(kernelParams)) {
            var kernelName_1 = kernelParams.kernelName, inputs_1 = kernelParams.inputs, attrs_1 = kernelParams.attrs;
            if (this.backendName == null) {
                // Same lazy-init side effect as above (defensive repeat).
                // tslint:disable-next-line: no-unused-expression
                this.backend;
            }
            var kernel_1 = getKernel(kernelName_1, this.backendName);
            assert(kernel_1 != null, function () { return "Cannot find registered kernel '" + kernelName_1 + "' for backend '" + _this.backendName + "'"; });
            kernelFunc = function () {
                var numDataIdsBefore = _this.backend.numDataIds();
                out = kernel_1.kernelFunc({ inputs: inputs_1, attrs: attrs_1, backend: _this.backend });
                var outInfos = Array.isArray(out) ? out : [out];
                if (_this.shouldCheckForMemLeaks()) {
                    _this.checkKernelForMemLeak(kernelName_1, numDataIdsBefore, outInfos);
                }
                var outTensors = outInfos.map(function (outInfo) {
                    // todo (yassogba) remove this option (Tensor) when node backend
                    // methods have been modularized and they all return tensorInfo.
                    // TensorInfos do not have a rank attribute.
                    if (outInfo.rank != null) {
                        return outInfo;
                    }
                    var dataId = outInfo.dataId, shape = outInfo.shape, dtype = outInfo.dtype;
                    return _this.makeTensorFromDataId(dataId, shape, dtype);
                });
                // Save any required inputs and outputs.
                // Do not save unless we are recording to the tape. Otherwise it would
                // cause a mem leak since there would be no backprop for these tensors
                // (which would otherwise dispose them).
                if (isTapeOn) {
                    var tensorsToSave = _this.getTensorsForGradient(kernelName_1, inputs_1, outTensors);
                    saved = _this.saveTensorsForBackwardMode(tensorsToSave);
                }
                return outTensors;
            };
        }
        else {
            var forwardFunc_1 = kernelParams.forwardFunc;
            // Running a customGrad op.
            var saveFunc_1 = function (tensors) {
                // Do not save unless we are recording to the tape. Otherwise it would
                // cause a mem leak since we would never run backprop, which disposes
                // the kept tensors.
                if (!isTapeOn) {
                    return;
                }
                saved = tensors.map(function (tensor) { return _this.keep(_this.clone(tensor)); });
            };
            kernelFunc = function () {
                var numDataIdsBefore = _this.backend.numDataIds();
                out = _this.tidy(function () { return forwardFunc_1(_this.backend, saveFunc_1); });
                var outs = (Array.isArray(out) ? out : [out]);
                if (_this.shouldCheckForMemLeaks()) {
                    // Scope name is used to print a more helpful error message if needed.
                    _this.checkKernelForMemLeak(kernelOrScopeName, numDataIdsBefore, outs);
                }
                return outs;
            };
        }
        //
        // Run the kernelFunc. Optionally profiling it.
        //
        var inputs = kernelParams.inputs, attrs = kernelParams.attrs;
        var backwardsFunc = isRegisteredKernelInvocation(kernelParams) ?
            null :
            kernelParams.backwardsFunc;
        var kernelProfile;
        this.scopedRun(
        // Stop recording to a tape when running a kernel.
        function () { return _this.state.kernelDepth++; }, function () { return _this.state.kernelDepth--; }, function () {
            if (!_this.ENV.getBool('DEBUG') && !_this.state.profiling) {
                outputs = kernelFunc();
            }
            else {
                kernelProfile = _this.profiler.profileKernel(kernelOrScopeName, inputs, function () { return kernelFunc(); });
                if (_this.ENV.getBool('DEBUG')) {
                    _this.profiler.logKernelProfile(kernelProfile);
                }
                outputs = kernelProfile.outputs;
            }
        });
        if (isTapeOn) {
            this.addTapeNode(kernelOrScopeName, inputs, outputs, backwardsFunc, saved, attrs);
        }
        if (this.state.profiling) {
            this.state.activeProfile.kernels.push({
                name: kernelOrScopeName,
                bytesAdded: this.state.numBytes - startingBytecount,
                totalBytesSnapshot: this.state.numBytes,
                tensorsAdded: this.state.numTensors - startingNumTensors,
                totalTensorsSnapshot: this.state.numTensors,
                inputShapes: Object.keys(inputs).map(function (key) { return inputs[key] != null ? inputs[key].shape : null; }),
                outputShapes: outputs.map(function (item) { return item.shape; }),
                kernelTimeMs: kernelProfile.timeMs,
                extraInfo: kernelProfile.extraInfo
            });
        }
        // Single-output kernels unwrap the array.
        return (Array.isArray(out) ? outputs : outputs[0]);
    };
+ /**
+ * Saves tensors used in forward mode for use in backward mode.
+ *
+ * @param tensors the list of tensors to save.
+ */
+ Engine.prototype.saveTensorsForBackwardMode = function (tensors) {
+ var _this = this;
+ var saved = tensors.map(function (tensor) { return _this.keep(_this.clone(tensor)); });
+ return saved;
+ };
+ /**
+ * Returns a list of tensors to save for a given gradient calculation.
+ *
+ * @param kernelName name of kernel to look up gradient for.
+ * @param inputs a map of input tensors.
+ * @param outputs an array of output tensors from forward mode of kernel.
+ */
+ Engine.prototype.getTensorsForGradient = function (kernelName, inputs, outputs) {
+ var gradConfig = getGradient(kernelName);
+ if (gradConfig != null) {
+ var inputsToSave = gradConfig.inputsToSave || [];
+ var outputsToSave_1 = gradConfig.outputsToSave || [];
+ // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs
+ // specified in inputsToSave will be saved.
+ var inputTensorsToSave = void 0;
+ if (gradConfig.saveAllInputs) {
+ assert(Array.isArray(inputs), function () { return 'saveAllInputs is true, expected inputs to be an array.'; });
+ inputTensorsToSave = Object.keys(inputs).map(function (key) { return inputs[key]; });
+ }
+ else {
+ inputTensorsToSave = inputsToSave.map(function (inputName) { return inputs[inputName]; });
+ }
+ var outputTensorsToSave = outputs.filter(function (_, i) { return outputsToSave_1[i]; });
+ return inputTensorsToSave.concat(outputTensorsToSave);
+ }
+ // We return an empty list rather than throw an error because the kernel we
+ // are looking up may not actually be relevant to backproping through the
+ // overall function
+ //
+ // See 'does not error if irrelevant (pruned) ops are missing grads' test
+ // in gradients_test.ts for an example.
+ return [];
+ };
    /**
     * Internal method used by public APIs for tensor creation. Makes a new
     * tensor with the provided shape, dtype and values. It always
     * creates a new data id and writes the values to the underlying backend.
     * String values are encoded before being written; their byte count is
     * reconciled afterwards since it is only known post-encoding.
     */
    Engine.prototype.makeTensor = function (values, shape, dtype, backend) {
        if (values == null) {
            throw new Error('Values passed to engine.makeTensor() are null');
        }
        dtype = dtype || 'float32';
        backend = backend || this.backend;
        var backendVals = values;
        if (dtype === 'string' && isString(values[0])) {
            backendVals = values.map(function (d) { return encodeString(d); });
        }
        var dataId = backend.write(backendVals, shape, dtype);
        var t = new Tensor(shape, dtype, dataId, this.nextTensorId());
        this.trackTensor(t, backend);
        // Count bytes for string tensors.
        if (dtype === 'string') {
            var info = this.state.tensorInfo.get(dataId);
            var newBytes = bytesFromStringArray(backendVals);
            this.state.numBytes += newBytes - info.bytes;
            info.bytes = newBytes;
        }
        return t;
    };
+ /**
+ * Internal method used by backends. Makes a new tensor
+ * that is a wrapper around an existing data id. It doesn't create
+ * a new data id, only increments the ref count used in memory tracking.
+ */
+ Engine.prototype.makeTensorFromDataId = function (dataId, shape, dtype, backend) {
+ dtype = dtype || 'float32';
+ var t = new Tensor(shape, dtype, dataId, this.nextTensorId());
+ this.trackTensor(t, backend);
+ return t;
+ };
    /**
     * Creates and registers a Variable wrapping `initialValue` (cast to
     * `dtype` when it differs). Throws if a variable with the same name is
     * already registered; bumps the backend ref count for the shared data id.
     */
    Engine.prototype.makeVariable = function (initialValue, trainable, name, dtype) {
        if (trainable === void 0) { trainable = true; }
        name = name || this.nextVariableId().toString();
        if (dtype != null && dtype !== initialValue.dtype) {
            initialValue = initialValue.cast(dtype);
        }
        var v = new Variable(initialValue, trainable, name, this.nextTensorId());
        if (this.state.registeredVariables[v.name] != null) {
            throw new Error("Variable with name " + v.name + " was already registered");
        }
        this.state.registeredVariables[v.name] = v;
        this.incRef(v, this.backend);
        return v;
    };
    /**
     * Registers tensor `a` in the engine's memory accounting: bumps tensor /
     * string-tensor counts and byte totals, records data-buffer info for new
     * data ids, and tracks it in the active scope (variables excluded).
     */
    Engine.prototype.trackTensor = function (a, backend) {
        this.state.numTensors++;
        if (a.dtype === 'string') {
            this.state.numStringTensors++;
        }
        // Bytes for complex numbers are counted by their components. Bytes for
        // string tensors are counted when writing values.
        var bytes = 0;
        if (a.dtype !== 'complex64' && a.dtype !== 'string') {
            bytes = a.size * bytesPerElement(a.dtype);
        }
        this.state.numBytes += bytes;
        if (!this.state.tensorInfo.has(a.dataId)) {
            this.state.numDataBuffers++;
            this.state.tensorInfo.set(a.dataId, {
                backend: backend || this.backend,
                dtype: a.dtype,
                shape: a.shape,
                bytes: bytes
            });
        }
        if (!(a instanceof Variable)) {
            this.track(a);
        }
    };
    // Track the tensor by dataId and increase the refCount for the dataId in the
    // backend.
    // TODO(pyu10055): This is currently used by makeVariable method, to increase
    // refCount on the backend for the dataId. It can potentially be replaced with
    // Identity op indead of calling backend directly.
    Engine.prototype.incRef = function (a, backend) {
        this.trackTensor(a, backend);
        this.backend.incRef(a.dataId);
    };
+ Engine.prototype.removeDataId = function (dataId, backend) {
+ if (this.state.tensorInfo.has(dataId) &&
+ this.state.tensorInfo.get(dataId).backend === backend) {
+ this.state.tensorInfo.delete(dataId);
+ this.state.numDataBuffers--;
+ }
+ };
    /**
     * Releases tensor `a`: decrements counters (string tensors use their
     * recorded byte size; complex64 bytes are counted by their components)
     * and removes the data-id bookkeeping once the backend reports the data
     * fully disposed.
     */
    Engine.prototype.disposeTensor = function (a) {
        if (!this.state.tensorInfo.has(a.dataId)) {
            return;
        }
        var info = this.state.tensorInfo.get(a.dataId);
        this.state.numTensors--;
        if (a.dtype === 'string') {
            this.state.numStringTensors--;
            this.state.numBytes -= info.bytes;
        }
        // Don't count bytes for complex numbers as they are counted by their
        // components.
        if (a.dtype !== 'complex64' && a.dtype !== 'string') {
            var bytes = a.size * bytesPerElement(a.dtype);
            this.state.numBytes -= bytes;
        }
        // Remove the reference to dataId if backend dispose the data successfully
        if (info.backend.disposeData(a.dataId)) {
            this.removeDataId(a.dataId, info.backend);
        }
        // TODO(nsthorat): Construct an error and save the stack trace for
        // debugging when in debug mode. Creating a stack trace is too expensive
        // to do unconditionally.
    };
+ Engine.prototype.disposeVariables = function () {
+ for (var varName in this.state.registeredVariables) {
+ var v = this.state.registeredVariables[varName];
+ this.disposeVariable(v);
+ }
+ };
+ Engine.prototype.disposeVariable = function (v) {
+ this.disposeTensor(v);
+ if (this.state.registeredVariables[v.name] != null) {
+ delete this.state.registeredVariables[v.name];
+ }
+ };
+ Engine.prototype.memory = function () {
+ var info = this.backend.memory();
+ info.numTensors = this.state.numTensors;
+ info.numDataBuffers = this.state.numDataBuffers;
+ info.numBytes = this.state.numBytes;
+ if (this.state.numStringTensors > 0) {
+ info.unreliable = true;
+ if (info.reasons == null) {
+ info.reasons = [];
+ }
+ info.reasons.push('Memory usage by string tensors is approximate ' +
+ '(2 bytes per character)');
+ }
+ return info;
+ };
    /**
     * Profiles `query()`: records per-kernel stats (bytes, tensors, timings)
     * into `state.activeProfile`, awaiting each kernel's async timing info
     * before resolving with the profile object.
     * (Compiled TypeScript async/await + for..of state machine — left verbatim.)
     */
    Engine.prototype.profile = function (query) {
        return __awaiter(this, void 0, void 0, function () {
            var startBytes, startNumTensors, _a, _b, _c, kernel, _d, _e, e_1_1;
            var e_1, _f;
            return __generator(this, function (_g) {
                switch (_g.label) {
                    case 0:
                        this.state.profiling = true;
                        startBytes = this.state.numBytes;
                        startNumTensors = this.state.numTensors;
                        this.state.activeProfile.kernels = [];
                        _a = this.state.activeProfile;
                        return [4 /*yield*/, query()];
                    case 1:
                        _a.result = _g.sent();
                        this.state.profiling = false;
                        this.state.activeProfile.peakBytes = Math.max.apply(Math, __spread(this.state.activeProfile.kernels.map(function (d) { return d.totalBytesSnapshot; })));
                        this.state.activeProfile.newBytes = this.state.numBytes - startBytes;
                        this.state.activeProfile.newTensors =
                            this.state.numTensors - startNumTensors;
                        _g.label = 2;
                    case 2:
                        _g.trys.push([2, 8, 9, 10]);
                        _b = __values(this.state.activeProfile.kernels), _c = _b.next();
                        _g.label = 3;
                    case 3:
                        if (!!_c.done) return [3 /*break*/, 7];
                        kernel = _c.value;
                        _d = kernel;
                        return [4 /*yield*/, kernel.kernelTimeMs];
                    case 4:
                        _d.kernelTimeMs = _g.sent();
                        _e = kernel;
                        return [4 /*yield*/, kernel.extraInfo];
                    case 5:
                        _e.extraInfo = _g.sent();
                        _g.label = 6;
                    case 6:
                        _c = _b.next();
                        return [3 /*break*/, 3];
                    case 7: return [3 /*break*/, 10];
                    case 8:
                        e_1_1 = _g.sent();
                        e_1 = { error: e_1_1 };
                        return [3 /*break*/, 10];
                    case 9:
                        try {
                            if (_c && !_c.done && (_f = _b.return)) _f.call(_b);
                        }
                        finally { if (e_1) throw e_1.error; }
                        return [7 /*endfinally*/];
                    case 10: return [2 /*return*/, this.state.activeProfile];
                }
            });
        });
    };
+ // The gradient tape records only when inside a gradient scope (gradientDepth > 0)
+ // and not nested inside another kernel call (kernelDepth === 0).
+ Engine.prototype.isTapeOn = function () {
+ return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;
+ };
+ // Pushes a node onto the active gradient tape. A gradient config registered for
+ // the kernel takes precedence over the passed-in gradientsFunc; when a gradient
+ // function exists, it is wrapped so that missing upstream dys are replaced with
+ // zero tensors of the matching output shape/dtype.
+ Engine.prototype.addTapeNode = function (kernelName, inputs, outputs, gradientsFunc, saved, attrs) {
+ var _this = this;
+ var tapeNode = { id: this.state.nextTapeNodeId++, kernelName: kernelName, inputs: inputs, outputs: outputs, saved: saved };
+ var gradConfig = getGradient(kernelName);
+ if (gradConfig != null) {
+ gradientsFunc = gradConfig.gradFunc;
+ }
+ if (gradientsFunc != null) {
+ tapeNode.gradient = function (dys) {
+ // TODO(smilkov): To optimize back-prop, pass dys that are not used in
+ // the backprop graph to the user as null instead of zeros
+ dys = dys.map(function (dy, i) {
+ if (dy == null) {
+ var output = outputs[i];
+ var vals = makeZerosTypedArray(output.size, output.dtype);
+ return _this.makeTensor(vals, output.shape, output.dtype);
+ }
+ return dy;
+ });
+ // Grad functions of ops with single outputs expect a dy, while ops
+ // with multiple outputs expect dys (array of dy).
+ return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);
+ };
+ }
+ this.state.activeTape.push(tapeNode);
+ };
+ // Marks a tensor as kept so scope cleanup (see endScope) never disposes it.
+ Engine.prototype.keep = function (result) {
+ result.kept = true;
+ return result;
+ };
+ // Enters a gradient scope; a fresh tape is started only at the outermost level
+ // so nested gradient scopes share one tape.
+ Engine.prototype.startTape = function () {
+ if (this.state.gradientDepth === 0) {
+ this.state.activeTape = [];
+ }
+ this.state.gradientDepth++;
+ };
+ // Leaves the innermost gradient scope (counterpart of startTape).
+ Engine.prototype.endTape = function () {
+ this.state.gradientDepth--;
+ };
+ /**
+ * Start a scope. Use this with endScope() to achieve the same functionality
+ * as scope() without the need for a function closure.
+ */
+ Engine.prototype.startScope = function (name) {
+ // New scopes default to 'unnamed scope' unless a name is supplied.
+ var scopeInfo = {
+ track: [],
+ name: 'unnamed scope',
+ id: this.state.nextScopeId++
+ };
+ if (name) {
+ scopeInfo.name = name;
+ }
+ this.state.scopeStack.push(scopeInfo);
+ this.state.activeScope = scopeInfo;
+ };
+ /**
+ * End a scope. Use this with startScope() to achieve the same functionality
+ * as scope() without the need for a function closure.
+ */
+ // Pops the current scope: disposes tensors tracked in it unless they are kept
+ // or appear in `result`, then re-tracks the surviving result tensors in the
+ // parent scope (or no scope, when the stack empties).
+ Engine.prototype.endScope = function (result) {
+ var _this = this;
+ var tensorsToTrackInParent = getTensorsInContainer(result);
+ var tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map(function (t) { return t.id; }));
+ // Dispose the arrays tracked in this scope.
+ for (var i = 0; i < this.state.activeScope.track.length; i++) {
+ var tensor = this.state.activeScope.track[i];
+ if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {
+ tensor.dispose();
+ }
+ }
+ var oldScope = this.state.scopeStack.pop();
+ this.state.activeScope = this.state.scopeStack.length === 0 ?
+ null :
+ this.state.scopeStack[this.state.scopeStack.length - 1];
+ // Track the current result in the parent scope.
+ tensorsToTrackInParent.forEach(function (tensor) {
+ // Only track the tensor if was allocated in the inner scope and is not
+ // globally kept.
+ if (!tensor.kept && tensor.scopeId === oldScope.id) {
+ _this.track(tensor);
+ }
+ });
+ };
+ /**
+ * Returns gradients of `f` with respect to each of the `xs`. The gradients
+ * returned are of the same length as `xs`, but some might be null if `f`
+ * was not a function of that `x`. It also takes optional dy to multiply the
+ * gradient, which defaults to `1`.
+ */
+ // Computes grads of f w.r.t. each x: runs f on a fresh tape, filters tape nodes
+ // connecting xs to y, backpropagates (seeding with dy or ones), and — when not
+ // computing higher-order gradients — disposes saved tensors and drops the tape.
+ Engine.prototype.gradients = function (f, xs, dy, allowNoGradients) {
+ var _this = this;
+ if (allowNoGradients === void 0) { allowNoGradients = false; }
+ assert(xs.length > 0, function () { return 'gradients() received an empty list of xs.'; });
+ if (dy != null && dy.dtype !== 'float32') {
+ throw new Error("dy must have 'float32' dtype, but has '" + dy.dtype + "'");
+ }
+ var y = this.scopedRun(function () { return _this.startTape(); }, function () { return _this.endTape(); }, function () { return _this.tidy('forward', f); });
+ assert(y instanceof Tensor, function () { return 'The result y returned by f() must be a tensor.'; });
+ // Filter out the nodes that don't connect x => y.
+ var filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);
+ if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {
+ throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' +
+ 'that the f you passed encloses all operations that lead from x ' +
+ 'to y.');
+ }
+ return this.tidy('backward', function () {
+ var accumulatedGradientMap = {};
+ accumulatedGradientMap[y.id] = (dy == null) ? ones$1(y.shape) : dy;
+ // Backprop gradients through the filtered nodes.
+ backpropagateGradients(accumulatedGradientMap, filteredTape,
+ // Pass the tidy function to avoid circular dep with `tape.ts`.
+ function (f) { return _this.tidy(f); },
+ // Pass an add function to avoid a circular dep with `tape.ts`.
+ add$1);
+ var grads = xs.map(function (x) { return accumulatedGradientMap[x.id]; });
+ if (_this.state.gradientDepth === 0) {
+ // This means that we are not computing higher-order gradients
+ // and can clean up the tape.
+ _this.state.activeTape.forEach(function (node) {
+ var e_2, _a;
+ try {
+ for (var _b = __values(node.saved), _c = _b.next(); !_c.done; _c = _b.next()) {
+ var tensor = _c.value;
+ tensor.dispose();
+ }
+ }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+ }
+ finally { if (e_2) throw e_2.error; }
+ }
+ });
+ _this.state.activeTape = null;
+ }
+ return { value: y, grads: grads };
+ });
+ };
+ // Wraps f (which must return {value, gradFunc}) so it runs through runKernelFunc:
+ // the forward pass captures the result, and the backward pass adapts gradFunc's
+ // tensor(s) into the per-input gradient map the engine expects. The returned
+ // closure validates that all args are tensors and that gradFunc yields one
+ // gradient per input.
+ Engine.prototype.customGrad = function (f) {
+ var _this = this;
+ assert(isFunction(f), function () { return 'The f passed in customGrad(f) must be a function.'; });
+ return function () {
+ var inputs = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ inputs[_i] = arguments[_i];
+ }
+ assert(inputs.every(function (t) { return t instanceof Tensor; }), function () { return 'The args passed in customGrad(f)(x1, x2,...) must all be ' +
+ 'tensors'; });
+ var res;
+ var inputMap = {};
+ inputs.forEach(function (input, i) {
+ inputMap[i] = input;
+ });
+ var forwardFunc = function (_, save) {
+ res = f.apply(void 0, __spread(inputs, [save]));
+ assert(res.value instanceof Tensor, function () { return 'The function f passed in customGrad(f) must return an ' +
+ 'object where `obj.value` is a tensor'; });
+ assert(isFunction(res.gradFunc), function () { return 'The function f passed in customGrad(f) must return an ' +
+ 'object where `obj.gradFunc` is a function.'; });
+ return res.value;
+ };
+ var backwardsFunc = function (dy, saved) {
+ var gradRes = res.gradFunc(dy, saved);
+ var grads = Array.isArray(gradRes) ? gradRes : [gradRes];
+ assert(grads.length === inputs.length, function () { return 'The function f passed in customGrad(f) must return an ' +
+ 'object where `obj.gradFunc` is a function that returns ' +
+ 'the same number of tensors as inputs passed to f(...).'; });
+ assert(grads.every(function (t) { return t instanceof Tensor; }), function () { return 'The function f passed in customGrad(f) must return an ' +
+ 'object where `obj.gradFunc` is a function that returns ' +
+ 'a list of only tensors.'; });
+ var gradMap = {};
+ grads.forEach(function (grad, i) {
+ gradMap[i] = function () { return grad; };
+ });
+ return gradMap;
+ };
+ return _this.runKernelFunc({
+ forwardFunc: forwardFunc,
+ backwardsFunc: backwardsFunc,
+ inputs: inputMap,
+ });
+ };
+ };
+ // Synchronously reads tensor values via the backend that owns this dataId.
+ Engine.prototype.readSync = function (dataId) {
+ // Route the read to the correct backend.
+ var info = this.state.tensorInfo.get(dataId);
+ return info.backend.readSync(dataId);
+ };
+ // Asynchronously reads tensor values via the backend that owns this dataId.
+ Engine.prototype.read = function (dataId) {
+ // Route the read to the correct backend.
+ var info = this.state.tensorInfo.get(dataId);
+ return info.backend.read(dataId);
+ };
+ // Times `query` using the backend's timer and attaches the measured wall-clock
+ // duration (`wallMs`) to the returned timing info.
+ Engine.prototype.time = function (query) {
+ return __awaiter(this, void 0, void 0, function () {
+ var start, timingInfo;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ start = now();
+ return [4 /*yield*/, this.backend.time(query)];
+ case 1:
+ timingInfo = _a.sent();
+ timingInfo.wallMs = now() - start;
+ return [2 /*return*/, timingInfo];
+ }
+ });
+ });
+ };
+ /**
+ * Tracks a Tensor in the current scope to be automatically cleaned up
+ * when the current scope ends, and returns the value.
+ *
+ * @param result The Tensor to track in the current scope.
+ */
+ Engine.prototype.track = function (result) {
+ // Only tracked when a scope is active; otherwise the tensor is returned as-is.
+ if (this.state.activeScope != null) {
+ result.scopeId = this.state.activeScope.id;
+ this.state.activeScope.track.push(result);
+ }
+ return result;
+ };
+ // Read-only accessor exposing the engine's variable registry.
+ Object.defineProperty(Engine.prototype, "registeredVariables", {
+ get: function () {
+ return this.state.registeredVariables;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ /**
+ * Resets the engine state. Removes all backends but does not remove
+ * registered backend factories.
+ */
+ Engine.prototype.reset = function () {
+ // Make any pending promise obsolete.
+ this.pendingBackendInitId++;
+ this.state.dispose();
+ this.ENV.reset();
+ this.state = new EngineState();
+ // Dispose every registered backend and its kernels; factories stay registered.
+ for (var backendName in this.registry) {
+ this.disposeRegisteredKernels(backendName);
+ this.registry[backendName].dispose();
+ delete this.registry[backendName];
+ }
+ this.backendName = null;
+ this.backendInstance = null;
+ this.pendingBackendInit = null;
+ };
+ return Engine;
+ }());
+ Engine.nextTensorId = 0;
+ Engine.nextVariableId = 0;
+ // Makes a float32 tensor of ones with the given shape (engine-internal helper).
+ function ones$1(shape) {
+ var values = makeOnesTypedArray(sizeFromShape(shape), 'float32');
+ return ENGINE.makeTensor(values, shape, 'float32');
+ }
+ // Returns the singleton Engine stored on the global namespace as `_tfengine`,
+ // creating it (with a fresh Environment) on first use, and wires the global
+ // environment and tensor tracker to it.
+ function getOrMakeEngine() {
+ var ns = getGlobalNamespace();
+ if (ns._tfengine == null) {
+ var environment = new Environment(ns);
+ ns._tfengine = new Engine(environment);
+ }
+ setEnvironmentGlobal(ns._tfengine.ENV);
+ // Tell the current tensor interface that the global engine is responsible
+ // for tracking.
+ setTensorTracker(function () { return ns._tfengine; });
+ return ns._tfengine;
+ }
+ var ENGINE = getOrMakeEngine();
+ /**
+ * An implementation of the add op for use within engine and tape.
+ *
+ * This allows us to avoid a circular dependency between add.ts and engine.
+ * It is exported to be available in tape tests.
+ */
+ function add$1(a, b) {
+ // We duplicate Add here to avoid a circular dependency with add.ts.
+ var inputs = { a: a, b: b };
+ return ENGINE.runKernel(Add, inputs);
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Infers the shape of a TensorLike value: [] for scalars (and for typed arrays
+ // holding a string tensor's bytes), [length] for typed arrays, and one dimension
+ // per nesting level for arrays — optionally verifying nested-array consistency.
+ function inferShape(val, dtype) {
+ var firstElem = val;
+ if (isTypedArray(val)) {
+ return dtype === 'string' ? [] : [val.length];
+ }
+ if (!Array.isArray(val)) {
+ return []; // Scalar.
+ }
+ var shape = [];
+ while (Array.isArray(firstElem) ||
+ isTypedArray(firstElem) && dtype !== 'string') {
+ shape.push(firstElem.length);
+ firstElem = firstElem[0];
+ }
+ if (Array.isArray(val) &&
+ env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {
+ deepAssertShapeConsistency(val, shape, []);
+ }
+ return shape;
+ }
+ // Recursively asserts that a nested array matches `shape` at every level,
+ // reporting the offending index path (e.g. arr[1][0]) on mismatch.
+ function deepAssertShapeConsistency(val, shape, indices) {
+ indices = indices || [];
+ if (!(Array.isArray(val)) && !isTypedArray(val)) {
+ assert(shape.length === 0, function () { return "Element arr[" + indices.join('][') + "] is a primitive, " +
+ ("but should be an array/TypedArray of " + shape[0] + " elements"); });
+ return;
+ }
+ assert(shape.length > 0, function () { return "Element arr[" + indices.join('][') + "] should be a primitive, " +
+ ("but is an array of " + val.length + " elements"); });
+ assert(val.length === shape[0], function () { return "Element arr[" + indices.join('][') + "] should have " + shape[0] + " " +
+ ("elements, but has " + val.length + " elements"); });
+ var subShape = shape.slice(1);
+ for (var i = 0; i < val.length; ++i) {
+ deepAssertShapeConsistency(val[i], subShape, indices.concat(i));
+ }
+ }
+ // Throws unless the actual dtype satisfies the expected one. 'string_or_numeric'
+ // accepts anything; 'numeric' accepts any non-string dtype; other values must
+ // match exactly.
+ function assertDtype(expectedDtype, actualDType, argName, functionName) {
+ if (expectedDtype === 'string_or_numeric') {
+ return;
+ }
+ if (expectedDtype == null) {
+ throw new Error("Expected dtype cannot be null.");
+ }
+ if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||
+ expectedDtype === 'numeric' && actualDType === 'string') {
+ throw new Error("Argument '" + argName + "' passed to '" + functionName + "' must " +
+ ("be " + expectedDtype + " tensor, but got " + actualDType + " tensor"));
+ }
+ }
+ // Coerces a Tensor or TensorLike (number/boolean/string/array/typed array) into
+ // a Tensor: existing Tensors are dtype-checked and returned as-is; otherwise the
+ // dtype and shape are inferred, values are flattened/typed, and a new tensor is
+ // created on the global engine.
+ function convertToTensor(x, argName, functionName, parseAsDtype) {
+ if (parseAsDtype === void 0) { parseAsDtype = 'numeric'; }
+ if (x instanceof Tensor) {
+ assertDtype(parseAsDtype, x.dtype, argName, functionName);
+ return x;
+ }
+ var inferredDtype = inferDtype(x);
+ // If the user expects a bool/int/float, use that info to update the
+ // inferredDtype when it is not a string.
+ if (inferredDtype !== 'string' &&
+ ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {
+ inferredDtype = parseAsDtype;
+ }
+ assertDtype(parseAsDtype, inferredDtype, argName, functionName);
+ if ((x == null) ||
+ (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&
+ typeof x !== 'boolean' && typeof x !== 'string')) {
+ var type = x == null ? 'null' : x.constructor.name;
+ throw new Error("Argument '" + argName + "' passed to '" + functionName + "' must be a " +
+ ("Tensor or TensorLike, but got '" + type + "'"));
+ }
+ var inferredShape = inferShape(x, inferredDtype);
+ if (!isTypedArray(x) && !Array.isArray(x)) {
+ x = [x];
+ }
+ var skipTypedArray = true;
+ var values = inferredDtype !== 'string' ?
+ toTypedArray(x, inferredDtype) :
+ flatten(x, [], skipTypedArray);
+ return ENGINE.makeTensor(values, inferredShape, inferredDtype);
+ }
+ // Converts an array of Tensor|TensorLike elements via convertToTensor,
+ // labelling each element's errors with its index (e.g. "xs[2]").
+ function convertToTensorArray(arg, argName, functionName, parseAsDtype) {
+ if (parseAsDtype === void 0) { parseAsDtype = 'numeric'; }
+ if (!Array.isArray(arg)) {
+ throw new Error("Argument " + argName + " passed to " + functionName + " must be a " +
+ '`Tensor[]` or `TensorLike[]`');
+ }
+ var tensors = arg;
+ return tensors.map(function (t, i) { return convertToTensor(t, argName + "[" + i + "]", functionName, parseAsDtype); });
+ }
+
+ var OP_SCOPE_SUFFIX = '__op';
+ /**
+ * Used for wrapping functions that perform math operations on
+ * Tensors. The function will be wrapped in a named scope that cleans all
+ * memory usage after the function is done.
+ *
+ * Expects an object with exactly one key (the op name, conventionally ending
+ * in '_') mapping to the implementation; returns a wrapper that runs the
+ * implementation inside an engine scope named '<opName>__op'.
+ */
+ function op(f) {
+ var keys = Object.keys(f);
+ if (keys.length !== 1) {
+ throw new Error("Please provide an object with a single key " +
+ "(operation name) mapping to a function. Got an object with " +
+ (keys.length + " keys."));
+ }
+ var opName = keys[0];
+ var fn = f[opName];
+ // Strip the underscore from the end of the function name.
+ if (opName.endsWith('_')) {
+ opName = opName.substring(0, opName.length - 1);
+ }
+ // add an __op suffix to distinguish ops from kernels in tf.profile
+ opName = opName + OP_SCOPE_SUFFIX;
+ // tslint:disable-next-line:no-any
+ var f2 = function () {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ ENGINE.startScope(opName);
+ try {
+ var result = fn.apply(void 0, __spread(args));
+ if (isPromise(result)) {
+ console.error('Cannot return a Promise inside of tidy.');
+ }
+ ENGINE.endScope(result);
+ return result;
+ }
+ catch (ex) {
+ // Close the scope before rethrowing so the scope stack stays balanced.
+ ENGINE.endScope(null);
+ throw ex;
+ }
+ };
+ Object.defineProperty(f2, 'name', { value: opName, configurable: true });
+ // tslint:disable-next-line:no-any
+ return f2;
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes absolute value element-wise: `abs(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.abs().print(); // or tf.abs(x)
+ * ```
+ * @param x The input `tf.Tensor`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ // Element-wise absolute value; complex64 inputs dispatch the ComplexAbs kernel.
+ function abs_(x) {
+ var $x = convertToTensor(x, 'x', 'abs');
+ if ($x.dtype === 'complex64') {
+ var inputs = { x: $x };
+ return ENGINE.runKernel(ComplexAbs, inputs);
+ }
+ else {
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Abs, inputs);
+ }
+ }
+ var abs = op({ abs_: abs_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes acos of the input `tf.Tensor` element-wise: `acos(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.acos().print(); // or tf.acos(x)
+ * ```
+ * @param x The input tensor.
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ // Converts the input, then dispatches the Acos kernel on the current backend.
+ function acos_(x) {
+ var $x = convertToTensor(x, 'x', 'acos');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Acos, inputs);
+ }
+ var acos = op({ acos_: acos_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the inverse hyperbolic cos of the input `tf.Tensor` element-wise:
+ * `acosh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([10, 1, 3, 5.7]);
+ *
+ * x.acosh().print(); // or tf.acosh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ // Converts the input, then dispatches the Acosh kernel on the current backend.
+ function acosh_(x) {
+ var $x = convertToTensor(x, 'x', 'acosh');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Acosh, inputs);
+ }
+ var acosh = op({ acosh_: acosh_ });
+
+ /**
+ * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
+ *
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
+ *
+ * a.add(b).print(); // or tf.add(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast add a with b.
+ * const a = tf.scalar(5);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
+ *
+ * a.add(b).print(); // or tf.add(a, b)
+ * ```
+ * @param a The first `tf.Tensor` to add.
+ * @param b The second `tf.Tensor` to add. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ // Converts both operands, upcasts them to a common dtype, then dispatches Add.
+ function add_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'add');
+ var $b = convertToTensor(b, 'b', 'add');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Add, inputs);
+ }
+ var add = op({ add_: add_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ *
+ * tf.addN([a, b, c]).print();
+ * ```
+ * @param tensors A list of tensors with the same shape and dtype.
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ // Validates that a non-empty list of tensors shares one dtype and shape,
+ // then dispatches the AddN kernel.
+ function addN_(tensors) {
+ assert(Array.isArray(tensors), function () { return 'The argument passed to tf.addN() must be a list of tensors'; });
+ assert(tensors.length >= 1, function () { return "Must pass at least one tensor to tf.addN(), but got " +
+ ("" + tensors.length); });
+ var $tensors = tensors.map(function (t, i) { return convertToTensor(t, "tensors" + i, 'addN'); });
+ var firstTensor = $tensors[0];
+ $tensors.forEach(function (t) {
+ if (t.dtype !== firstTensor.dtype) {
+ throw new Error('All tensors passed to tf.addN() must have the same dtype');
+ }
+ });
+ $tensors.forEach(function (t) {
+ if (!arraysEqual(t.shape, firstTensor.shape)) {
+ throw new Error('All tensors passed to tf.addN() must have the same shape');
+ }
+ });
+ var inputs = $tensors;
+ return ENGINE.runKernel(AddN, inputs);
+ }
+ var addN = op({ addN_: addN_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the logical and of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and an
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 1, 1], 'bool');
+ *
+ * x.all().print(); // or tf.all(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
+ *
+ * const axis = 1;
+ * x.all(axis).print(); // or tf.all(x, axis)
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype bool.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ // Logical-and reduction over the given axes (default: all); input must be bool.
+ function all_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'all', 'bool');
+ var inputs = { x: $x };
+ var attrs = { axis: axis, keepDims: keepDims };
+ return ENGINE.runKernel(All, inputs, attrs);
+ }
+ var all = op({ all_: all_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the logical or of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and an
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 1, 1], 'bool');
+ *
+ * x.any().print(); // or tf.any(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
+ *
+ * const axis = 1;
+ * x.any(axis).print(); // or tf.any(x, axis)
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype bool.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ // Logical-or reduction over the given axes (default: all); input must be bool.
+ function any_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'any', 'bool');
+ var inputs = { x: $x };
+ var attrs = { axis: axis, keepDims: keepDims };
+ return ENGINE.runKernel(Any, inputs, attrs);
+ }
+ // tslint:disable-next-line:variable-name
+ var any = op({ any_: any_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the indices of the maximum values along an `axis`.
+ *
+ * The result has the same shape as `input` with the dimension along `axis`
+ * removed.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.argMax().print(); // or tf.argMax(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
+ *
+ * const axis = 1;
+ * x.argMax(axis).print(); // or tf.argMax(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ // Indices of the maxima along `axis` (default 0) via the ArgMax kernel.
+ function argMax_(x, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $x = convertToTensor(x, 'x', 'argMax');
+ var inputs = { x: $x };
+ var attrs = { axis: axis };
+ return ENGINE.runKernel(ArgMax, inputs, attrs);
+ }
+ var argMax = op({ argMax_: argMax_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the indices of the minimum values along an `axis`.
+ *
+ * The result has the same shape as `input` with the dimension along `axis`
+ * removed.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.argMin().print(); // or tf.argMin(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
+ *
+ * const axis = 1;
+ * x.argMin(axis).print(); // or tf.argMin(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ // Indices of the minima along `axis` (default 0) via the ArgMin kernel.
+ function argMin_(x, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $x = convertToTensor(x, 'x', 'argMin');
+ var inputs = { x: $x };
+ var attrs = { axis: axis };
+ return ENGINE.runKernel(ArgMin, inputs, attrs);
+ }
+ var argMin = op({ argMin_: argMin_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes asin of the input `tf.Tensor` element-wise: `asin(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.asin().print(); // or tf.asin(x)
+ * ```
+ * @param x The input tensor.
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ // Converts the input, then dispatches the Asin kernel on the current backend.
+ function asin_(x) {
+ var $x = convertToTensor(x, 'x', 'asin');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Asin, inputs);
+ }
+ var asin = op({ asin_: asin_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes inverse hyperbolic sin of the input `tf.Tensor` element-wise:
+ * `asinh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.asinh().print(); // or tf.asinh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function asinh_(x) {
    // Thin dispatch wrapper: element-wise inverse hyperbolic sine.
    var tensor = convertToTensor(x, 'x', 'asinh');
    return ENGINE.runKernel(Asinh, { x: tensor });
}
var asinh = op({ asinh_: asinh_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes atan of the input `tf.Tensor` element-wise: `atan(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.atan().print(); // or tf.atan(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function atan_(x) {
    // Thin dispatch wrapper: element-wise arctangent via the Atan kernel.
    var tensor = convertToTensor(x, 'x', 'atan');
    return ENGINE.runKernel(Atan, { x: tensor });
}
var atan = op({ atan_: atan_ });
+
+ /**
+ * Computes arctangent of `tf.Tensor`s a / b element-wise: `atan2(a, b)`.
+ * Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1.0, 1.0, -1.0, .7]);
+ * const b = tf.tensor1d([2.0, 13.0, 3.5, .21]);
+ *
+ * tf.atan2(a, b).print()
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function atan2_(a, b) {
    var lhs = convertToTensor(a, 'a', 'atan2');
    var rhs = convertToTensor(b, 'b', 'atan2');
    // Upcast both operands to a common dtype before dispatching.
    var matched = __read(makeTypesMatch(lhs, rhs), 2);
    lhs = matched[0];
    rhs = matched[1];
    return ENGINE.runKernel(Atan2, { a: lhs, b: rhs });
}
var atan2 = op({ atan2_: atan2_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes inverse hyperbolic tan of the input `tf.Tensor` element-wise:
+ * `atanh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, .1, -.1, .7]);
+ *
+ * x.atanh().print(); // or tf.atanh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function atanh_(x) {
    // Thin dispatch wrapper: element-wise inverse hyperbolic tangent.
    var tensor = convertToTensor(x, 'x', 'atanh');
    return ENGINE.runKernel(Atanh, { x: tensor });
}
var atanh = op({ atanh_: atanh_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Casts a `tf.Tensor` to a new dtype.
+ *
+ * ```js
+ * const x = tf.tensor1d([1.5, 2.5, 3]);
+ * tf.cast(x, 'int32').print();
+ * ```
+ * @param x The input tensor to be casted.
+ * @param dtype The dtype to cast the input tensor to.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
function cast_(x, dtype) {
    var $x = convertToTensor(x, 'x', 'cast');
    // Reject dtypes the engine does not know about.
    if (!isValidDtype(dtype)) {
        throw new Error("Failed to cast to unknown dtype " + dtype);
    }
    // string<->numeric casts are not supported: exactly one side being a
    // string dtype (XOR) is an error.
    var exactlyOneIsString = (dtype === 'string') !== ($x.dtype === 'string');
    if (exactlyOneIsString) {
        throw new Error('Only strings can be casted to strings');
    }
    return ENGINE.runKernel(Cast, { x: $x }, { dtype: dtype });
}
var cast = op({ cast_: cast_ });
+
/**
 * Builds the Conv2DInfo record for a 2D pooling op by synthesizing a
 * filter shape (pooling preserves the channel count) and delegating to
 * computeConv2DInfo.
 */
function computePool2DInfo(inShape, filterSize, strides, dilations, pad, roundingMode, dataFormat) {
    if (dataFormat === undefined) {
        dataFormat = 'channelsLast';
    }
    var filterHW = parseTupleParam(filterSize);
    var filterHeight = filterHW[0];
    var filterWidth = filterHW[1];
    var filterShape;
    if (dataFormat === 'channelsLast') {
        // Channel axis is last: in-channels == out-channels for pooling.
        filterShape = [filterHeight, filterWidth, inShape[3], inShape[3]];
    }
    else if (dataFormat === 'channelsFirst') {
        filterShape = [filterHeight, filterWidth, inShape[1], inShape[1]];
    }
    else {
        throw new Error("Unknown dataFormat " + dataFormat);
    }
    return computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, false, dataFormat);
}
/**
 * Computes the information for a forward pass of a convolution/pooling
 * operation.
 *
 * Resolves the input layout, stride/dilation tuples and padding into a
 * single record (batch/spatial/channel sizes, pad amounts, output shape).
 *
 * @param inShape Input shape: [batch, H, W, C] for 'channelsLast' or
 *     [batch, C, H, W] for 'channelsFirst'.
 * @param filterShape [filterHeight, filterWidth, ?, filterChannels] —
 *     only indices 0, 1 and 3 are read here.
 * @param strides Stride as a number or tuple (see parseTupleParam).
 * @param dilations Dilation as a number or tuple.
 * @param pad Padding: number, 'same', 'valid', or an explicit pad array.
 * @param roundingMode Optional 'ceil' | 'round' | 'floor' for output sizing.
 * @param depthwise When true, output channels = filterChannels * inChannels.
 * @param dataFormat 'channelsLast' (default) or 'channelsFirst'.
 */
function computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, depthwise, dataFormat) {
    var _a, _b;
    if (depthwise === void 0) { depthwise = false; }
    if (dataFormat === void 0) { dataFormat = 'channelsLast'; }
    // Unpack the input shape according to the data layout.
    var _c = __read([-1, -1, -1, -1], 4), batchSize = _c[0], inHeight = _c[1], inWidth = _c[2], inChannels = _c[3];
    if (dataFormat === 'channelsLast') {
        _a = __read(inShape, 4), batchSize = _a[0], inHeight = _a[1], inWidth = _a[2], inChannels = _a[3];
    }
    else if (dataFormat === 'channelsFirst') {
        _b = __read(inShape, 4), batchSize = _b[0], inChannels = _b[1], inHeight = _b[2], inWidth = _b[3];
    }
    else {
        throw new Error("Unknown dataFormat " + dataFormat);
    }
    // filterShape[2] (the filter's input-channel dim) is intentionally unused.
    var _d = __read(filterShape, 4), filterHeight = _d[0], filterWidth = _d[1], filterChannels = _d[3];
    var _e = __read(parseTupleParam(strides), 2), strideHeight = _e[0], strideWidth = _e[1];
    var _f = __read(parseTupleParam(dilations), 2), dilationHeight = _f[0], dilationWidth = _f[1];
    // Dilation enlarges the receptive field ("atrous" convolution); padding
    // and output sizes are computed against the effective filter size.
    var effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
    var effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
    var _g = getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight, effectiveFilterWidth, roundingMode, dataFormat), padInfo = _g.padInfo, outHeight = _g.outHeight, outWidth = _g.outWidth;
    // Depthwise convs multiply the channel count instead of replacing it.
    var outChannels = depthwise ? filterChannels * inChannels : filterChannels;
    var outShape;
    if (dataFormat === 'channelsFirst') {
        outShape = [batchSize, outChannels, outHeight, outWidth];
    }
    else if (dataFormat === 'channelsLast') {
        outShape = [batchSize, outHeight, outWidth, outChannels];
    }
    return {
        batchSize: batchSize,
        dataFormat: dataFormat,
        inHeight: inHeight,
        inWidth: inWidth,
        inChannels: inChannels,
        outHeight: outHeight,
        outWidth: outWidth,
        outChannels: outChannels,
        padInfo: padInfo,
        strideHeight: strideHeight,
        strideWidth: strideWidth,
        filterHeight: filterHeight,
        filterWidth: filterWidth,
        effectiveFilterHeight: effectiveFilterHeight,
        effectiveFilterWidth: effectiveFilterWidth,
        dilationHeight: dilationHeight,
        dilationWidth: dilationWidth,
        inShape: inShape,
        outShape: outShape,
        filterShape: filterShape
    };
}
/**
 * Computes [outputRows, outputCols] for a square filter of size
 * `fieldSize` applied with `stride` and symmetric `zeroPad`.
 * A nullish zeroPad falls back to computeDefaultPad.
 */
function computeOutputShape2D(inShape, fieldSize, stride, zeroPad, roundingMode) {
    if (zeroPad == null) {
        zeroPad = computeDefaultPad(inShape, fieldSize, stride);
    }
    // Classic conv sizing: (in - filter + 2 * pad) / stride + 1, per axis.
    var outputRows = round$1((inShape[0] - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);
    var outputCols = round$1((inShape[1] - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);
    return [outputRows, outputCols];
}
/**
 * Default (SAME-style) symmetric padding for the first spatial axis,
 * accounting for dilation via the effective filter size.
 */
function computeDefaultPad(inputShape, fieldSize, stride, dilation) {
    if (dilation === undefined) {
        dilation = 1;
    }
    var effectiveSize = getEffectiveFilterSize(fieldSize, dilation);
    return Math.floor((inputShape[0] * (stride - 1) - stride + effectiveSize) / 2);
}
/**
 * Normalizes a stride/dilation/filter-size parameter to a 3-tuple:
 * a scalar is broadcast to all three dims, a 2-tuple gains a trailing 1,
 * and a 3-tuple is returned as-is.
 */
function parseTupleParam(param) {
    if (typeof param === 'number') {
        return [param, param, param];
    }
    return param.length === 2 ? [param[0], param[1], 1] : param;
}
+ /* See https://www.tensorflow.org/api_docs/python/tf/nn/atrous_conv2d
+ * Atrous convolution is equivalent to standard convolution with upsampled
+ * filters with effective_filter_height =
+ * filter_height + (filter_height - 1) * (dilation - 1)
+ * and effective_filter_width =
+ * filter_width + (filter_width - 1) * (dilation - 1),
+ * produced by inserting dilation - 1 zeros along consecutive elements across
+ * the filters' spatial dimensions.
+ * When there is a dilation, this converts a filter dimension to the
+ * effective filter dimension, so it can be used in a standard convolution.
+ */
function getEffectiveFilterSize(filterSize, dilation) {
    // With dilation d, d-1 zeros are inserted between filter taps, so the
    // footprint grows to filterSize + (filterSize - 1) * (d - 1).
    return dilation <= 1
        ? filterSize
        : filterSize + (filterSize - 1) * (dilation - 1);
}
// Resolves a padding spec (number | 'same' | 'valid' | explicit array) into
// per-side pad amounts plus the resulting output height/width.
// filterHeight/filterWidth here are the *effective* (dilated) sizes.
function getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, filterHeight, filterWidth, roundingMode, dataFormat) {
    var padInfo;
    var outHeight;
    var outWidth;
    if (typeof pad === 'number') {
        // Uniform numeric pad on all four sides; 0 is labelled VALID.
        var padType = (pad === 0) ? 'VALID' : 'NUMBER';
        padInfo = { top: pad, bottom: pad, left: pad, right: pad, type: padType };
        var outShape = computeOutputShape2D([inHeight, inWidth], filterHeight, strideHeight, pad, roundingMode);
        outHeight = outShape[0];
        outWidth = outShape[1];
    }
    else if (pad === 'same') {
        // SAME: output = ceil(in / stride); total pad is split top/bottom and
        // left/right, with the extra pixel (if odd) going to bottom/right.
        outHeight = Math.ceil(inHeight / strideHeight);
        outWidth = Math.ceil(inWidth / strideWidth);
        var padAlongHeight = Math.max(0, (outHeight - 1) * strideHeight + filterHeight - inHeight);
        var padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth);
        var top = Math.floor(padAlongHeight / 2);
        var bottom = padAlongHeight - top;
        var left = Math.floor(padAlongWidth / 2);
        var right = padAlongWidth - left;
        padInfo = { top: top, bottom: bottom, left: left, right: right, type: 'SAME' };
    }
    else if (pad === 'valid') {
        // VALID: no padding; only fully-covered windows contribute.
        padInfo = { top: 0, bottom: 0, left: 0, right: 0, type: 'VALID' };
        outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
        outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
    }
    else if (typeof pad === 'object') {
        // Explicit per-side padding; the spatial entries sit at indices
        // [1], [2] for channelsLast and [2], [3] for channelsFirst.
        var top = dataFormat === 'channelsLast' ? pad[1][0] : pad[2][0];
        var bottom = dataFormat === 'channelsLast' ? pad[1][1] : pad[2][1];
        var left = dataFormat === 'channelsLast' ? pad[2][0] : pad[3][0];
        var right = dataFormat === 'channelsLast' ? pad[2][1] : pad[3][1];
        var padType = (top === 0 && bottom === 0 && left === 0 && right === 0) ?
            'VALID' :
            'EXPLICIT';
        padInfo = { top: top, bottom: bottom, left: left, right: right, type: padType };
        outHeight = round$1((inHeight - filterHeight + top + bottom) / strideHeight + 1, roundingMode);
        outWidth = round$1((inWidth - filterWidth + left + right) / strideWidth + 1, roundingMode);
    }
    else {
        throw Error("Unknown padding parameter: " + pad);
    }
    return { padInfo: padInfo, outHeight: outHeight, outWidth: outWidth };
}
/**
 * Rounds a value according to `roundingMode`:
 * 'round' | 'ceil' | 'floor', or truncation when no mode is given.
 * Throws on any other mode string.
 */
function round$1(value, roundingMode) {
    if (!roundingMode) {
        return Math.trunc(value);
    }
    if (roundingMode === 'round') {
        // used for Caffe Conv
        return Math.round(value);
    }
    if (roundingMode === 'ceil') {
        // used for Caffe Pool
        return Math.ceil(value);
    }
    if (roundingMode === 'floor') {
        return Math.floor(value);
    }
    throw new Error("Unknown roundingMode " + roundingMode);
}
// True when every component of the (normalized) 3-tuple equals 1.
function tupleValuesAreOne(param) {
    var dims = parseTupleParam(param);
    return dims[0] === 1 && dims[1] === 1 && dims[2] === 1;
}
// At least one of strides/dilations must be all-ones for many kernels.
function eitherStridesOrDilationsAreOne(strides, dilations) {
    var stridesAreOne = tupleValuesAreOne(strides);
    return stridesAreOne || tupleValuesAreOne(dilations);
}
+ /**
+ * Check validity of pad when using dimRoundingMode.
+ * @param opDesc A string of op description
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid` output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @throws unknown padding parameter
+ */
function checkPadOnDimRoundingMode(opDesc, pad, dimRoundingMode) {
    // Only enforced when a rounding mode was explicitly requested.
    if (dimRoundingMode == null) {
        return;
    }
    if (typeof pad === 'string') {
        // 'same'/'valid' are incompatible with dimRoundingMode.
        throw Error("Error in " + opDesc + ": pad must be an integer when using " +
            ("dimRoundingMode " + dimRoundingMode + " but got pad " + pad + "."));
    }
    if (typeof pad === 'number') {
        assert(isInt(pad), function () { return "Error in " + opDesc + ": pad must be an integer when using " +
            ("dimRoundingMode " + dimRoundingMode + " but got pad " + pad + "."); });
        return;
    }
    if (typeof pad === 'object') {
        // Explicit pad: every per-side entry must be an integer.
        pad.forEach(function (p) {
            p.forEach(function (v) {
                assert(isInt(v), function () { return "Error in " + opDesc + ": pad must be an integer when using " +
                    ("dimRoundingMode " + dimRoundingMode + " but got pad " + v + "."); });
            });
        });
        return;
    }
    throw Error("Error in " + opDesc + ": Unknown padding parameter: " + pad);
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reshapes a `tf.Tensor` to a given shape.
+ *
+ * Given an input tensor, returns a new tensor with the same values as the
+ * input tensor with shape `shape`.
+ *
+ * If one component of shape is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of [-1] flattens into 1-D. At most one component of
+ * shape can be -1.
+ *
+ * If shape is 1-D or higher, then the operation returns a tensor with shape
+ * shape filled with the values of tensor. In this case, the number of
+ * elements implied by shape must be the same as the number of elements in
+ * tensor.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * x.reshape([2, 2]).print();
+ * ```
+ *
+ * @param x The input tensor to be reshaped.
+ * @param shape An array of integers defining the output tensor shape.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
function reshape_(x, shape) {
    // Accepts string or numeric tensors; dispatches to the Reshape kernel.
    var tensor = convertToTensor(x, 'x', 'reshape', 'string_or_numeric');
    return ENGINE.runKernel(Reshape, { x: tensor }, { shape: shape });
}
var reshape = op({ reshape_: reshape_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the 2D average pooling of an image.
+ *
+ * @param x The input tensor, of rank 4 or rank 3 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ */
function avgPool_(x, filterSize, strides, pad, dimRoundingMode) {
    var input = convertToTensor(x, 'x', 'avgPool', 'float32');
    // avgPool exposes no dilation parameter; it is fixed at 1 here.
    var dilations = 1;
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in avgPool: Either strides or dilations must be 1. ' +
        ("Got strides " + strides + " and dilations '" + dilations + "'"); });
    // A rank-3 input is treated as a single-image batch.
    var wasRank3 = input.rank === 3;
    var x4D = wasRank3
        ? reshape(input, [1, input.shape[0], input.shape[1], input.shape[2]])
        : input;
    assert(x4D.rank === 4, function () { return "Error in avgPool: x must be rank 4 but got rank " + x4D.rank + "."; });
    checkPadOnDimRoundingMode('avgPool', pad, dimRoundingMode);
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(AvgPool, { x: x4D }, { filterSize: filterSize, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode });
    res = cast(res, input.dtype);
    // Drop the synthetic batch dim if we added one.
    return wasRank3
        ? reshape(res, [res.shape[1], res.shape[2], res.shape[3]])
        : res;
}
var avgPool = op({ avgPool_: avgPool_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the 3D average pooling.
+ *
+ * ```js
+ * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]);
+ * const result = tf.avgPool3d(x, 2, 1, 'valid');
+ * result.print();
+ * ```
+ *
+ * @param x The input tensor, of rank 5 or rank 4 of shape
+ * `[batch, depth, height, width, inChannels]`.
+ * @param filterSize The filter size:
+ * `[filterDepth, filterHeight, filterWidth]`.
+ * If `filterSize` is a single number,
+ * then `filterDepth == filterHeight == filterWidth`.
+ * @param strides The strides of the pooling:
+ * `[strideDepth, strideHeight, strideWidth]`.
+ * If `strides` is a single number,
+ * then `strideDepth == strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+     * than 1x1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to
+ * "NDHWC". Specify the data format of the input and output data. With the
+ * default format "NDHWC", the data is stored in the order of: [batch,
+ * depth, height, width, channels]. Only "NDHWC" is currently supported.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
function avgPool3d_(x, filterSize, strides, pad, dimRoundingMode, dataFormat) {
    if (dataFormat === undefined) {
        dataFormat = 'NDHWC';
    }
    var input = convertToTensor(x, 'x', 'avgPool3d', 'float32');
    // A rank-4 input is treated as a single-volume batch.
    var wasRank4 = input.rank === 4;
    var x5D = wasRank4
        ? reshape(input, [1, input.shape[0], input.shape[1], input.shape[2], input.shape[3]])
        : input;
    assert(x5D.rank === 5, function () { return "Error in avgPool3d: x must be rank 5 but got rank " + x5D.rank + "."; });
    assert(dataFormat === 'NDHWC', function () { return "Error in avgPool3d: Only NDHWC is currently supported, " +
        ("but got dataFormat of " + dataFormat); });
    checkPadOnDimRoundingMode('avgPool3d', pad, dimRoundingMode);
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(AvgPool3D, { x: x5D }, { filterSize: filterSize, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode, dataFormat: dataFormat });
    res = cast(res, x5D.dtype);
    // Drop the synthetic batch dim if we added one.
    return wasRank4
        ? reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]])
        : res;
}
var avgPool3d = op({ avgPool3d_: avgPool3d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new tensor with the same values and shape as the specified
+ * tensor.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2]);
+ *
+ * x.clone().print();
+ * ```
+ *
+ * @param x The tensor to clone.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function clone_(x) {
    var tensor = convertToTensor(x, 'x', 'clone', 'string_or_numeric');
    // This op is called tf.identity in python, hence the Identity kernel.
    return ENGINE.runKernel(Identity, { x: tensor });
}
var clone = op({ clone_: clone_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Concatenates a list of `tf.Tensor`s along a given axis.
+ *
+ * The tensors ranks and types must match, and their sizes must match in all
+ * dimensions except `axis`.
+ *
+ * Also available are stricter rank-specific methods that assert that
+ * `tensors` are of the given rank:
+ * - `tf.concat1d`
+ * - `tf.concat2d`
+ * - `tf.concat3d`
+ * - `tf.concat4d`
+ *
+ * Except `tf.concat1d` (which does not have axis param), all methods have
+ * same signature as this method.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * a.concat(b).print(); // or a.concat(b)
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.concat([a, b, c]).print();
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 2], [10, 20]]);
+ * const b = tf.tensor2d([[3, 4], [30, 40]]);
+ * const axis = 1;
+ * tf.concat([a, b], axis).print();
+ * ```
+ * @param tensors A list of tensors to concatenate.
+     * @param axis The axis to concatenate along. Defaults to 0 (the first dim).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
function concat_(tensors, axis) {
    if (axis === undefined) {
        axis = 0;
    }
    assert(tensors.length >= 1, function () { return 'Pass at least one tensor to concat'; });
    var $tensors = convertToTensorArray(tensors, 'tensors', 'concat', 'string_or_numeric');
    // complex64 tensors may only be concatenated with other complex64 tensors.
    if ($tensors[0].dtype === 'complex64') {
        for (var i = 0; i < $tensors.length; i++) {
            if ($tensors[i].dtype !== 'complex64') {
                throw new Error("Cannot concatenate complex64 tensors with a tensor\n with dtype " + $tensors[i].dtype + ". ");
            }
        }
    }
    // Single-tensor concat degenerates to a copy.
    if ($tensors.length === 1) {
        return clone($tensors[0]);
    }
    return ENGINE.runKernel(Concat, $tensors, { axis: axis });
}
var concat = op({ concat_: concat_ });
+
+ /**
+ * Computes the dot product of two matrices, A * B. These must be matrices.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2], [1, 2]);
+ * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * a.matMul(b).print(); // or tf.matMul(a, b)
+ * ```
+ * @param a First matrix in dot product operation.
+ * @param b Second matrix in dot product operation.
+ * @param transposeA If true, `a` is transposed before multiplication.
+ * @param transposeB If true, `b` is transposed before multiplication.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
function matMul_(a, b, transposeA, transposeB) {
    if (transposeA === undefined) {
        transposeA = false;
    }
    if (transposeB === undefined) {
        transposeB = false;
    }
    var lhs = convertToTensor(a, 'a', 'matMul');
    var rhs = convertToTensor(b, 'b', 'matMul');
    // Upcast both operands to a common dtype before dispatching.
    var matched = __read(makeTypesMatch(lhs, rhs), 2);
    lhs = matched[0];
    rhs = matched[1];
    return ENGINE.runKernel(BatchMatMul, { a: lhs, b: rhs }, { transposeA: transposeA, transposeB: transposeB });
}
var matMul$1 = op({ matMul_: matMul_ });
+
+ /**
+ * Multiplies two `tf.Tensor`s element-wise, A * B. Supports broadcasting.
+ *
+ * We also expose `tf.mulStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([2, 3, 4, 5]);
+ *
+ * a.mul(b).print(); // or tf.mul(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast mul a with b.
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.scalar(5);
+ *
+ * a.mul(b).print(); // or tf.mul(a, b)
+ * ```
+ * @param a The first tensor to multiply.
+ * @param b The second tensor to multiply. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
function mul_(a, b) {
    var lhs = convertToTensor(a, 'a', 'mul');
    var rhs = convertToTensor(b, 'b', 'mul');
    // Upcast both operands to a common dtype before dispatching.
    var matched = __read(makeTypesMatch(lhs, rhs), 2);
    lhs = matched[0];
    rhs = matched[1];
    return ENGINE.runKernel(Multiply, { a: lhs, b: rhs });
}
var mul = op({ mul_: mul_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes sigmoid element-wise, `1 / (1 + exp(-x))`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, -1, 2, -3]);
+ *
+ * x.sigmoid().print(); // or tf.sigmoid(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function sigmoid_(x) {
    // float32-only wrapper around the Sigmoid kernel.
    var tensor = convertToTensor(x, 'x', 'sigmoid', 'float32');
    return ENGINE.runKernel(Sigmoid, { x: tensor });
}
var sigmoid = op({ sigmoid_: sigmoid_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a slice from a `tf.Tensor` starting at coordinates `begin`
+ * and is of size `size`.
+ *
+ * Also available are stricter rank-specific methods with the same signature
+ * as this method that assert that `x` is of the given rank:
+ * - `tf.slice1d`
+ * - `tf.slice2d`
+ * - `tf.slice3d`
+ * - `tf.slice4d`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * x.slice([1], [2]).print();
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * x.slice([1, 0], [1, 2]).print();
+ * ```
+ * @param x The input `tf.Tensor` to slice from.
+ * @param begin The coordinates to start the slice from. The length can be
+ * less than the rank of x - the rest of the axes will have implicit 0 as
+ * start. Can also be a single number, in which case it specifies the
+ * first axis.
+ * @param size The size of the slice. The length can be less than the rank of
+ * x - the rest of the axes will have implicit -1. A value of -1 requests
+ * the rest of the dimensions in the axis. Can also be a single number,
+ * in which case it specifies the size of the first axis.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
function slice_(x, begin, size) {
    // Slicing works on both numeric and string tensors.
    var input = convertToTensor(x, 'x', 'slice', 'string_or_numeric');
    // A rank-0 tensor has no axes to slice along.
    if (input.rank === 0) {
        throw new Error('Slicing scalar is not possible');
    }
    var sliceInputs = { x: input };
    var sliceAttrs = { begin: begin, size: size };
    return ENGINE.runKernel(Slice, sliceInputs, sliceAttrs);
}
var slice = op({ slice_: slice_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes hyperbolic tangent of the input `tf.Tensor` element-wise: `tanh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, 70]);
+ *
+ * x.tanh().print(); // or tf.tanh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function tanh_(x) {
    // Hyperbolic tangent is only defined over floats here.
    var input = convertToTensor(x, 'x', 'tanh', 'float32');
    return ENGINE.runKernel(Tanh, { x: input });
}
var tanh = op({ tanh_: tanh_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the next state and output of a BasicLSTMCell.
+ *
+ * Returns `[newC, newH]`.
+ *
+ * Derived from tf.contrib.rnn.BasicLSTMCell.
+ *
+ * @param forgetBias Forget bias for the cell.
+ * @param lstmKernel The weights for the cell.
+ * @param lstmBias The bias for the cell.
+ * @param data The input to the cell.
+ * @param c Previous cell state.
+ * @param h Previous cell output.
+ *
+ * @doc {heading: 'Operations', subheading: 'RNN'}
+ */
function basicLSTMCell_(forgetBias, lstmKernel, lstmBias, data, c, h) {
    var bias = convertToTensor(forgetBias, 'forgetBias', 'basicLSTMCell');
    var kernel = convertToTensor(lstmKernel, 'lstmKernel', 'basicLSTMCell');
    var cellBias = convertToTensor(lstmBias, 'lstmBias', 'basicLSTMCell');
    var input = convertToTensor(data, 'data', 'basicLSTMCell');
    var prevC = convertToTensor(c, 'c', 'basicLSTMCell');
    var prevH = convertToTensor(h, 'h', 'basicLSTMCell');
    // One fused matmul computes all four gate pre-activations at once:
    // [input, prevH] x kernel + bias.
    var res = add(matMul$1(concat([input, prevH], 1), kernel), cellBias);
    // Gate layout along columns: i = input_gate, j = new_input,
    // f = forget_gate, o = output_gate.
    var batchSize = res.shape[0];
    var gateCols = res.shape[1] / 4;
    var gateShape = [batchSize, gateCols];
    var i = slice(res, [0, 0], gateShape);
    var j = slice(res, [0, gateCols], gateShape);
    var f = slice(res, [0, gateCols * 2], gateShape);
    var o = slice(res, [0, gateCols * 3], gateShape);
    // Standard LSTM update: newC mixes candidate input with gated old state.
    var newC = add(mul(sigmoid(i), tanh(j)), mul(prevC, sigmoid(add(bias, f))));
    var newH = mul(tanh(newC), sigmoid(o));
    return [newC, newH];
}
var basicLSTMCell = op({ basicLSTMCell_: basicLSTMCell_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
+ * shape `blockShape + [batch]`, interleaves these blocks back into the grid
+ * defined by the spatial dimensions `[1, ..., M]`, to obtain a result with
+ * the same rank as the input. The spatial dimensions of this intermediate
+ * result are then optionally cropped according to `crops` to produce the
+ * output. This is the reverse of `tf.spaceToBatchND`. See below for a precise
+ * description.
+ *
+ * ```js
+ * const x = tf.tensor4d([1, 2, 3, 4], [4, 1, 1, 1]);
+ * const blockShape = [2, 2];
+ * const crops = [[0, 0], [0, 0]];
+ *
+ * x.batchToSpaceND(blockShape, crops).print();
+ * ```
+ *
+ * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
+ * remainingShape`, where spatialShape has `M` dimensions.
+ * @param blockShape A 1-D array. Must have shape `[M]`, all values must
+ * be >= 1.
+ * @param crops A 2-D array. Must have shape `[M, 2]`, all values must be >= 0.
+ * `crops[i] = [cropStart, cropEnd]` specifies the amount to crop from input
+ * dimension `i + 1`, which corresponds to spatial dimension `i`. It is required
+ * that `cropStart[i] + cropEnd[i] <= blockShape[i] * inputShape[i + 1]`
+ *
+ * This operation is equivalent to the following steps:
+ *
+ * 1. Reshape `x` to `reshaped` of shape: `[blockShape[0], ...,
+ * blockShape[M-1], batch / prod(blockShape), x.shape[1], ...,
+ * x.shape[N-1]]`
+ *
+ * 2. Permute dimensions of `reshaped`to produce `permuted` of shape `[batch /
+ * prod(blockShape),x.shape[1], blockShape[0], ..., x.shape[M],
+ * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]`
+ *
+ * 3. Reshape `permuted` to produce `reshapedPermuted` of shape `[batch /
+ * prod(blockShape),x.shape[1] * blockShape[0], ..., x.shape[M] *
+ * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]`
+ *
+ * 4. Crop the start and end of dimensions `[1, ..., M]` of `reshapedPermuted`
+ * according to `crops` to produce the output of shape: `[batch /
+ * prod(blockShape),x.shape[1] * blockShape[0] - crops[0,0] - crops[0,1],
+ * ..., x.shape[M] * blockShape[M-1] - crops[M-1,0] -
+ * crops[M-1,1],x.shape[M+1], ..., x.shape[N-1]]`
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
function batchToSpaceND_(x, blockShape, crops) {
    var input = convertToTensor(x, 'x', 'batchToSpaceND');
    // Product of all block dimensions; the batch axis must be divisible by it.
    var blockProd = blockShape.reduce(function (acc, d) { return acc * d; });
    assert(input.rank >= 1 + blockShape.length, function () {
        return "input rank is " + input.rank + " but should be > than blockShape.length " + blockShape.length;
    });
    assert(crops.length === blockShape.length, function () {
        return "crops.length is " + crops.length + " but should be equal to blockShape.length " + blockShape.length;
    });
    assert(input.shape[0] % blockProd === 0, function () {
        return "input tensor batch is " + input.shape[0] + " but is not divisible by the product of " +
            ("the elements of blockShape " + blockShape.join(' * ') + " === " + blockProd);
    });
    var kernelInputs = { x: input };
    var kernelAttrs = { blockShape: blockShape, crops: crops };
    return ENGINE.runKernel(BatchToSpaceND, kernelInputs, kernelAttrs);
}
var batchToSpaceND = op({ batchToSpaceND_: batchToSpaceND_ });
+
// Pads a tensor's shape with leading 1s until it is rank 4.
// Tensors that are already rank 4 (or higher) are returned unchanged.
function xAs4D(x) {
    switch (x.rank) {
        case 0:
        case 1:
            return reshape(x, [1, 1, 1, x.size]);
        case 2:
            return reshape(x, [1, 1, x.shape[0], x.shape[1]]);
        case 3:
            return reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
        default:
            return x;
    }
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Batch normalization.
+ *
+ * As described in
+ * [http://arxiv.org/abs/1502.03167](http://arxiv.org/abs/1502.03167).
+ *
+ * Mean, variance, scale, and offset can be of two shapes:
+ * - The same shape as the input.
+ * - In the common case, the depth dimension is the last dimension of x, so
+ * the values would be an `tf.Tensor1D` of shape [depth].
+ *
+ * Also available are stricter rank-specific methods with the same signature
+ * as this method that assert that parameters passed are of given rank
+ * - `tf.batchNorm2d`
+ * - `tf.batchNorm3d`
+ * - `tf.batchNorm4d`
+ *
+ * @param x The input Tensor.
+ * @param mean A mean Tensor.
+ * @param variance A variance Tensor.
+ * @param offset An offset Tensor.
+ * @param scale A scale Tensor.
+ * @param varianceEpsilon A small float number to avoid dividing by 0.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
function batchNorm_(x, mean, variance, offset, scale, varianceEpsilon) {
    // Epsilon keeps the normalization denominator away from zero.
    var epsilon = varianceEpsilon == null ? 0.001 : varianceEpsilon;
    var xT = convertToTensor(x, 'x', 'batchNorm');
    var meanT = convertToTensor(mean, 'mean', 'batchNorm');
    var varianceT = convertToTensor(variance, 'variance', 'batchNorm');
    // scale and offset are optional; leave them undefined when absent.
    var scaleT = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var offsetT = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    assert(meanT.rank === varianceT.rank, function () {
        return 'Batch normalization gradient requires mean and variance to have ' +
            'equal ranks.';
    });
    assert(offsetT == null || meanT.rank === offsetT.rank, function () {
        return 'Batch normalization gradient requires mean and offset to have ' +
            'equal ranks.';
    });
    assert(scaleT == null || meanT.rank === scaleT.rank, function () {
        return 'Batch normalization gradient requires mean and scale to have ' +
            'equal ranks.';
    });
    // The fused kernel only accepts 4D inputs; pad the rank, run the kernel,
    // then restore the caller's original shape.
    var x4D = xAs4D(xT);
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(FusedBatchNorm, {
        x: x4D,
        scale: scaleT,
        offset: offsetT,
        mean: meanT,
        variance: varianceT
    }, { varianceEpsilon: epsilon });
    return reshape(res, xT.shape);
}
var batchNorm = op({ batchNorm_: batchNorm_ });
+
+ /**
+ * Batch normalization, strictly for 2D. For the more relaxed version, see
+ * `tf.batchNorm`.
+ *
+ * @param x The input Tensor.
+ * @param mean A mean Tensor.
+ * @param variance A variance Tensor.
+ * @param offset An offset Tensor.
+ * @param scale A scale Tensor.
+ * @param varianceEpsilon A small float number to avoid dividing by 0.
+ */
function batchNorm2d_(x, mean, variance, offset, scale, varianceEpsilon) {
    var xT = convertToTensor(x, 'x', 'batchNorm');
    var meanT = convertToTensor(mean, 'mean', 'batchNorm');
    var varianceT = convertToTensor(variance, 'variance', 'batchNorm');
    var scaleT = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var offsetT = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    // Rank checks specific to the 2D variant; mean/variance/scale/offset may
    // either match x's rank or be rank-1 (per-channel).
    assert(xT.rank === 2, function () {
        return "Error in batchNorm2D: x must be rank 2 but got rank " + xT.rank + ".";
    });
    assert(meanT.rank === 2 || meanT.rank === 1, function () {
        return "Error in batchNorm2D: mean must be rank 2 or rank 1 but " +
            "got rank " + meanT.rank + ".";
    });
    assert(varianceT.rank === 2 || varianceT.rank === 1, function () {
        return "Error in batchNorm2D: variance must be rank 2 or rank 1 " +
            "but got rank " + varianceT.rank + ".";
    });
    if (scaleT != null) {
        assert(scaleT.rank === 2 || scaleT.rank === 1, function () {
            return "Error in batchNorm2D: scale must be rank 2 or rank 1 " +
                "but got rank " + scaleT.rank + ".";
        });
    }
    if (offsetT != null) {
        assert(offsetT.rank === 2 || offsetT.rank === 1, function () {
            return "Error in batchNorm2D: offset must be rank 2 or rank 1 " +
                "but got rank " + offsetT.rank + ".";
        });
    }
    // Delegate the actual computation to the rank-generic implementation.
    return batchNorm(xT, meanT, varianceT, offsetT, scaleT, varianceEpsilon);
}
var batchNorm2d = op({ batchNorm2d_: batchNorm2d_ });
+
+ /**
+ * Batch normalization, strictly for 3D. For the more relaxed version, see
+ * `tf.batchNorm`.
+ *
+ * @param x The input Tensor.
+ * @param mean A mean Tensor.
+ * @param variance A variance Tensor.
+ * @param offset An offset Tensor.
+ * @param scale A scale Tensor.
+ * @param varianceEpsilon A small float number to avoid dividing by 0.
+ */
function batchNorm3d_(x, mean, variance, offset, scale, varianceEpsilon) {
    var xT = convertToTensor(x, 'x', 'batchNorm');
    var meanT = convertToTensor(mean, 'mean', 'batchNorm');
    var varianceT = convertToTensor(variance, 'variance', 'batchNorm');
    var scaleT = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var offsetT = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    // Rank checks specific to the 3D variant; mean/variance/scale/offset may
    // either match x's rank or be rank-1 (per-channel).
    assert(xT.rank === 3, function () {
        return "Error in batchNorm3D: x must be rank 3 but got rank " + xT.rank + ".";
    });
    assert(meanT.rank === 3 || meanT.rank === 1, function () {
        return "Error in batchNorm3D: mean must be rank 3 or rank 1 but " +
            "got rank " + meanT.rank + ".";
    });
    assert(varianceT.rank === 3 || varianceT.rank === 1, function () {
        return "Error in batchNorm3D: variance must be rank 3 or rank 1 " +
            "but got rank " + varianceT.rank + ".";
    });
    if (scaleT != null) {
        assert(scaleT.rank === 3 || scaleT.rank === 1, function () {
            return "Error in batchNorm3D: scale must be rank 3 or rank 1 " +
                "but got rank " + scaleT.rank + ".";
        });
    }
    if (offsetT != null) {
        assert(offsetT.rank === 3 || offsetT.rank === 1, function () {
            return "Error in batchNorm3D: offset must be rank 3 or rank 1 " +
                "but got rank " + offsetT.rank + ".";
        });
    }
    // Delegate the actual computation to the rank-generic implementation.
    return batchNorm(xT, meanT, varianceT, offsetT, scaleT, varianceEpsilon);
}
var batchNorm3d = op({ batchNorm3d_: batchNorm3d_ });
+
+ /**
+ * Batch normalization, strictly for 4D. For the more relaxed version, see
+ * `tf.batchNorm`.
+ *
+ * @param x The input Tensor.
+ * @param mean A mean Tensor.
+ * @param variance A variance Tensor.
+ * @param offset An offset Tensor.
+ * @param scale A scale Tensor.
+ * @param varianceEpsilon A small float number to avoid dividing by 0.
+ */
function batchNorm4d_(x, mean, variance, offset, scale, varianceEpsilon) {
    var xT = convertToTensor(x, 'x', 'batchNorm');
    var meanT = convertToTensor(mean, 'mean', 'batchNorm');
    var varianceT = convertToTensor(variance, 'variance', 'batchNorm');
    var scaleT = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var offsetT = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    // Rank checks specific to the 4D variant; mean/variance/scale/offset may
    // either match x's rank or be rank-1 (per-channel).
    assert(xT.rank === 4, function () {
        return "Error in batchNorm4D: x must be rank 4 but got rank " + xT.rank + ".";
    });
    assert(meanT.rank === 4 || meanT.rank === 1, function () {
        return "Error in batchNorm4D: mean must be rank 4 or rank 1 but " +
            "got rank " + meanT.rank + ".";
    });
    assert(varianceT.rank === 4 || varianceT.rank === 1, function () {
        return "Error in batchNorm4D: variance must be rank 4 or rank 1 " +
            "but got rank " + varianceT.rank + ".";
    });
    if (scaleT != null) {
        assert(scaleT.rank === 4 || scaleT.rank === 1, function () {
            return "Error in batchNorm4D: scale must be rank 4 or rank 1 " +
                "but got rank " + scaleT.rank + ".";
        });
    }
    if (offsetT != null) {
        assert(offsetT.rank === 4 || offsetT.rank === 1, function () {
            return "Error in batchNorm4D: offset must be rank 4 or rank 1 " +
                "but got rank " + offsetT.rank + ".";
        });
    }
    // Delegate the actual computation to the rank-generic implementation.
    return batchNorm(xT, meanT, varianceT, offsetT, scaleT, varianceEpsilon);
}
var batchNorm4d = op({ batchNorm4d_: batchNorm4d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Outputs a vector with length `size` and the same dtype as `weights`.
+ *
+ * If `weights` are empty, then index `i` stores the number of times the value
+ * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the
+ * sum of the value in `weights` at each index where the corresponding value in
+ * `x` is `i`.
+ *
+ * Values in `x` outside of the range [0, size) are ignored.
+ *
+ * @param x The input int tensor, rank 1.
+ * @param weights The weights tensor, must have the same shape as x, or a
+ * length-0 Tensor, in which case it acts as all weights equal to 1.
+ * @param size Non-negative integer.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
function bincount_(x, weights, size) {
    var input = convertToTensor(x, 'x', 'bincount');
    var w = convertToTensor(weights, 'weights', 'bincount');
    assert(input.dtype === 'int32', function () {
        return "Error in bincount: input " +
            ("dtype must be int32, but got " + input.dtype);
    });
    assert(size >= 0, function () {
        return "size must be non-negative, but got " + size + ".";
    });
    // A zero-length weights tensor means "count occurrences" (all weights 1).
    assert(w.size === input.size || w.size === 0, function () {
        return "Error in bincount: weights must have the same size as input or" +
            ("0-length, but got input shape: " + input.shape + ", weights shape: ") +
            (w.shape + ".");
    });
    return ENGINE.runKernel(Bincount, { x: input, weights: w }, { size: size });
}
var bincount = op({ bincount_: bincount_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Return the shape of s0 op s1 with broadcast.
+ *
+ * compute r0, the broadcasted shape as a tensor.
+ * s0, s1 and r0 are all integer vectors.
+ *
+ * This function returns the shape of the result of an operation between
+ * two tensors of size s0 and s1 performed with broadcast.
+ *
+ * @param s0 A tensor representing a shape
+ * @param s1 A tensor representing a shape
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
function broadcastArgs_(s0, s1) {
    var firstShape = convertToTensor(s0, 's0', 'broadcastArgs', 'int32');
    var secondShape = convertToTensor(s1, 's1', 'broadcastArgs', 'int32');
    // Both inputs must be vectors of dimension sizes.
    if (firstShape.rank !== 1) {
        throw new Error('broadcastArgs(): first input must be a vector (rank=1). ' +
            ("Has rank " + firstShape.rank));
    }
    if (secondShape.rank !== 1) {
        throw new Error('broadcastArgs(): second input must be a vector (rank=1). ' +
            ("Has rank " + secondShape.rank));
    }
    return ENGINE.runKernel(BroadcastArgs, { s0: firstShape, s1: secondShape });
}
var broadcastArgs = op({ broadcastArgs_: broadcastArgs_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Broadcast an array to a compatible shape NumPy-style.
+ *
+ * The tensor's shape is compared to the broadcast shape from end to beginning.
+ * Ones are prepended to the tensor's shape until is has the same length as
+ * the broadcast shape. If input.shape[i]==shape[i], the (i+1)-th axis is
+ * already broadcast-compatible. If input.shape[i]==1 and shape[i]==N, then
+ * the input tensor is tiled N times along that axis (using tf.tile).
+ *
+ * @param input The tensor that is to be broadcasted.
+ * @param shape The input is to be broadcast to this shape.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
/**
 * Broadcasts `x` to `shape` NumPy-style: leading 1-dims are prepended to the
 * input's shape, then any size-1 axis is tiled up to the target size.
 *
 * @param x The tensor to broadcast.
 * @param shape The target shape; every entry must be a positive integer.
 * @returns A tensor of shape `shape` sharing the broadcast values of `x`.
 */
function broadcastTo_(x, shape) {
    // Fix: convertToTensor takes (value, argName, functionName) — the two
    // string arguments were previously swapped ('broadcastTo', 'x'), which
    // garbled validation error messages. Every other op in this file passes
    // the arg name first.
    var input = convertToTensor(x, 'x', 'broadcastTo');
    var xShape = input.shape;
    // Reject non-positive or fractional target dimensions.
    if (shape.some(function (d) { return !(d > 0) || d % 1 !== 0; })) {
        throw new Error("broadcastTo(): Invalid broadcast shape [" + shape + "].");
    }
    if (shape.length < input.rank) {
        throw new Error("broadcastTo(): shape.length=" + shape.length + " < input.rank=" + input.rank + ".");
    }
    // Prepend 1-dims so input rank matches the target rank.
    if (shape.length > input.rank) {
        var newShape = input.shape.slice();
        while (newShape.length < shape.length) {
            newShape.unshift(1);
        }
        input = reshape(input, newShape);
    }
    var inputShape = input.shape;
    // reps[i] is how many times axis i must be tiled; 1 means "already fits".
    var reps = Array.from(shape);
    for (var i = shape.length - 1; i >= 0; i--) {
        if (inputShape[i] === shape[i]) {
            reps[i] = 1;
        }
        else if (input.shape[i] !== 1) {
            // Only size-1 axes can be stretched.
            throw new Error("broadcastTo(): [" + xShape + "] cannot be broadcast to [" + shape + "].");
        }
    }
    var axes = reps.map(function (n, i) { return n > 1 ? i : -1; }).filter(function (i) { return i >= 0; });
    if (axes.length === 0) {
        // Nothing to tile; return a copy so callers never alias the input.
        return clone(input);
    }
    // TODO call broadcastTo kernel directly once backends implement broadcastTo
    var inputs = { x: input };
    var attrs = { reps: reps };
    return ENGINE.runKernel(Tile, inputs, attrs);
}
var broadcastTo = op({ broadcastTo_: broadcastTo_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.
+ *
+ * The values are stored in CPU as `TypedArray`. Fill the buffer using
+ * `buffer.set()`, or by modifying directly `buffer.values`.
+ *
+ * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with
+ * those values.
+ *
+ * ```js
+ * // Create a buffer and set values at particular indices.
+ * const buffer = tf.buffer([2, 2]);
+ * buffer.set(3, 0, 0);
+ * buffer.set(5, 1, 0);
+ *
+ * // Convert the buffer back to a tensor.
+ * buffer.toTensor().print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param dtype The dtype of the buffer. Defaults to 'float32'.
+ * @param values The values of the buffer as `TypedArray`. Defaults to
+ * zeros.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates an empty TensorBuffer of the given shape/dtype, optionally seeded
// with `values`. dtype falls back to 'float32' when omitted or falsy
// (undefined, null, '') — the two original checks collapse into one `||`.
function buffer(shape, dtype, values) {
    var resolvedDtype = dtype || 'float32';
    assertNonNegativeIntegerDimensions(shape);
    return new TensorBuffer(shape, resolvedDtype, values);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes ceiling of input `tf.Tensor` element-wise: `ceil(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
+ *
+ * x.ceil().print(); // or tf.ceil(x)
+ * ```
+ * @param x The input Tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function ceil_(x) {
    // Ceiling is only meaningful on floats; coerce accordingly.
    var input = convertToTensor(x, 'x', 'ceil', 'float32');
    return ENGINE.runKernel(Ceil, { x: input });
}
var ceil = op({ ceil_: ceil_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
+ * ```
+ * @param x The input tensor.
+ * @param clipValueMin Lower-bound of range to be clipped to.
+ * @param clipValueMax Upper-bound of range to be clipped to.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function clipByValue_(x, clipValueMin, clipValueMax) {
    var input = convertToTensor(x, 'x', 'clipByValue');
    // An inverted range would make the clip undefined, so reject it early.
    assert((clipValueMin <= clipValueMax), function () {
        return "Error in clip: min (" + clipValueMin + ") must be " +
            ("less than or equal to max (" + clipValueMax + ").");
    });
    var clipInputs = { x: input };
    var clipAttrs = { clipValueMin: clipValueMin, clipValueMax: clipValueMax };
    return ENGINE.runKernel(ClipByValue, clipInputs, clipAttrs);
}
var clipByValue = op({ clipByValue_: clipByValue_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Converts two real numbers to a complex number.
+ *
+ * Given a tensor `real` representing the real part of a complex number, and a
+ * tensor `imag` representing the imaginary part of a complex number, this
+ * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],
+ * where r represents the real part and i represents the imag part.
+ *
+ * The input tensors real and imag must have the same shape.
+ *
+ * ```js
+ * const real = tf.tensor1d([2.25, 3.25]);
+ * const imag = tf.tensor1d([4.75, 5.75]);
+ * const complex = tf.complex(real, imag);
+ *
+ * complex.print();
+ * ```
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function complex_(real, imag) {
    var realT = convertToTensor(real, 'real', 'complex');
    var imagT = convertToTensor(imag, 'imag', 'complex');
    // Real and imaginary parts pair up elementwise, so shapes must agree.
    assertShapesMatch(realT.shape, imagT.shape, "real and imag shapes, " + realT.shape + " and " + imagT.shape + ", " +
        "must match in call to tf.complex().");
    return ENGINE.runKernel(Complex, { real: realT, imag: imagT });
}
var complex = op({ complex_: complex_ });
+
/**
 * Concatenates a list of `tf.Tensor1D`s end to end. See `concat` for details.
 *
 * Example:
 *   A: shape(3) = |r1, g1, b1|
 *   B: shape(2) = |r2, g2|
 *   tf.concat1d([A, B]) == |r1, g1, b1, r2, g2|
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @return The concatenated array.
 */
function concat1d_(tensors) {
    // Rank-1 tensors can only be joined along axis 0.
    return concat(tensors, 0);
}
var concat1d = op({ concat1d_: concat1d_ });
+
/**
 * Concatenates a list of `tf.Tensor2D`s along the given axis. See `concat`
 * for details.
 *
 * Example:
 *   A: shape(2, 3) = | r1, g1, b1 |
 *                    | r2, g2, b2 |
 *   B: shape(2, 3) = | r3, g3, b3 |
 *                    | r4, g4, b4 |
 *
 *   axis = 0 stacks rows -> shape(4, 3):
 *     | r1, g1, b1 |
 *     | r2, g2, b2 |
 *     | r3, g3, b3 |
 *     | r4, g4, b4 |
 *
 *   axis = 1 stacks columns -> shape(2, 6):
 *     | r1, g1, b1, r3, g3, b3 |
 *     | r2, g2, b2, r4, g4, b4 |
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
 * @return The concatenated array.
 */
function concat2d_(tensors, axis) {
    // Thin rank-specific wrapper; the generic concat does the work.
    return concat(tensors, axis);
}
var concat2d = op({ concat2d_: concat2d_ });
+
/**
 * Concatenates a list of `tf.Tensor3D`s along the given axis. See `concat`
 * for details.
 *
 * Example:
 *   A: shape(2, 1, 3) = | r1, g1, b1 |
 *                       | r2, g2, b2 |
 *   B: shape(2, 1, 3) = | r3, g3, b3 |
 *                       | r4, g4, b4 |
 *
 *   axis = 0 -> shape(4, 1, 3):
 *     | r1, g1, b1 |
 *     | r2, g2, b2 |
 *     | r3, g3, b3 |
 *     | r4, g4, b4 |
 *
 *   axis = 1 -> shape(2, 2, 3):
 *     | r1, g1, b1, r3, g3, b3 |
 *     | r2, g2, b2, r4, g4, b4 |
 *
 *   axis = 2 -> shape(2, 1, 6):
 *     | r1, g1, b1, r3, g3, b3 |
 *     | r2, g2, b2, r4, g4, b4 |
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
 * @return The concatenated array.
 */
function concat3d_(tensors, axis) {
    // Thin rank-specific wrapper; the generic concat does the work.
    return concat(tensors, axis);
}
var concat3d = op({ concat3d_: concat3d_ });
+
/**
 * Concatenates a list of `tf.Tensor4D`s along an axis.
 * See `concat` for details.
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
 * @return The concatenated array.
 */
function concat4d_(tensors, axis) {
    // Rank-specific alias; all shape/dtype validation happens inside `concat`.
    return concat(tensors, axis);
}
var concat4d = op({ concat4d_: concat4d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes a 2D convolution over the input x.
 *
 * @param x The input tensor, of rank 4 or rank 3, of shape
 *     `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
 *     assumed.
 * @param filter The filter, rank 4, of shape
 *     `[filterHeight, filterWidth, inDepth, outDepth]`.
 * @param strides The strides of the convolution: `[strideHeight,
 *     strideWidth]`.
 * @param pad The type of padding algorithm.
 *    - `same` and stride 1: output will be of same size as input,
 *       regardless of filter size.
 *    - `valid`: output will be smaller than input if filter is larger
 *       than 1x1.
 *   - For more info, see this guide:
 *     [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
 *          https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
 * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
 *     "NHWC". Specify the data format of the input and output data. With the
 *     default format "NHWC", the data is stored in the order of: [batch,
 *     height, width, channels].
 * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
 *     in which we sample input values across the height and width dimensions
 *     in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
 *     number, then `dilationHeight == dilationWidth`. If it is greater than
 *     1, then all values of `strides` must be 1.
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function conv2d_(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    if (dilations === void 0) { dilations = [1, 1]; }
    var $x = convertToTensor(x, 'x', 'conv2d', 'float32');
    var $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32');
    // Promote a rank-3 input to a singleton batch so the kernel always sees 4D.
    var reshapedTo4D = $x.rank === 3;
    var x4D = reshapedTo4D ?
        reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]) :
        $x;
    assert(x4D.rank === 4, function () {
        return "Error in conv2d: input must be rank 4, but got rank " + x4D.rank + ".";
    });
    assert($filter.rank === 4, function () {
        return "Error in conv2d: filter must be rank 4, but got rank " +
            ($filter.rank + ".");
    });
    checkPadOnDimRoundingMode('conv2d', pad, dimRoundingMode);
    // The channel axis depends on the layout: last for NHWC, second for NCHW.
    var inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];
    assert(inDepth === $filter.shape[2], function () {
        return "Error in conv2d: depth of input (" + inDepth + ") must match " +
            ("input depth for filter " + $filter.shape[2] + ".");
    });
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () {
        return 'Error in conv2D: Either strides or dilations must be 1. ' +
            ("Got strides " + strides + " and dilations '" + dilations + "'");
    });
    var kernelInputs = { x: x4D, filter: $filter };
    var kernelAttrs = {
        strides: strides,
        pad: pad,
        dataFormat: dataFormat,
        dilations: dilations,
        dimRoundingMode: dimRoundingMode
    };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv2D, kernelInputs, kernelAttrs);
    // Strip the synthetic batch dimension if one was added above.
    if (reshapedTo4D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var conv2d$1 = op({ conv2d_: conv2d_ });
+
/**
 * Computes a 1D convolution over the input x.
 *
 * @param x The input tensor, of rank 3 or rank 2, of shape
 *     `[batch, width, inChannels]`. If rank 2, batch of 1 is assumed.
 * @param filter The filter, rank 3, of shape
 *     `[filterWidth, inDepth, outDepth]`.
 * @param stride The number of entries by which the filter is moved right at
 *     each step.
 * @param pad The type of padding algorithm.
 *    - `same` and stride 1: output will be of same size as input,
 *       regardless of filter size.
 *    - `valid`: output will be smaller than input if filter is larger
 *       than 1x1.
 *   - For more info, see this guide:
 *     [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
 *          https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
 * @param dataFormat An optional string from "NWC", "NCW". Defaults to "NWC",
 *     the data is stored in the order of [batch, in_width, in_channels]. Only
 *     "NWC" is currently supported.
 * @param dilation The dilation rate in which we sample input values in
 *     atrous convolution. Defaults to `1`. If it is greater than 1, then
 *     stride must be `1`.
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function conv1d_(x, filter, stride, pad, dataFormat, dilation, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NWC'; }
    if (dilation === void 0) { dilation = 1; }
    var $x = convertToTensor(x, 'x', 'conv1d');
    var $filter = convertToTensor(filter, 'filter', 'conv1d');
    // Promote a rank-2 input to a singleton batch so downstream logic sees 3D.
    var reshapedTo3D = $x.rank === 2;
    var x3D = reshapedTo3D ? reshape($x, [1, $x.shape[0], $x.shape[1]]) : $x;
    assert(x3D.rank === 3, function () {
        return "Error in conv1d: input must be rank 3, but got rank " + x3D.rank + ".";
    });
    assert($filter.rank === 3, function () {
        return "Error in conv1d: filter must be rank 3, but got rank " +
            ($filter.rank + ".");
    });
    checkPadOnDimRoundingMode('conv1d', pad, dimRoundingMode);
    assert(x3D.shape[2] === $filter.shape[1], function () {
        return "Error in conv1d: depth of input (" + x3D.shape[2] + ") must match " +
            ("input depth for filter " + $filter.shape[1] + ".");
    });
    assert(eitherStridesOrDilationsAreOne(stride, dilation), function () {
        return 'Error in conv1D: Either stride or dilation must be 1. ' +
            ("Got stride " + stride + " and dilation '" + dilation + "'");
    });
    assert(dataFormat === 'NWC', function () {
        return "Error in conv1d: got dataFormat of " + dataFormat + " but only NWC is currently supported.";
    });
    // Implement conv1d as a conv2d over a height-1 image: insert a unit
    // height dimension into both the filter and the input, then convolve.
    var filter4D = reshape($filter, [1, $filter.shape[0], $filter.shape[1], $filter.shape[2]]);
    var input4D = reshape(x3D, [x3D.shape[0], 1, x3D.shape[1], x3D.shape[2]]);
    var strides = [1, stride];
    var dilations = [1, dilation];
    var conv2dDataFormat = 'NHWC';
    var res = conv2d$1(input4D, filter4D, strides, pad, conv2dDataFormat, dilations, dimRoundingMode);
    // Squeeze back out the unit height dim, and the batch dim if we added it.
    if (reshapedTo3D) {
        return reshape(res, [res.shape[2], res.shape[3]]);
    }
    return reshape(res, [res.shape[0], res.shape[2], res.shape[3]]);
}
var conv1d = op({ conv1d_: conv1d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the derivative of the input of a 2D convolution.
 *
 * @param xShape The shape of the input: [batch, height, width, inDepth].
 * If length of 3, batch of 1 is assumed.
 * @param dy The derivative of the output, of rank 4 or rank 3 of shape
 *   `[batch, outHeight, outWidth, outDepth]`. If rank 3, batch of 1 is
 * assumed.
 * @param filter The filter, rank 4, of shape
 *     `[filterHeight, filterWidth, inDepth, outDepth]`.
 * @param strides The strides of the convolution: `[strideHeight,
 *     strideWidth]`.
 * @param pad The type of padding algorithm used:
 *    - `same` and stride 1: output will be of same size as input,
 *       regardless of filter size.
 *    - `valid`: output will be smaller than input if filter is larger
 *       than 1x1.
 * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
 *     "NHWC". Specify the data format of the input and output data. With the
 *     default format "NHWC", the data is stored in the order of: [batch,
 *     height, width, channels].
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 * @return The input gradient, rank 4 (or rank 3 if `dy` was rank 3).
 */
function conv2DBackpropInput_(xShape, dy, filter, strides, pad, dataFormat, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    assert(xShape.length === dy.rank, function () { return "Length of inShape " +
        ("(" + xShape.length + ") and rank of dy (" + dy.rank + ") must match"); });
    var xShape4D = xShape;
    var dy4D = dy;
    // Promote rank-3 dy (and its 3-element xShape) to a singleton batch of 1
    // so the kernel always receives 4D operands.
    var reshapedTo4D = false;
    if (dy.rank === 3) {
        reshapedTo4D = true;
        dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
        xShape4D = [1, xShape[0], xShape[1], xShape[2]];
    }
    assert(xShape4D.length === 4, function () { return "Error in conv2dDerInput: inShape must be length 4, but got length " +
        (xShape4D.length + "."); });
    assert(dy4D.rank === 4, function () { return "Error in conv2dDerInput: dy must be rank 4, but got " +
        ("rank " + dy4D.rank); });
    assert(filter.rank === 4, function () { return "Error in conv2dDerInput: filter must be rank 4, but got " +
        ("rank " + filter.rank); });
    // Channel axes depend on layout: last for NHWC, second for NCHW.
    var inDepth = dataFormat === 'NHWC' ? xShape4D[3] : xShape4D[1];
    var outDepth = dataFormat === 'NHWC' ? dy4D.shape[3] : dy4D.shape[1];
    assert(inDepth === filter.shape[2], function () { return "Error in conv2dDerInput: depth of input (" + inDepth + ") must " +
        ("match input depth for filter " + filter.shape[2] + "."); });
    assert(outDepth === filter.shape[3], function () { return "Error in conv2dDerInput: depth of output (" + outDepth + ") must " +
        ("match output depth for filter " + filter.shape[3] + "."); });
    checkPadOnDimRoundingMode('conv2dDerInput', pad, dimRoundingMode);
    var inputs = { dy: dy4D, filter: filter };
    var attrs = { strides: strides, pad: pad, dataFormat: dataFormat, dimRoundingMode: dimRoundingMode, inputShape: xShape4D };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv2DBackpropInput, inputs, attrs);
    // Strip the synthetic batch dimension if one was added above.
    if (reshapedTo4D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var conv2DBackpropInput = op({ conv2DBackpropInput_: conv2DBackpropInput_ });
+
/**
 * Computes the transposed 2D convolution of an image, also known as a
 * deconvolution.
 *
 * @param x The input image, of rank 4 or rank 3, of shape
 *   `[batch, height, width, inDepth]`. If rank 3, batch of 1 is assumed.
 * @param filter The filter, rank 4, of shape
 *     `[filterHeight, filterWidth, outDepth, inDepth]`.
 *     `inDepth` must match `inDepth` in `x`.
 * @param outputShape Output shape, of rank 4 or rank 3:
 *     `[batch, height, width, outDepth]`. If rank 3, batch of 1 is assumed.
 * @param strides The strides of the original convolution:
 *     `[strideHeight, strideWidth]`.
 * @param pad The type of padding algorithm used in the non-transpose version
 *    of the op.
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function conv2dTranspose_(x, filter, outputShape, strides, pad, dimRoundingMode) {
    var $x = convertToTensor(x, 'x', 'conv2dTranspose');
    var $filter = convertToTensor(filter, 'filter', 'conv2dTranspose');
    // A transposed convolution is the gradient of conv2d with respect to its
    // input, so delegate to the input-backprop op with the target shape.
    return conv2DBackpropInput(outputShape, $x, $filter, strides, pad, 'NHWC', dimRoundingMode);
}
var conv2dTranspose = op({ conv2dTranspose_: conv2dTranspose_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes a 3D convolution over the input x.
 *
 * @param x The input tensor, of rank 5 or rank 4, of shape
 *     `[batch, depth, height, width, channels]`. If rank 4,
 * batch of 1 is assumed.
 * @param filter The filter, rank 5, of shape
 *     `[filterDepth, filterHeight, filterWidth, inChannels, outChannels]`.
 *      inChannels must match between input and filter.
 * @param strides The strides of the convolution: `[strideDepth, strideHeight,
 * strideWidth]`.
 * @param pad The type of padding algorithm.
 *    - `same` and stride 1: output will be of same size as input,
 *       regardless of filter size.
 *    - `valid`: output will be smaller than input if filter is larger
 *       than 1x1.
 *   - For more info, see this guide:
 *     [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
 *          https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
 * @param dataFormat: An optional string from: "NDHWC", "NCDHW". Defaults to
 *     "NDHWC". Specify the data format of the input and output data. With the
 *     default format "NDHWC", the data is stored in the order of: [batch,
 *     depth, height, width, channels]. Only "NDHWC" is currently supported.
 * @param dilations The dilation rates: `[dilationDepth, dilationHeight,
 *     dilationWidth]` in which we sample input values across the height
 *     and width dimensions in atrous convolution. Defaults to `[1, 1, 1]`.
 *     If `dilations` is a single number, then
 *     `dilationDepth == dilationHeight == dilationWidth`. If it is greater
 *     than 1, then all values of `strides` must be 1.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function conv3d_(x, filter, strides, pad, dataFormat, dilations) {
    if (dataFormat === void 0) { dataFormat = 'NDHWC'; }
    if (dilations === void 0) { dilations = [1, 1, 1]; }
    var $x = convertToTensor(x, 'x', 'conv3d');
    var $filter = convertToTensor(filter, 'filter', 'conv3d');
    // Promote a rank-4 input to a singleton batch so the kernel always sees 5D.
    var reshapedTo5D = $x.rank === 4;
    var x5D = reshapedTo5D ?
        reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]) :
        $x;
    assert(x5D.rank === 5, function () {
        return "Error in conv3d: input must be rank 5, but got rank " + x5D.rank + ".";
    });
    assert($filter.rank === 5, function () {
        return "Error in conv3d: filter must be rank 5, but got rank " +
            ($filter.rank + ".");
    });
    assert(x5D.shape[4] === $filter.shape[3], function () {
        return "Error in conv3d: depth of input (" + x5D.shape[4] + ") must match " +
            ("input depth for filter " + $filter.shape[3] + ".");
    });
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () {
        return 'Error in conv3D: Either strides or dilations must be 1. ' +
            ("Got strides " + strides + " and dilations '" + dilations + "'");
    });
    assert(dataFormat === 'NDHWC', function () {
        return "Error in conv3d: got dataFormat of " + dataFormat + " but only NDHWC is currently supported.";
    });
    var kernelInputs = { x: x5D, filter: $filter };
    var kernelAttrs = { strides: strides, pad: pad, dataFormat: dataFormat, dilations: dilations };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv3D, kernelInputs, kernelAttrs);
    // Strip the synthetic batch dimension if one was added above.
    if (reshapedTo5D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
    }
    return res;
}
var conv3d = op({ conv3d_: conv3d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the derivative of the input of a 3D convolution.
 *
 * @param xShape The shape of the input: [batch, depth, height, width,
 * in_channels]. If length of 4, batch of 1 is assumed.
 * @param dy The derivative of the output, of rank 5 or rank 4 of shape
 *   `[batch, outDepth, outHeight, outWidth, in_channels]`.
 * If rank 4, batch of 1 is assumed.
 * @param filter The filter, rank 5, of shape
 *     `[filterDepth, filterHeight, filterWidth, inDepth, outDepth]`.
 * @param strides The strides of the convolution: `[strideDepth, strideHeight,
 * strideWidth]`.
 * @param pad The type of padding algorithm used:
 *    - `same` and stride 1: output will be of same size as input,
 *       regardless of filter size.
 *    - `valid`: output will be smaller than input if filter is larger
 *       than 1x1.
 * @return The input gradient, rank 5 (or rank 4 if `dy` was rank 4).
 */
function conv3DBackpropInput_(xShape, dy, filter, strides, pad) {
    assert(xShape.length === dy.rank, function () { return "Length of inShape " +
        ("(" + xShape.length + ") and rank of dy (" + dy.rank + ") must match"); });
    var xShape5D = xShape;
    var dy5D = dy;
    // Promote rank-4 dy (and its 4-element xShape) to a singleton batch of 1
    // so the kernel always receives 5D operands.
    var reshapedTo5D = false;
    if (dy.rank === 4) {
        reshapedTo5D = true;
        dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);
        xShape5D = [1, xShape[0], xShape[1], xShape[2], xShape[3]];
    }
    // NDHWC layout: channels are the last axis of both input and output.
    var inDepth = xShape5D[4];
    var outDepth = dy5D.shape[4];
    assert(xShape5D.length === 5, function () { return "Error in conv3dDerInput: inShape must be length 5, but got length " +
        (xShape5D.length + "."); });
    assert(dy5D.rank === 5, function () { return "Error in conv3dDerInput: dy must be rank 5, but got " +
        ("rank " + dy5D.rank); });
    assert(filter.rank === 5, function () { return "Error in conv3dDerInput: filter must be rank 5, but got " +
        ("rank " + filter.rank); });
    assert(inDepth === filter.shape[3], function () { return "Error in conv3dDerInput: depth of input (" + inDepth + ") must " +
        ("match input depth for filter " + filter.shape[3] + "."); });
    assert(outDepth === filter.shape[4], function () { return "Error in conv3dDerInput: depth of output (" + outDepth + ") must " +
        ("match output depth for filter " + filter.shape[4] + "."); });
    var inputs = { dy: dy5D, filter: filter };
    var attrs = { pad: pad, strides: strides, inputShape: xShape5D };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv3DBackpropInputV2, inputs, attrs);
    // Strip the synthetic batch dimension if one was added above.
    if (reshapedTo5D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
    }
    return res;
}
var conv3DBackpropInput = op({ conv3DBackpropInput_: conv3DBackpropInput_ });
+
/**
 * Computes the transposed 3D convolution of a volume, also known as a
 * deconvolution.
 *
 * @param x The input image, of rank 5 or rank 4, of shape
 *   `[batch, depth, height, width, inDepth]`. If rank 4, batch of 1 is assumed.
 * @param filter The filter, rank 4, of shape
 *     `[depth, filterHeight, filterWidth, outDepth, inDepth]`.
 *     `inDepth` must match `inDepth` in `x`.
 * @param outputShape Output shape, of rank 5 or rank 4:
 *     `[batch, depth, height, width, outDepth]`. If rank 3, batch of 1 is
 *    assumed.
 * @param strides The strides of the original convolution:
 *     `[strideDepth, strideHeight, strideWidth]`.
 * @param pad The type of padding algorithm used in the non-transpose version
 *    of the op.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function conv3dTranspose_(x, filter, outputShape, strides, pad) {
    var $x = convertToTensor(x, 'x', 'conv3dTranspose');
    var $filter = convertToTensor(filter, 'filter', 'conv3dTranspose');
    // A transposed convolution is the gradient of conv3d with respect to its
    // input, so delegate to the input-backprop op with the target shape.
    return conv3DBackpropInput(outputShape, $x, $filter, strides, pad);
}
var conv3dTranspose = op({ conv3dTranspose_: conv3dTranspose_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes cos of the input `tf.Tensor` element-wise: `cos(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
 *
 * x.cos().print(); // or tf.cos(x)
 * ```
 * @param x The input tensor. Must be float32 type.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function cos_(x) {
    // Coerce the input to a float32 tensor before dispatching to the kernel.
    var $x = convertToTensor(x, 'x', 'cos', 'float32');
    var kernelInputs = { x: $x };
    return ENGINE.runKernel(Cos, kernelInputs);
}
var cos = op({ cos_: cos_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes hyperbolic cos of the input `tf.Tensor` element-wise: `cosh(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.cosh().print(); // or tf.cosh(x)
 * ```
 * @param x The input tensor. Must be float32 type.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function cosh_(x) {
    // Coerce the input to a float32 tensor before dispatching to the kernel.
    var $x = convertToTensor(x, 'x', 'cosh', 'float32');
    var kernelInputs = { x: $x };
    return ENGINE.runKernel(Cosh, kernelInputs);
}
var cosh = op({ cosh_: cosh_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the cumulative sum of a `tf.Tensor` along `axis`.
 *
 * ```js
 * const x = tf.tensor([1, 2, 3, 4]);
 * x.cumsum().print();
 * ```
 * ```js
 * const x = tf.tensor([[1, 2], [3, 4]]);
 * x.cumsum().print();
 * ```
 *
 * @param x The input tensor to be summed.
 * @param axis The axis along which to sum. Optional. Defaults to 0.
 * @param exclusive Whether to perform exclusive cumulative sum. Optional.
 *     Defaults to false. If set to true then the sum of each tensor entry
 *     does not include its own value, but only the values previous to it
 *     along the specified axis.
 * @param reverse Whether to sum in the opposite direction. Optional.
 *     Defaults to false.
 *
 * @doc {heading: 'Operations', subheading: 'Scan'}
 */
function cumsum_(x, axis, exclusive, reverse) {
    // Default to a forward, inclusive scan over the first axis.
    if (axis === void 0) { axis = 0; }
    if (exclusive === void 0) { exclusive = false; }
    if (reverse === void 0) { reverse = false; }
    var $x = convertToTensor(x, 'x', 'cumsum');
    var kernelInputs = { x: $x };
    var kernelAttrs = { axis: axis, exclusive: exclusive, reverse: reverse };
    return ENGINE.runKernel(Cumsum, kernelInputs, kernelAttrs);
}
var cumsum = op({ cumsum_: cumsum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Outputs a vector with length `size` and the same dtype as `weights`.
 *
 * If `weights` are empty, then index `i` stores the number of times the value
 * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the
 * sum of the value in `weights` at each index where the corresponding value in
 * `x` is `i`.
 *
 * Values in `x` outside of the range [0, size) are ignored.
 *
 * @param x The input int tensor, rank 1 or rank 2.
 * @param weights The weights tensor, must have the same shape as x, or a
 *     length-0 Tensor, in which case it acts as all weights equal to 1.
 * @param size Non-negative integer.
 * @param binaryOutput Optional. Whether the kernel should count the appearance
 *     or number of occurrences. Defaults to False.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
function denseBincount_(x, weights, size, binaryOutput) {
    if (binaryOutput === void 0) { binaryOutput = false; }
    var $x = convertToTensor(x, 'x', 'denseBincount');
    var $weights = convertToTensor(weights, 'weights', 'denseBincount');
    // Bin indices must be integers; floats would silently mis-bin.
    assert($x.dtype === 'int32', function () { return "Error in denseBincount: input " +
        ("dtype must be int32, but got " + $x.dtype); });
    assert($x.rank <= 2, function () { return "Error in denseBincount: input must be at most rank 2, but got " +
        ("rank " + $x.rank + "."); });
    assert(size >= 0, function () { return "size must be non-negative, but got " + size + "."; });
    // A zero-size weights tensor means "all weights are 1" (plain counting).
    assert($weights.size === $x.size || $weights.size === 0, function () { return "Error in denseBincount: weights must have the same shape as x or " +
        ("0-length, but got x shape: " + $x.shape + ", weights shape: ") +
        ($weights.shape + "."); });
    var inputs = { x: $x, weights: $weights };
    var attrs = { size: size, binaryOutput: binaryOutput };
    return ENGINE.runKernel(DenseBincount, inputs, attrs);
}
var denseBincount = op({ denseBincount_: denseBincount_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Rearranges data from depth into blocks of spatial data. More specifically,
 * this op outputs a copy of the input tensor where values from the `depth`
 * dimension are moved in spatial blocks to the `height` and `width` dimensions.
 * The attr `blockSize` indicates the input block size and how the data is
 * moved.
 *
 *  - Chunks of data of size `blockSize * blockSize` from depth are rearranged
 * into non-overlapping blocks of size `blockSize x blockSize`
 *
 *  - The width the output tensor is `inputWidth * blockSize`, whereas the
 * height is `inputHeight * blockSize`
 *
 *  - The Y, X coordinates within each block of the output image are determined
 * by the high order component of the input channel index
 *
 *  - The depth of the input tensor must be divisible by `blockSize *
 * blockSize`
 *
 * The `dataFormat` attr specifies the layout of the input and output tensors
 * with the following options: "NHWC": [ `batch, height, width, channels` ]
 * "NCHW": [ `batch, channels, height, width` ]
 *
 * ```js
 * const x = tf.tensor4d([1, 2, 3, 4], [1, 1, 1, 4]);
 * const blockSize = 2;
 * const dataFormat = "NHWC";
 *
 * tf.depthToSpace(x, blockSize, dataFormat).print();
 * ```
 *
 * @param x The input tensor of rank 4
 * @param blockSize An `int` that is `>= 2`. The size of the spatial block
 * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to "NHWC"
 *
 * @doc {heading: 'Tensors', subheading: 'Transformations'}
 */
function depthToSpace_(x, blockSize, dataFormat) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    var $x = convertToTensor(x, 'x', 'depthToSpace', 'float32');
    // Spatial and channel axes depend on layout: NHWC is [b, h, w, c],
    // NCHW is [b, c, h, w].
    var inputHeight = (dataFormat === 'NHWC') ? $x.shape[1] : $x.shape[2];
    var inputWidth = (dataFormat === 'NHWC') ? $x.shape[2] : $x.shape[3];
    var inputDepth = (dataFormat === 'NHWC') ? $x.shape[3] : $x.shape[1];
    assert(blockSize > 1, function () { return "blockSize should be > 1 for depthToSpace, but was: " + blockSize; });
    assert(inputHeight * blockSize >= 0, function () { return "Negative dimension size caused by overflow when multiplying\n    " + inputHeight + " and " + blockSize + "  for depthToSpace with input shape\n    " + $x.shape; });
    assert(inputWidth * blockSize >= 0, function () { return "Negative dimension size caused by overflow when multiplying\n    " + inputWidth + " and " + blockSize + " for depthToSpace with input shape\n        " + $x.shape; });
    // Each output spatial block consumes blockSize*blockSize input channels.
    assert((inputDepth % (blockSize * blockSize) === 0), function () { return "Dimension size must be evenly divisible by " + blockSize * blockSize + " but is " + inputDepth + " for depthToSpace with input shape " + $x.shape; });
    var inputs = { x: $x };
    var attrs = { blockSize: blockSize, dataFormat: dataFormat };
    return ENGINE.runKernel(DepthToSpace, inputs, attrs);
}
var depthToSpace = op({ depthToSpace_: depthToSpace_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Depthwise 2D convolution.
+ *
+ * Given a 4D `input` array and a `filter` array of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing
+ * `inChannels` convolutional filters of depth 1, this op applies a
+ * different filter to each input channel (expanding from 1 channel to
+ * `channelMultiplier` channels for each), then concatenates the results
+ * together. The output has `inChannels * channelMultiplier` channels.
+ *
+ * See
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
+ * for more details.
+ *
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter tensor, rank 4, of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`. If strides is a single number, then `strideHeight ==
+ * strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels]. Only "NHWC" is currently supported.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
    function depthwiseConv2d_(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode) {
        // Defaults: channels-last layout, no dilation.
        if (dataFormat === void 0) { dataFormat = 'NHWC'; }
        if (dilations === void 0) { dilations = [1, 1]; }
        var $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32');
        var $filter = convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32');
        // A rank-3 input is treated as a batch of one: prepend a batch axis,
        // remember to strip it from the result below.
        var x4D = $x;
        var reshapedTo4D = false;
        if ($x.rank === 3) {
            reshapedTo4D = true;
            x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
        }
        assert(x4D.rank === 4, function () { return "Error in depthwiseConv2d: input must be rank 4, but got " +
            ("rank " + x4D.rank + "."); });
        assert($filter.rank === 4, function () { return "Error in depthwiseConv2d: filter must be rank 4, but got rank " +
            ($filter.rank + "."); });
        // The filter's third dimension (inChannels) must match the input's
        // channel dimension (NHWC channel axis = 3).
        assert(x4D.shape[3] === $filter.shape[2], function () { return "Error in depthwiseConv2d: number of input channels " +
            ("(" + x4D.shape[3] + ") must match the inChannels dimension in ") +
            ("filter " + $filter.shape[2] + "."); });
        // Rejects invalid pad / dimRoundingMode combinations before dispatch.
        checkPadOnDimRoundingMode('depthwiseConv2d', pad, dimRoundingMode);
        var inputs = { x: x4D, filter: $filter };
        var attrs = { strides: strides, pad: pad, dataFormat: dataFormat, dilations: dilations, dimRoundingMode: dimRoundingMode };
        // tslint:disable-next-line: no-unnecessary-type-assertion
        var res = ENGINE.runKernel(DepthwiseConv2dNative, inputs, attrs);
        // Drop the synthetic batch dimension added above.
        if (reshapedTo4D) {
            return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
        }
        return res;
    }
    var depthwiseConv2d$1 = op({ depthwiseConv2d_: depthwiseConv2d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns a diagonal tensor with a given diagonal values.
+ *
+ * Given a diagonal, this operation returns a tensor with the diagonal and
+ * everything else padded with zeros.
+ *
+ * Assume the input has dimensions `[D1,..., Dk]`, then the output is a tensor
+ * of rank 2k with dimensions `[D1,..., Dk, D1,..., Dk]`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * tf.diag(x).print()
+ * ```
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 6, 8], [4, 2])
+ *
+ * tf.diag(x).print()
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function diag_(x) {
+ var $x = convertToTensor(x, 'x', 'diag');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Diag, inputs);
+ }
+ var diag = op({ diag_: diag_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the grayscale dilation over the input `x`.
+ *
+ * @param x The input tensor, rank 3 or rank 4 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param filter The filter tensor, rank 3, of shape
+ * `[filterHeight, filterWidth, depth]`.
+ * @param strides The strides of the sliding window for each dimension of the
+ * input tensor: `[strideHeight, strideWidth]`.
+ * If `strides` is a single number,
+ * then `strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
 *     than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dataFormat Specify the data format of the input and output data.
+ * Defaults to 'NHWC'. Only 'NHWC' is currently supported. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * for atrous morphological dilation. Defaults to `[1, 1]`. If `dilations`
+ * is a single number, then `dilationHeight == dilationWidth`. If it is
+ * greater than 1, then all values of `strides` must be 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
    function dilation2d_(x, filter, strides, pad, dilations, dataFormat) {
        // Defaults: no dilation, channels-last layout.
        if (dilations === void 0) { dilations = [1, 1]; }
        if (dataFormat === void 0) { dataFormat = 'NHWC'; }
        var $x = convertToTensor(x, 'x', 'dilation2d');
        var $filter = convertToTensor(filter, 'filter', 'dilation2d');
        assert($x.rank === 3 || $x.rank === 4, function () { return "Error in dilation2d: input must be rank 3 or 4, but got rank " +
            ($x.rank + "."); });
        assert($filter.rank === 3, function () { return "Error in dilation2d: filter must be rank 3, but got rank " +
            ($filter.rank + "."); });
        // Only the channels-last layout is implemented by the kernel.
        assert(dataFormat === 'NHWC', function () { return "Error in dilation2d: Only NHWC is currently supported, " +
            ("but got dataFormat of " + dataFormat); });
        // A rank-3 input is promoted to a single-image batch; the extra batch
        // dimension is stripped from the result below.
        var x4D = $x;
        var reshapedTo4D = false;
        if ($x.rank === 3) {
            x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
            reshapedTo4D = true;
        }
        var inputs = { x: x4D, filter: $filter };
        var attrs = { strides: strides, pad: pad, dilations: dilations };
        // tslint:disable-next-line: no-unnecessary-type-assertion
        var res = ENGINE.runKernel(Dilation2D, inputs, attrs);
        if (reshapedTo4D) {
            return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
        }
        return res;
    }
    var dilation2d = op({ dilation2d_: dilation2d_ });
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
+ * The result is rounded with floor function.
+ *
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
 * a.floorDiv(b).print(); // or tf.floorDiv(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ *
+ * a.floorDiv(b).print(); // or tf.floorDiv(a, b)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function floorDiv_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'floorDiv');
+ var $b = convertToTensor(b, 'b', 'floorDiv');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(FloorDiv, inputs);
+ }
+ var floorDiv = op({ floorDiv_: floorDiv_ });
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * a.div(b).print(); // or tf.div(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ *
+ * a.div(b).print(); // or tf.div(a, b)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function div_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'div');
+ var $b = convertToTensor(b, 'b', 'div');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ if ($a.dtype === 'int32' && $b.dtype === 'int32') {
+ return floorDiv($a, $b);
+ }
+ var inputs = { a: $a, b: $b };
+ var attrs = {};
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return ENGINE.runKernel(RealDiv, inputs, attrs);
+ }
+ var div = op({ div_: div_ });
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the axes in the output space that should be reduced to produce
+ * the input space.
+ */
+ function getReductionAxes(inShape, outShape) {
+ var result = [];
+ for (var i = 0; i < outShape.length; i++) {
+ var inDim = inShape[inShape.length - i - 1];
+ var outAxis = outShape.length - i - 1;
+ var outDim = outShape[outAxis];
+ if (inDim == null || (inDim === 1 && outDim > 1)) {
+ result.unshift(outAxis);
+ }
+ }
+ return result;
+ }
+ function assertAndGetBroadcastShape(shapeA, shapeB) {
+ var result = [];
+ var l = Math.max(shapeA.length, shapeB.length);
+ for (var i = 0; i < l; i++) {
+ var a = shapeA[shapeA.length - i - 1];
+ if (a == null) {
+ a = 1;
+ }
+ var b = shapeB[shapeB.length - i - 1];
+ if (b == null) {
+ b = 1;
+ }
+ if (a === 1) {
+ result.unshift(b);
+ }
+ else if (b === 1) {
+ result.unshift(a);
+ }
+ else if (a !== b) {
+ var errMsg = "Operands could not be broadcast together with shapes " +
+ (shapeA + " and " + shapeB + ".");
+ throw Error(errMsg);
+ }
+ else {
+ result.unshift(a);
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Returns the truth value of (a == b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.equal(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function equal_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'equal', 'string_or_numeric');
+ var $b = convertToTensor(b, 'b', 'equal', 'string_or_numeric');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Equal, inputs);
+ }
+ var equal = op({ equal_: equal_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the elements, either `a` or `b` depending on the `condition`.
+ *
+ * If the condition is true, select from `a`, otherwise select from `b`.
+ *
+ * ```js
+ * const cond = tf.tensor1d([false, false, true], 'bool');
+ * const a = tf.tensor1d([1 , 2, 3]);
+ * const b = tf.tensor1d([-1, -2, -3]);
+ *
+ * a.where(cond, b).print();
+ * ```
+ *
+ * @param condition The input condition. Must be of dtype bool.
+ * @param a If `condition` is rank 1, `a` may have a higher rank but
+ * its first dimension must match the size of `condition`.
+ * @param b A tensor with the same dtype as `a` and with shape that is
+ * compatible with `a`.
+ * @return A tensor with same dtype as `a` and `b`, and shape that is
+ * broadcastable from `a` and `b`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function where_(condition, a, b) {
+ var $a = convertToTensor(a, 'a', 'where');
+ var $b = convertToTensor(b, 'b', 'where');
+ var $condition = convertToTensor(condition, 'condition', 'where', 'bool');
+ // TODO: move this logic to forward function when the broadcastTo op is
+ // implemented in WASM.
+ // Find the broadcastable shape for $condition, $a, and $b.
+ var broadcastShape = assertAndGetBroadcastShape(assertAndGetBroadcastShape($condition.shape, $a.shape), $b.shape);
+ var $broadcastedCondition = broadcastTo($condition, broadcastShape);
+ var $broadcastedA = broadcastTo($a, broadcastShape);
+ var $broadcastedB = broadcastTo($b, broadcastShape);
+ var inputs = {
+ condition: $broadcastedCondition,
+ t: $broadcastedA,
+ e: $broadcastedB
+ };
+ return ENGINE.runKernel(Select, inputs);
+ }
+ var where = op({ where_: where_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with all elements set to 0 with the same shape as the
+ * given tensor.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2]);
+ * tf.zerosLike(x).print();
+ * ```
+ *
+ * @param x The tensor of required shape.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function zerosLike_(x) {
+ var $x = convertToTensor(x, 'x', 'zerosLike');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(ZerosLike, inputs);
+ }
+ var zerosLike = op({ zerosLike_: zerosLike_ });
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. Return 0
+ * if denominator is 0.
+ *
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ * const c = tf.tensor1d([0, 0, 0, 0]);
+ *
+ * a.divNoNan(b).print(); // or tf.divNoNan(a, b)
+ * a.divNoNan(c).print(); // or tf.divNoNan(a, c)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ * const c = tf.scalar(0);
+ *
+ * a.divNoNan(b).print(); // or tf.divNoNan(a, b)
+ * a.divNoNan(c).print(); // or tf.divNoNan(a, c)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function divNoNan_(a, b) {
+ var _a;
+ // TODO: Make this into its own kernel.
+ var $a = convertToTensor(a, 'a', 'div');
+ var $b = convertToTensor(b, 'b', 'div');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var divResult = div($a, $b);
+ var zeros = zerosLike(divResult);
+ var bEqualsZero = equal($b, zeros);
+ return where(bEqualsZero, zeros, divResult);
+ }
+ var divNoNan = op({ divNoNan_: divNoNan_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the dot product of two matrices and/or vectors, `t1` and `t2`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor2d([[1, 2], [3, 4]]);
+ * const c = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);
+ *
+ * a.dot(b).print(); // or tf.dot(a, b)
+ * b.dot(a).print();
+ * b.dot(c).print();
+ * ```
+ * @param t1 The first tensor in the dot operation.
+ * @param t2 The second tensor in the dot operation.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
    function dot_(t1, t2) {
        var $t1 = convertToTensor(t1, 't1', 'dot');
        var $t2 = convertToTensor(t2, 't2', 'dot');
        assert(($t1.rank === 1 || $t1.rank === 2) && ($t2.rank === 1 || $t2.rank === 2), function () { return "Error in dot: inputs must all be rank 1 or 2, but got ranks " +
            ($t1.rank + " and " + $t2.rank + "."); });
        // Inner (contracted) dimensions: a vector contracts over its only axis,
        // a matrix over its second axis (for t1) or first axis (for t2).
        var t1Inner = ($t1.rank === 1 ? $t1.size : $t1.shape[1]);
        var t2Inner = ($t2.rank === 1 ? $t2.size : $t2.shape[0]);
        assert(t1Inner === t2Inner, function () { return "Error in dot: inner dimensions of inputs must match, but got " +
            (t1Inner + " and " + t2Inner + "."); });
        // Each rank combination is lowered to a 2D matMul, then reshaped back
        // to the rank the combination implies.
        if ($t1.rank === 1 && $t2.rank === 1) {
            // vector . vector -> scalar
            var t12D = reshape($t1, [1, -1]);
            var t22D = reshape($t2, [-1, 1]);
            var t1t2 = matMul$1(t12D, t22D);
            return reshape(t1t2, []);
        }
        else if ($t1.rank === 1 && $t2.rank === 2) {
            // vector . matrix -> vector
            var t12D = reshape($t1, [1, -1]);
            var t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]);
            var t1t2 = matMul$1(t12D, t22D);
            return reshape(t1t2, [t1t2.size]);
        }
        else if ($t1.rank === 2 && $t2.rank === 1) {
            // matrix . vector -> vector
            var t22D = reshape($t2, [-1, 1]);
            var t1t2 = matMul$1($t1, t22D);
            return reshape(t1t2, [t1t2.size]);
        }
        else {
            // matrix . matrix -> matrix
            var t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]);
            var t1t2 = matMul$1($t1, t22D);
            return t1t2;
        }
    }
    var dot = op({ dot_: dot_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Tensor contraction over specified indices and outer product.
+ *
+ * `einsum` allows defining Tensors by defining their element-wise computation.
+ * This computation is based on
+ * [Einstein summation](https://en.wikipedia.org/wiki/Einstein_notation).
+ *
+ * Some special cases include:
+ *
+ * Matrix multiplication:
+ * ```js
+ * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);
+ * const y = tf.tensor2d([[0, 1], [2, 3], [4, 5]]);
+ * x.print();
+ * y.print();
+ * tf.einsum('ij,jk->ik', x, y).print();
+ * ```
+ *
+ * Dot product:
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ * const y = tf.tensor1d([0, 1, 2]);
+ * x.print();
+ * y.print();
+ * tf.einsum('i,i->', x, y).print();
+ * ```
+ *
+ * Batch dot product:
+ * ```js
+ * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);
+ * const y = tf.tensor2d([[0, 1, 2], [3, 4, 5]]);
+ * x.print();
+ * y.print();
+ * tf.einsum('bi,bi->b', x, y).print();
+ * ```
+ *
 * Outer product:
+ * ```js
+ * const x = tf.tensor1d([1, 3, 5]);
+ * const y = tf.tensor1d([2, 4, 6]);
+ * x.print();
+ * y.print();
+ * tf.einsum('i,j->ij', x, y).print();
+ * ```
+ *
+ * Matrix transpose:
+ * ```js
+ * const x = tf.tensor2d([[1, 2], [3, 4]]);
+ * x.print();
+ * tf.einsum('ij->ji', x).print();
+ * ```
+ *
+ * Batch matrix transpose:
+ * ```js
+ * const x = tf.tensor3d([[[1, 2], [3, 4]], [[-1, -2], [-3, -4]]]);
+ * x.print();
+ * tf.einsum('bij->bji', x).print();
+ * ```
+ *
+ * Limitations:
+ *
+ * This implementation of einsum has the following limitations:
+ *
+ * - Does not support >2 input tensors.
+ * - Does not support duplicate axes for any given input tensor. E.g., equation
 *   'ii->' is not supported.
+ * - The `...` notation is not supported.
+ *
+ * @param equation a string describing the contraction, in the same format as
+ * [numpy.einsum](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html).
+ * @param tensors the input(s) to contract (each one a Tensor), whose shapes
+ * should be consistent with equation.
+ * @returns The output tensor.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Matrices'}
+ */
+ function einsum_(equation) {
+ var tensors = [];
+ for (var _i = 1; _i < arguments.length; _i++) {
+ tensors[_i - 1] = arguments[_i];
+ }
+ var $tensors = tensors.map(function (t, i) { return convertToTensor(t, "tensors" + i, 'einsum'); });
+ var attrs = { equation: equation };
+ return ENGINE.runKernel(Einsum, $tensors, attrs);
+ }
+ var einsum = op({ einsum_: einsum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes exponential linear element-wise: `x > 0 ? x : (e ^ x) - 1`.
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 1, -3, 2]);
+ *
+ * x.elu().print(); // or tf.elu(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function elu_(x) {
+ var $x = convertToTensor(x, 'x', 'elu', 'float32');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Elu, inputs);
+ }
+ var elu = op({ elu_: elu_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
 * Computes the Gauss error function of the input `tf.Tensor` element-wise:
+ * `erf(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, .1, -.1, .7]);
+ *
+ * x.erf().print(); // or tf.erf(x);
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function erf_(x) {
+ var $x = convertToTensor(x, 'x', 'erf');
+ assert($x.dtype === 'int32' || $x.dtype === 'float32', function () { return 'Input dtype must be `int32` or `float32`.'; });
+ if ($x.dtype === 'int32') {
+ $x = cast($x, 'float32');
+ }
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Erf, inputs);
+ }
+ var erf = op({ erf_: erf_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, -3]);
+ *
+ * x.exp().print(); // or tf.exp(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function exp_(x) {
+ var $x = convertToTensor(x, 'x', 'exp');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Exp, inputs);
+ }
+ var exp = op({ exp_: exp_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns a `tf.Tensor` that has expanded rank, by inserting a dimension
+ * into the tensor's shape.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const axis = 1;
+ * x.expandDims(axis).print();
+ * ```
+ *
+ * @param x The input tensor whose dimensions to be expanded.
+ * @param axis The dimension index at which to insert shape of `1`. Defaults
+ * to 0 (the first dimension).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
+ function expandDims_(x, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $x = convertToTensor(x, 'x', 'expandDims', 'string_or_numeric');
+ assert(axis <= $x.rank, function () { return 'Axis must be <= rank of the tensor'; });
+ var inputs = { input: $x };
+ var attrs = { dim: axis };
+ return ENGINE.runKernel(ExpandDims, inputs, attrs);
+ }
+ var expandDims = op({ expandDims_: expandDims_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes exponential of the input `tf.Tensor` minus one element-wise.
+ * `e ^ x - 1`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, -3]);
+ *
+ * x.expm1().print(); // or tf.expm1(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function expm1_(x) {
+ var $x = convertToTensor(x, 'x', 'expm1');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Expm1, inputs);
+ }
+ var expm1 = op({ expm1_: expm1_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Construct a tensor by repeating it the number of times given by reps.
 *
 * This operation creates a new tensor by replicating `input` `reps`
 * times. The output tensor's i'th dimension has `input.shape[i] *
 * reps[i]` elements, and the values of `input` are replicated
 * `reps[i]` times along the i'th dimension. For example, tiling
 * `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`.
 *
 * ```js
 * const a = tf.tensor1d([1, 2]);
 *
 * a.tile([2]).print(); // or a.tile([2])
 * ```
 *
 * ```js
 * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
 *
 * a.tile([1, 2]).print(); // or a.tile([1, 2])
 * ```
 * @param x The tensor to tile.
 * @param reps Determines the number of replications per dimension; must have
 *     one entry per dimension of `x`.
 *
 * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
 */
function tile_(x, reps) {
    var $x = convertToTensor(x, 'x', 'tile', 'string_or_numeric');
    // Fixed: the assertion message previously said "Error in transpose",
    // a copy-paste mistake — this op is tile.
    assert($x.rank === reps.length, function () { return "Error in tile: rank of input " + $x.rank + " " +
        ("must match length of reps " + reps + "."); });
    var inputs = { x: $x };
    var attrs = { reps: reps };
    return ENGINE.runKernel(Tile, inputs, attrs);
}
var tile = op({ tile_: tile_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Create an identity matrix.
 *
 * @param numRows Number of rows.
 * @param numColumns Number of columns. Defaults to `numRows`.
 * @param batchShape If provided, will add the batch shape to the beginning
 *     of the shape of the returned `tf.Tensor` by repeating the identity
 *     matrix. Any batch rank is supported (the previous implementation
 *     hard-coded the 1D/2D/3D cases and threw for higher ranks).
 * @param dtype Data type.
 * @returns Identity matrix of the specified size and data type, possibly
 *     with batch repetition if `batchShape` is specified.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function eye_(numRows, numColumns, batchShape, dtype) {
    if (dtype === void 0) { dtype = 'float32'; }
    if (numColumns == null) {
        numColumns = numRows;
    }
    // Write 1s on the main diagonal of a zero-initialized buffer.
    var buff = buffer([numRows, numColumns], dtype);
    var n = numRows <= numColumns ? numRows : numColumns;
    for (var i = 0; i < n; ++i) {
        buff.set(1, i, i);
    }
    var out = reshape(buff.toTensor(), [numRows, numColumns]);
    if (batchShape == null) {
        return out;
    }
    // Prepend one singleton dimension per batch axis, then tile the matrix
    // across those axes. Generalizes the former hard-coded 1D-3D branches.
    var expanded = out;
    for (var j = 0; j < batchShape.length; ++j) {
        expanded = expandDims(expanded, 0);
    }
    return tile(expanded, batchShape.concat([1, 1]));
}
var eye = op({ eye_: eye_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Creates a `tf.Tensor` filled with a scalar value.
 *
 * ```js
 * tf.fill([2, 2], 4).print();
 * ```
 *
 * @param shape An array of integers defining the output tensor shape.
 * @param value The scalar value to fill the tensor with.
 * @param dtype The type of an element in the resulting tensor. Defaults to
 *     'float'.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function fill(shape, value, dtype) {
    // No tensor inputs — everything travels through the kernel attrs.
    return ENGINE.runKernel(Fill, {}, { shape: shape, value: value, dtype: dtype });
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Element-wise floor: rounds each value of the input `tf.Tensor` down to
 * the nearest integer, `floor(x)`.
 *
 * ```js
 * const x = tf.tensor1d([.6, 1.1, -3.3]);
 *
 * x.floor().print(); // or tf.floor(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function floor_(x) {
    // Floor is only defined for float inputs, hence the 'float32' coercion.
    var inputTensor = convertToTensor(x, 'x', 'floor', 'float32');
    return ENGINE.runKernel(Floor, { x: inputTensor });
}
var floor = op({ floor_: floor_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Gather slices from tensor `x`'s axis `axis` according to `indices`.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3, 4]);
 * const indices = tf.tensor1d([1, 3, 3], 'int32');
 *
 * x.gather(indices).print();
 * ```
 *
 * ```js
 * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
 * const indices = tf.tensor1d([1, 1, 0], 'int32');
 *
 * x.gather(indices).print();
 * ```
 * @param x The input tensor whose slices are to be gathered.
 * @param indices The indices of the values to extract.
 * @param axis The axis over which to select values. Defaults to 0.
 * @param batchDims Optional. The number of batch dimensions. It must be less
 *     than or equal to rank(indices). Defaults to 0.
 *     The output tensor will have shape of
 *     `x.shape[:axis] + indices.shape[batchDims:] + x.shape[axis + 1:]`
 *
 * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
 */
function gather_(x, indices, axis, batchDims) {
    if (axis === void 0) { axis = 0; }
    if (batchDims === void 0) { batchDims = 0; }
    var xTensor = convertToTensor(x, 'x', 'gather');
    // Indices are always integer positions, so force int32.
    var idxTensor = convertToTensor(indices, 'indices', 'gather', 'int32');
    return ENGINE.runKernel(GatherV2, { x: xTensor, indices: idxTensor }, { axis: axis, batchDims: batchDims });
}
var gather = op({ gather_: gather_ });
+
/**
 * Returns the truth value of (a > b) element-wise. Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([1, 2, 3]);
 * const b = tf.tensor1d([2, 2, 2]);
 *
 * a.greater(b).print();
 * ```
 *
 * @param a The first input tensor.
 * @param b The second input tensor. Must have the same dtype as `a`.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function greater_(a, b) {
    var ta = convertToTensor(a, 'a', 'greater', 'string_or_numeric');
    var tb = convertToTensor(b, 'b', 'greater', 'string_or_numeric');
    // Upcast both operands to a common dtype before comparing.
    var matched = makeTypesMatch(ta, tb);
    ta = matched[0];
    tb = matched[1];
    // Validates that the two shapes broadcast; throws otherwise.
    assertAndGetBroadcastShape(ta.shape, tb.shape);
    return ENGINE.runKernel(Greater, { a: ta, b: tb });
}
var greater = op({ greater_: greater_ });
+
/**
 * Returns the truth value of (a >= b) element-wise. Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([1, 2, 3]);
 * const b = tf.tensor1d([2, 2, 2]);
 *
 * a.greaterEqual(b).print();
 * ```
 *
 * @param a The first input tensor.
 * @param b The second input tensor. Must have the same dtype as `a`.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function greaterEqual_(a, b) {
    var ta = convertToTensor(a, 'a', 'greaterEqual', 'string_or_numeric');
    var tb = convertToTensor(b, 'b', 'greaterEqual', 'string_or_numeric');
    // Upcast both operands to a common dtype before comparing.
    var matched = makeTypesMatch(ta, tb);
    ta = matched[0];
    tb = matched[1];
    // Validates that the two shapes broadcast; throws otherwise.
    assertAndGetBroadcastShape(ta.shape, tb.shape);
    return ENGINE.runKernel(GreaterEqual, { a: ta, b: tb });
}
var greaterEqual = op({ greaterEqual_: greaterEqual_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns the imaginary part of a complex (or real) tensor.
 *
 * Given a tensor input, this operation returns a tensor of type float that is
 * the imaginary part of each element in input considered as a complex number.
 * If input is real, a tensor of all zeros is returned.
 *
 * ```js
 * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);
 * tf.imag(x).print();
 * ```
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function imag_(input) {
    var inputTensor = convertToTensor(input, 'input', 'imag');
    return ENGINE.runKernel(Imag, { input: inputTensor });
}
var imag = op({ imag_: imag_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns which elements of x are finite.
 *
 * ```js
 * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
 *
 * x.isFinite().print(); // or tf.isFinite(x)
 * ```
 * @param x The input Tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function isFinite_(x) {
    var inputTensor = convertToTensor(x, 'x', 'isFinite');
    return ENGINE.runKernel(IsFinite, { x: inputTensor });
}
// Suffixed `$1` to avoid shadowing the global `isFinite`.
var isFinite$1 = op({ isFinite_: isFinite_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns which elements of x are Infinity or -Infinity.
 *
 * ```js
 * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
 *
 * x.isInf().print(); // or tf.isInf(x)
 * ```
 * @param x The input Tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function isInf_(x) {
    var inputTensor = convertToTensor(x, 'x', 'isInf');
    return ENGINE.runKernel(IsInf, { x: inputTensor });
}
var isInf = op({ isInf_: isInf_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns which elements of x are NaN.
 *
 * ```js
 * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
 *
 * x.isNaN().print(); // or tf.isNaN(x)
 * ```
 * @param x The input Tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function isNaN_(x) {
    var inputTensor = convertToTensor(x, 'x', 'isNaN');
    return ENGINE.runKernel(IsNan, { x: inputTensor });
}
// Suffixed `$1` to avoid shadowing the global `isNaN`.
var isNaN$1 = op({ isNaN_: isNaN_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes leaky rectified linear element-wise.
 *
 * See
 * [http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf](
 * http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)
 *
 * ```js
 * const x = tf.tensor1d([-1, 2, -3, 4]);
 *
 * x.leakyRelu(0.1).print(); // or tf.leakyRelu(x, 0.1)
 * ```
 * @param x The input tensor.
 * @param alpha The scaling factor for negative values, defaults to 0.2.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function leakyRelu_(x, alpha) {
    if (alpha === void 0) { alpha = 0.2; }
    var inputTensor = convertToTensor(x, 'x', 'leakyRelu');
    return ENGINE.runKernel(LeakyRelu, { x: inputTensor }, { alpha: alpha });
}
var leakyRelu = op({ leakyRelu_: leakyRelu_ });
+
/**
 * Returns the truth value of (a < b) element-wise. Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([1, 2, 3]);
 * const b = tf.tensor1d([2, 2, 2]);
 *
 * a.less(b).print();
 * ```
 * @param a The first input tensor.
 * @param b The second input tensor. Must have the same dtype as `a`.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function less_(a, b) {
    var ta = convertToTensor(a, 'a', 'less', 'string_or_numeric');
    var tb = convertToTensor(b, 'b', 'less', 'string_or_numeric');
    // Upcast both operands to a common dtype before comparing.
    var matched = makeTypesMatch(ta, tb);
    ta = matched[0];
    tb = matched[1];
    // Validates that the two shapes broadcast; throws otherwise.
    assertAndGetBroadcastShape(ta.shape, tb.shape);
    return ENGINE.runKernel(Less, { a: ta, b: tb });
}
var less = op({ less_: less_ });
+
/**
 * Returns the truth value of (a <= b) element-wise. Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([1, 2, 3]);
 * const b = tf.tensor1d([2, 2, 2]);
 *
 * a.lessEqual(b).print();
 * ```
 *
 * @param a The first input tensor.
 * @param b The second input tensor. Must have the same dtype as `a`.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function lessEqual_(a, b) {
    var ta = convertToTensor(a, 'a', 'lessEqual', 'string_or_numeric');
    var tb = convertToTensor(b, 'b', 'lessEqual', 'string_or_numeric');
    // Upcast both operands to a common dtype before comparing.
    var matched = makeTypesMatch(ta, tb);
    ta = matched[0];
    tb = matched[1];
    // Validates that the two shapes broadcast; throws otherwise.
    assertAndGetBroadcastShape(ta.shape, tb.shape);
    return ENGINE.runKernel(LessEqual, { a: ta, b: tb });
}
var lessEqual = op({ lessEqual_: lessEqual_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Return an evenly spaced sequence of numbers over the given interval.
 *
 * ```js
 * tf.linspace(0, 9, 10).print();
 * ```
 * @param start The start value of the sequence.
 * @param stop The end value of the sequence.
 * @param num The number of values to generate. Must be positive.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function linspace(start, stop, num) {
    if (num <= 0) {
        throw new Error('The number of values should be positive.');
    }
    return ENGINE.runKernel(LinSpace, {}, { start: start, stop: stop, num: num });
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Normalizes the activation of a local neighborhood across or within
 * channels.
 *
 * @param x The input tensor. The 4-D input tensor is treated as a 3-D array
 *     of 1D vectors (along the last dimension), and each vector is
 *     normalized independently. A rank-3 input is treated as a single batch.
 * @param depthRadius The number of adjacent channels in the 1D normalization
 *     window. Must be an integer.
 * @param bias A constant bias term for the basis.
 * @param alpha A scale factor, usually positive.
 * @param beta An exponent.
 *
 * @doc {heading: 'Operations', subheading: 'Normalization'}
 */
function localResponseNormalization_(x, depthRadius, bias, alpha, beta) {
    if (depthRadius === void 0) { depthRadius = 5; }
    if (bias === void 0) { bias = 1; }
    if (alpha === void 0) { alpha = 1; }
    if (beta === void 0) { beta = 0.5; }
    var $x = convertToTensor(x, 'x', 'localResponseNormalization');
    assert($x.rank === 4 || $x.rank === 3, function () { return "Error in localResponseNormalization: x must be rank 3 or 4 but got\n rank " + $x.rank + "."; });
    assert(isInt(depthRadius), function () { return "Error in localResponseNormalization: depthRadius must be an " +
        ("integer but got depthRadius " + depthRadius + "."); });
    // The kernel only accepts rank-4 input; promote rank-3 by adding a
    // leading batch dimension of 1, and strip it again afterwards.
    var needsReshape = $x.rank === 3;
    var x4D = needsReshape
        ? reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]])
        : $x;
    var res = ENGINE.runKernel(LRN, { x: x4D }, { depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta });
    return needsReshape
        ? reshape(res, [res.shape[1], res.shape[2], res.shape[3]])
        : res;
}
var localResponseNormalization = op({ localResponseNormalization_: localResponseNormalization_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the natural logarithm of the input `tf.Tensor` element-wise:
 * `ln(x)`.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, Math.E]);
 *
 * x.log().print(); // or tf.log(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function log_(x) {
    // Logarithms are only defined for float inputs, hence 'float32'.
    var inputTensor = convertToTensor(x, 'x', 'log', 'float32');
    return ENGINE.runKernel(Log, { x: inputTensor });
}
var log = op({ log_: log_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the natural logarithm of the input `tf.Tensor` plus one
 * element-wise: `ln(1 + x)`.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, Math.E - 1]);
 *
 * x.log1p().print(); // or tf.log1p(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function log1p_(x) {
    var inputTensor = convertToTensor(x, 'x', 'log1p');
    return ENGINE.runKernel(Log1p, { x: inputTensor });
}
var log1p = op({ log1p_: log1p_ });
+
/**
 * Overrides the gradient computation of a function `f`.
 *
 * Takes a function
 * `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}`
 * and returns another function `g(...inputs)` which takes the same inputs as
 * `f`. When called, `g` returns `f().value`. In backward mode, custom
 * gradients with respect to each input of `f` are computed using
 * `f().gradFunc`.
 *
 * The `save` function passed to `f` should be used for saving tensors needed
 * in the gradient. And the `saved` passed to the `gradFunc` is a
 * `NamedTensorMap`, which contains those saved tensors.
 *
 * ```js
 * const customOp = tf.customGrad((x, save) => {
 *   // Save x to make sure it's available later for the gradient.
 *   save([x]);
 *   // Override gradient of our custom x ^ 2 op to be dy * abs(x);
 *   return {
 *     value: x.square(),
 *     // Note `saved.x` which points to the `x` we saved earlier.
 *     gradFunc: (dy, saved) => [dy.mul(saved[0].abs())]
 *   };
 * });
 *
 * const x = tf.tensor1d([-1, -2, 3]);
 * const dx = tf.grad(x => customOp(x));
 *
 * console.log(`f(x):`);
 * customOp(x).print();
 * console.log(`f'(x):`);
 * dx(x).print();
 * ```
 *
 * @param f The function to evaluate in forward mode, which should return
 *     `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc`
 *     returns the custom gradients of `f` with respect to its inputs.
 *
 * @doc {heading: 'Training', subheading: 'Gradients'}
 */
function customGrad(f) {
    // Thin delegate: the engine builds and returns the wrapped function.
    var wrapped = ENGINE.customGrad(f);
    return wrapped;
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes `-1 * x` element-wise.
 *
 * ```js
 * const x = tf.tensor2d([1, 2, -2, 0], [2, 2]);
 *
 * x.neg().print(); // or tf.neg(x)
 * ```
 *
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function neg_(x) {
    var inputTensor = convertToTensor(x, 'x', 'neg');
    return ENGINE.runKernel(Neg, { x: inputTensor });
}
var neg = op({ neg_: neg_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes softplus of the input `tf.Tensor` element-wise:
 * `log(exp(x) + 1)`.
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.softplus().print(); // or tf.softplus(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function softplus_(x) {
    var inputTensor = convertToTensor(x, 'x', 'softplus');
    return ENGINE.runKernel(Softplus, { x: inputTensor });
}
var softplus = op({ softplus_: softplus_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes log sigmoid of the input `tf.Tensor` element-wise:
 * `logSigmoid(x)`. For numerical stability, we use `-tf.softplus(-x)`.
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.logSigmoid().print(); // or tf.logSigmoid(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function logSigmoid_(x) {
    var $x = convertToTensor(x, 'x', 'logSigmoid');
    // There is no LogSigmoid kernel in TF, so instead of engine.runKernel we
    // wrap the numerically stable forward formula -softplus(-x) in a custom
    // gradient whose derivative is dy * sigmoid(-x), matching the previous
    // implementation.
    // TODO(yassogba) once backends have modularized softplus, this chained
    // softplus call can be replaced by engine runKernel(..., Softplus, ...).
    var stableLogSigmoid = customGrad(function (t) {
        return {
            value: neg(softplus(neg(t))),
            gradFunc: function (dy) { return mul(dy, sigmoid(neg(t))); }
        };
    });
    return stableLogSigmoid($x);
}
var logSigmoid = op({ logSigmoid_: logSigmoid_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the maximum of elements across dimensions of a `tf.Tensor`.
 *
 * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
 * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
 * `axes`. If `keepDims` is true, the reduced dimensions are retained with
 * length 1. If `axes` has no entries, all dimensions are reduced, and an
 * `tf.Tensor` with a single element is returned.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3]);
 *
 * x.max().print(); // or tf.max(x)
 * ```
 *
 * ```js
 * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
 *
 * const axis = 1;
 * x.max(axis).print(); // or tf.max(x, axis)
 * ```
 *
 * @param x The input tensor.
 * @param axis The dimension(s) to reduce. By default it reduces
 *     all dimensions.
 * @param keepDims If true, retains reduced dimensions with size 1.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
function max_(x, axis, keepDims) {
    if (axis === void 0) { axis = null; }
    if (keepDims === void 0) { keepDims = false; }
    var inputTensor = convertToTensor(x, 'x', 'max');
    return ENGINE.runKernel(Max, { x: inputTensor }, { reductionIndices: axis, keepDims: keepDims });
}
var max = op({ max_: max_ });
+
+ /**
+ * Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * a.sub(b).print(); // or tf.sub(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast subtract a with b.
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.scalar(5);
+ *
+ * a.sub(b).print(); // or tf.sub(a, b)
+ * ```
+ * @param a The first `tf.Tensor` to subtract from.
+ * @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
/**
 * Element-wise a - b with broadcasting; dtypes are reconciled via
 * makeTypesMatch before dispatching to the Sub kernel.
 */
function sub_(a, b) {
    var $a = convertToTensor(a, 'a', 'sub');
    var $b = convertToTensor(b, 'b', 'sub');
    var matched = makeTypesMatch($a, $b);
    $a = matched[0];
    $b = matched[1];
    return ENGINE.runKernel(Sub, { a: $a, b: $b });
}
var sub = op({ sub_: sub_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the sum of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If axes has no entries, all dimensions are reduced, and a
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.sum().print(); // or tf.sum(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.sum(axis).print(); // or tf.sum(x, axis)
+ * ```
+ *
+ * @param x The input tensor to compute the sum over. If the dtype is `bool`
+ * it will be converted to `int32` and the output dtype will be `int32`.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
/**
 * Reduces `x` along the given axes by summation (full contract in the
 * jsdoc above) and dispatches to the Sum kernel.
 */
function sum_(x, axis, keepDims) {
    var reduceAxis = axis === undefined ? null : axis;
    var keep = keepDims === undefined ? false : keepDims;
    var $x = convertToTensor(x, 'x', 'sum');
    // bool inputs are summed as int32, per the documented contract.
    var input = $x.dtype === 'bool' ? cast($x, 'int32') : $x;
    return ENGINE.runKernel(Sum, { x: input }, { axis: reduceAxis, keepDims: keep });
}
var sum = op({ sum_: sum_ });
+
+ /**
+ * Computes the log softmax.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ *
+ * a.logSoftmax().print(); // or tf.logSoftmax(a)
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);
+ *
+ * a.logSoftmax().print(); // or tf.logSoftmax(a)
+ * ```
+ *
+ * @param logits The logits array.
+ * @param axis The dimension softmax would be performed on. Defaults to `-1`
+ * which indicates the last dimension.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
/**
 * Log softmax over the last dimension (only the last dimension is
 * supported). Implemented via a custom gradient for numerical stability:
 * logsoftmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))).
 */
function logSoftmax_(logits, axis) {
    var dim = axis === undefined ? -1 : axis;
    var $logits = convertToTensor(logits, 'logits', 'logSoftmax');
    if (dim === -1) {
        dim = $logits.rank - 1;
    }
    if (dim !== $logits.rank - 1) {
        throw Error('Log Softmax along a non-last dimension is not yet supported. ' +
            ("Logits was rank " + $logits.rank + " and axis was " + dim));
    }
    var customOp = customGrad(function (input, save) {
        // Shift by the per-row max so exp() cannot overflow.
        var xMax = max(input, dim, true /* keepDims */);
        var shifted = sub(input, xMax);
        var value = sub(cast(shifted, 'float32'), log(sum(exp(shifted), dim, true /* keepDims */)));
        save([value]);
        // d/dx logsoftmax = dy - sum(dy) * softmax, where softmax = exp(value).
        var gradFunc = function (dy, saved) {
            var value = saved[0];
            var softmax = exp(value);
            return sub(dy, mul(sum(dy, dim, true /* keepDims */), softmax));
        };
        return { value: value, gradFunc: gradFunc };
    });
    // TODO Use Engine.runKernel once the CPU/WebGL/WASM backends implement a
    // LogSoftmax kernel directly.
    return customOp($logits);
}
var logSoftmax = op({ logSoftmax_: logSoftmax_ });
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Rebuilds a full-rank coordinate from two partial coordinates: dimensions
 * listed in `axes` are filled, in order, from `reduceLoc`; every other
 * dimension is filled, in order, from `outputLoc`.
 */
function combineLocations(outputLoc, reduceLoc, axes) {
    var rank = outputLoc.length + reduceLoc.length;
    var outIdx = 0;
    var reduceIdx = 0;
    var loc = [];
    for (var dim = 0; dim < rank; dim++) {
        var fromReduce = axes.indexOf(dim) !== -1;
        loc.push(fromReduce ? reduceLoc[reduceIdx++] : outputLoc[outIdx++]);
    }
    return loc;
}
/**
 * Re-inserts a length-1 entry for every reduced axis, keeping the surviving
 * dimensions of `shape` in their original order (keepDims-style shape).
 */
function expandShapeToKeepDim(shape, axes) {
    var rank = shape.length + axes.length;
    var result = [];
    var shapeIdx = 0;
    for (var dim = 0; dim < rank; dim++) {
        result.push(axes.indexOf(dim) === -1 ? shape[shapeIdx++] : 1);
    }
    return result;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the log(sum(exp(elements across the reduction dimensions)).
+ *
+ * Reduces the input along the dimensions given in `axis`. Unless `keepDims`
+ * is true, the rank of the array is reduced by 1 for each entry in `axis`.
+ * If `keepDims` is true, the reduced dimensions are retained with length 1.
+ * If `axis` has no entries, all dimensions are reduced, and an array with a
+ * single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.logSumExp().print(); // or tf.logSumExp(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.logSumExp(axis).print(); // or tf.logSumExp(x, axis)
+ * ```
+ * @param x The input tensor.
+ * @param axis The dimension(s) to reduce. If null (the default),
+ * reduces all dimensions.
+ * @param keepDims If true, retains reduced dimensions with length
+ * of 1. Defaults to false.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
/**
 * Numerically stable log-sum-exp reduction:
 * log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x)))).
 */
function logSumExp_(x, axis, keepDims) {
    var reduceAxis = axis === undefined ? null : axis;
    var keep = keepDims === undefined ? false : keepDims;
    var $x = convertToTensor(x, 'x', 'logSumExp');
    var axes = parseAxisParam(reduceAxis, $x.shape);
    // Subtract the per-slice max before exponentiating to avoid overflow.
    var xMax = max($x, axes, true /* keepDims */);
    var shifted = sub($x, xMax);
    var logOfSum = log(sum(exp(shifted), axes));
    var res = add(reshape(xMax, logOfSum.shape), logOfSum);
    if (keep) {
        return reshape(res, expandShapeToKeepDim(res.shape, axes));
    }
    return res;
}
var logSumExp = op({ logSumExp_: logSumExp_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the truth value of `a AND b` element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, false, true, true], 'bool');
+ * const b = tf.tensor1d([false, true, false, true], 'bool');
+ *
+ * a.logicalAnd(b).print();
+ * ```
+ *
+ * @param a The first input tensor. Must be of dtype bool.
+ * @param b The second input tensor. Must be of dtype bool.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
/**
 * Element-wise boolean AND with broadcasting; both inputs must be (or be
 * convertible to) bool tensors.
 */
function logicalAnd_(a, b) {
    var $a = convertToTensor(a, 'a', 'logicalAnd', 'bool');
    var $b = convertToTensor(b, 'b', 'logicalAnd', 'bool');
    // Throws if the two shapes are not broadcast-compatible.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(LogicalAnd, { a: $a, b: $b });
}
var logicalAnd = op({ logicalAnd_: logicalAnd_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the truth value of `NOT x` element-wise.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, true], 'bool');
+ *
+ * a.logicalNot().print();
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype 'bool'.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
/**
 * Element-wise boolean NOT; the input must be (or be convertible to) a
 * bool tensor.
 */
function logicalNot_(x) {
    var $x = convertToTensor(x, 'x', 'logicalNot', 'bool');
    return ENGINE.runKernel(LogicalNot, { x: $x });
}
var logicalNot = op({ logicalNot_: logicalNot_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the truth value of `a OR b` element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, false, true, true], 'bool');
+ * const b = tf.tensor1d([false, true, false, true], 'bool');
+ *
+ * a.logicalOr(b).print();
+ * ```
+ * @param a The first input tensor. Must be of dtype bool.
+ * @param b The second input tensor. Must be of dtype bool.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
/**
 * Element-wise boolean OR with broadcasting; both inputs must be (or be
 * convertible to) bool tensors.
 */
function logicalOr_(a, b) {
    var $a = convertToTensor(a, 'a', 'logicalOr', 'bool');
    var $b = convertToTensor(b, 'b', 'logicalOr', 'bool');
    // Throws if the two shapes are not broadcast-compatible.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(LogicalOr, { a: $a, b: $b });
}
var logicalOr = op({ logicalOr_: logicalOr_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the truth value of `a XOR b` element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([false, false, true, true], 'bool');
+ * const b = tf.tensor1d([false, true, false, true], 'bool');
+ *
+ * a.logicalXor(b).print();
+ * ```
+ *
+ * @param a The first input tensor. Must be of dtype bool.
+ * @param b The second input tensor. Must be of dtype bool.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
/**
 * Element-wise boolean XOR with broadcasting, composed from the AND/OR/NOT
 * ops since there is no dedicated XOR kernel.
 *
 * Fix: the sub-ops now receive the already-converted `$a`/`$b` tensors.
 * Previously the raw `a`/`b` inputs were forwarded, so a plain-array input
 * was re-converted to a tensor (with duplicate dtype validation and tensor
 * allocation) inside each of the four nested ops.
 */
function logicalXor_(a, b) {
    var $a = convertToTensor(a, 'a', 'logicalXor', 'bool');
    var $b = convertToTensor(b, 'b', 'logicalXor', 'bool');
    // Throws if the two shapes are not broadcast-compatible.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    // x ^ y = (x | y) & ~(x & y)
    return logicalAnd(logicalOr($a, $b), logicalNot(logicalAnd($a, $b)));
}
var logicalXor = op({ logicalXor_: logicalXor_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the 2D max pooling of an image.
+ *
+ * @param x The input tensor, of rank 4 or rank 3 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in dilated pooling. Defaults to `[1, 1]`. If `dilations` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ */
/**
 * 2D max pooling (contract in the jsdoc above). A rank-3 input is treated
 * as a single-image batch: it is expanded to rank 4 for the kernel and the
 * synthetic batch dimension is stripped from the result.
 */
function maxPool_(x, filterSize, strides, pad, dimRoundingMode) {
    var $x = convertToTensor(x, 'x', 'maxPool');
    var dilations = 1;
    var reshapedTo4D = $x.rank === 3;
    var x4D = reshapedTo4D ?
        reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]) :
        $x;
    assert(x4D.rank === 4, function () {
        return "Error in maxPool: input must be rank 4 but got rank " + x4D.rank + ".";
    });
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () {
        return 'Error in maxPool: Either strides or dilations must be 1. ' +
            ("Got strides " + strides + " and dilations '" + dilations + "'");
    });
    checkPadOnDimRoundingMode('maxPool', pad, dimRoundingMode);
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(MaxPool, { x: x4D }, { filterSize: filterSize, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode });
    if (reshapedTo4D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var maxPool = op({ maxPool_: maxPool_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the 3D max pooling.
+ *
+ * ```js
+ * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]);
+ * const result = tf.maxPool3d(x, 2, 1, 'valid');
+ * result.print();
+ * ```
+ *
+ * @param x The input tensor, of rank 5 or rank 4 of shape
+ * `[batch, depth, height, width, inChannels]`.
+ * @param filterSize The filter size:
+ * `[filterDepth, filterHeight, filterWidth]`.
+ * If `filterSize` is a single number,
+ * then `filterDepth == filterHeight == filterWidth`.
+ * @param strides The strides of the pooling:
+ * `[strideDepth, strideHeight, strideWidth]`.
+ * If `strides` is a single number,
+ * then `strideDepth == strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to
+ * "NDHWC". Specify the data format of the input and output data. With the
+ * default format "NDHWC", the data is stored in the order of: [batch,
+ * depth, height, width, channels]. Only "NDHWC" is currently supported.
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
/**
 * 3D max pooling (contract in the jsdoc above). A rank-4 input is treated
 * as a single-volume batch: it is expanded to rank 5 for the kernel and
 * the synthetic batch dimension is stripped from the result. Only the
 * NDHWC data format is supported.
 */
function maxPool3d_(x, filterSize, strides, pad, dimRoundingMode, dataFormat) {
    var filter = filterSize === undefined ? [1, 1, 1] : filterSize;
    var format = dataFormat === undefined ? 'NDHWC' : dataFormat;
    var $x = convertToTensor(x, 'x', 'maxPool3d');
    var reshapedTo5D = $x.rank === 4;
    var x5D = reshapedTo5D ?
        reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]) :
        $x;
    assert(x5D.rank === 5, function () {
        return "Error in maxPool3d: x must be rank 5 but got rank " + x5D.rank + ".";
    });
    assert(format === 'NDHWC', function () {
        return "Error in maxPool3d: Only NDHWC is currently supported, " +
            ("but got dataFormat of " + format);
    });
    checkPadOnDimRoundingMode('maxPool3d', pad, dimRoundingMode);
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(MaxPool3D, { x: x5D }, { filterSize: filter, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode, dataFormat: format });
    if (reshapedTo5D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
    }
    return res;
}
var maxPool3d = op({ maxPool3d_: maxPool3d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the 2D max pooling of an image with Argmax index.
+ * The indices in argmax are flattened, so that a maximum value at position `[b,
+ * y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if
+ * include_batch_in_index is False; `((b * height + y) * width + x) * channels
+ * +c` if include_batch_in_index is True.
+ *
+ * The indices returned are always in `[0, height) x [0, width)` before
+ * flattening.
+ *
+ * @param x The input tensor, of rank 4 or rank 3 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to
+ * "NDHWC". Specify the data format of the input and output data. With the
+ * default format "NDHWC", the data is stored in the order of: [batch,
+ * depth, height, width, channels]. Only "NDHWC" is currently supported.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param includeBatchInIndex Defaults to False. Whether to include batch
+ * dimension in flattened index of argmax.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
/**
 * 2D max pooling that also returns the flattened argmax indices (contract
 * in the jsdoc above). The kernel yields [pooledValues, argmaxIndices].
 */
function maxPoolWithArgmax_(x, filterSize, strides, pad, includeBatchInIndex) {
    var includeBatch = includeBatchInIndex === undefined ? false : includeBatchInIndex;
    var $x = convertToTensor(x, 'x', 'maxPoolWithArgmax');
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var outputs = ENGINE.runKernel(MaxPoolWithArgmax, { x: $x }, { filterSize: filterSize, strides: strides, pad: pad, includeBatchInIndex: includeBatch });
    return { result: outputs[0], indexes: outputs[1] };
}
var maxPoolWithArgmax = op({ maxPoolWithArgmax_: maxPoolWithArgmax_ });
+
+ /**
+ * Returns the max of a and b (`a > b ? a : b`) element-wise.
+ * Supports broadcasting.
+ *
+ * We also expose `tf.maximumStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
+ *
+ * a.maximum(b).print(); // or tf.maximum(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast maximum a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
+ *
+ * a.maximum(b).print(); // or tf.maximum(a, b)
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
/**
 * Element-wise maximum with broadcasting; dtypes are reconciled first, and
 * bool operands are promoted to int32 before hitting the Maximum kernel.
 */
function maximum_(a, b) {
    var $a = convertToTensor(a, 'a', 'maximum');
    var $b = convertToTensor(b, 'b', 'maximum');
    var matched = makeTypesMatch($a, $b);
    $a = matched[0];
    $b = matched[1];
    if ($a.dtype === 'bool') {
        $a = cast($a, 'int32');
        $b = cast($b, 'int32');
    }
    // Throws if the two shapes are not broadcast-compatible.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(Maximum, { a: $a, b: $b });
}
var maximum = op({ maximum_: maximum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the mean of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is
+ * true, the rank of the `tf.Tensor` is reduced by 1 for each entry in `axis`.
+ * If `keepDims` is true, the reduced dimensions are retained with length 1.
+ * If `axis` has no entries, all dimensions are reduced, and a `tf.Tensor` with
+ * a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.mean().print(); // or tf.mean(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.mean(axis).print(); // or tf.mean(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
/**
 * Reduces `x` along the given axes by arithmetic mean (full contract in
 * the jsdoc above) and dispatches to the Mean kernel.
 */
function mean_(x, axis, keepDims) {
    var reduceAxis = axis === undefined ? null : axis;
    var keep = keepDims === undefined ? false : keepDims;
    var $x = convertToTensor(x, 'x', 'mean');
    return ENGINE.runKernel(Mean, { x: $x }, { axis: reduceAxis, keepDims: keep });
}
var mean = op({ mean_: mean_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with all elements set to 0.
+ *
+ * ```js
+ * tf.zeros([2, 2]).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param dtype The type of an element in the resulting tensor. Can
+ * be 'float32', 'int32' or 'bool'. Defaults to 'float'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Creates a tensor of the given shape filled with zeros. complex64 has no
 * typed-array backing, so it is assembled from two float32 parts.
 */
function zeros(shape, dtype) {
    var outDtype = dtype === undefined ? 'float32' : dtype;
    if (outDtype === 'complex64') {
        return complex(zeros(shape, 'float32'), zeros(shape, 'float32'));
    }
    var backing = makeZerosTypedArray(sizeFromShape(shape), outDtype);
    return ENGINE.makeTensor(backing, shape, outDtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with all elements set to 1.
+ *
+ * ```js
+ * tf.ones([2, 2]).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param dtype The type of an element in the resulting tensor. Defaults to
+ * 'float'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Creates a tensor of the given shape filled with ones. For complex64 the
 * real part is all ones and the imaginary part all zeros.
 */
function ones(shape, dtype) {
    var outDtype = dtype === undefined ? 'float32' : dtype;
    if (outDtype === 'complex64') {
        return complex(ones(shape, 'float32'), zeros(shape, 'float32'));
    }
    var backing = makeOnesTypedArray(sizeFromShape(shape), outDtype);
    return ENGINE.makeTensor(backing, shape, outDtype);
}
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Broadcasts parameters for evaluation on an N-D grid.
+ *
+ * Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
+ * of N-D coordinate arrays for evaluating expressions on an N-D grid.
+ *
+ * Notes:
+ * `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
+ * When the `indexing` argument is set to 'xy' (the default), the broadcasting
+ * instructions for the first two dimensions are swapped.
+ * Examples:
+ * Calling `const [X, Y] = meshgrid(x, y)` with the tensors
+ *
+ * ```javascript
+ * const x = [1, 2, 3];
+ * const y = [4, 5, 6];
+ * const [X, Y] = tf.meshgrid(x, y);
+ * // X = [[1, 2, 3],
+ * // [1, 2, 3],
+ * // [1, 2, 3]]
+ * // Y = [[4, 4, 4],
+ * // [5, 5, 5],
+ * // [6, 6, 6]]
+ * ```
+ *
+ * @param x Tensor with rank geq 1.
+ * @param y Tensor with rank geq 1.
+ * @param indexing
+ *
+ * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
+ */
/**
 * Broadcasts 1-D coordinate tensors onto an N-D grid (contract in the
 * jsdoc above). Grids are materialized as outer products with a ones
 * vector: 'xy' (cartesian) indexing swaps the first two dimensions
 * relative to 'ij' (matrix) indexing.
 */
function meshgrid(x, y, _a) {
    var opts = _a === void 0 ? {} : _a;
    var indexing = opts.indexing === void 0 ? 'xy' : opts.indexing;
    if (indexing !== 'xy' && indexing !== 'ij') {
        throw new TypeError(indexing + " is not a valid third argument to meshgrid");
    }
    if (x === undefined) {
        return [];
    }
    var $x = convertToTensor(x, 'x', 'meshgrid', x instanceof Tensor ? x.dtype : 'float32');
    if (y === undefined) {
        return [$x];
    }
    var $y = convertToTensor(y, 'y', 'meshgrid', y instanceof Tensor ? y.dtype : 'float32');
    var numCols = sizeFromShape($x.shape);
    var numRows = sizeFromShape($y.shape);
    if (indexing === 'xy') {
        // Cartesian: X repeats along rows, Y repeats along columns.
        $x = reshape($x, [1, -1]);
        $y = reshape($y, [-1, 1]);
        return [
            matMul$1(ones([numRows, 1], $x.dtype), $x),
            matMul$1($y, ones([1, numCols], $y.dtype)),
        ];
    }
    // Matrix ('ij'): the first output axis follows x, the second follows y.
    $x = reshape($x, [-1, 1]);
    $y = reshape($y, [1, -1]);
    return [
        matMul$1($x, ones([1, numRows], $x.dtype)),
        matMul$1(ones([numCols, 1], $y.dtype), $y),
    ];
}
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the minimum value from the input.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the array is reduced by 1 for each entry in `axes`.
+ * If `keepDims` is true, the reduced dimensions are retained with length 1.
+ * If `axes` has no entries, all dimensions are reduced, and an array with a
+ * single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.min().print(); // or tf.min(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.min(axis).print(); // or tf.min(x, axis)
+ * ```
+ *
+ * @param x The input Tensor.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
function min_(x, axis, keepDims) {
    if (axis === void 0) { axis = null; }
    if (keepDims === void 0) { keepDims = false; }
    // Accept tensor-likes; the Min kernel does the actual reduction.
    var $x = convertToTensor(x, 'x', 'min');
    // tslint:disable-next-line: no-unnecessary-type-assertion
    return ENGINE.runKernel(Min, { x: $x }, { axis: axis, keepDims: keepDims });
}
var min = op({ min_: min_ });
+
+ /**
+ * Returns the min of a and b (`a < b ? a : b`) element-wise.
+ * Supports broadcasting.
+ *
+ * We also expose `minimumStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
+ *
+ * a.minimum(b).print(); // or tf.minimum(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast minimum a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
+ *
+ * a.minimum(b).print(); // or tf.minimum(a, b)
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
function minimum_(a, b) {
    var $a = convertToTensor(a, 'a', 'minimum');
    var $b = convertToTensor(b, 'b', 'minimum');
    // Upcast whichever operand has the narrower dtype so both match.
    var matched = makeTypesMatch($a, $b);
    $a = matched[0];
    $b = matched[1];
    if ($a.dtype === 'bool') {
        // Kernel comparison works on numeric dtypes; promote bool to int32.
        $a = cast($a, 'int32');
        $b = cast($b, 'int32');
    }
    // Validates that the shapes are broadcast-compatible (throws otherwise).
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(Minimum, { a: $a, b: $b });
}
var minimum = op({ minimum_: minimum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Pads a `tf.Tensor` using mirror padding.
+ *
+ * This operation implements the `REFLECT` and `SYMMETRIC` modes of pad.
+ *
+ * ```js
+ * const x = tf.range(0, 9).reshape([1, 1, 3, 3]);
+ * x.mirrorPad([[0, 0], [0, 0], [2, 2], [2, 2]], 'reflect').print();
+ * ```
+ * @param x The tensor to pad.
+ * @param paddings An array of length `R` (the rank of the tensor), where
+ * each element is a length-2 tuple of ints `[padBefore, padAfter]`,
+ * specifying how much to pad along each dimension of the tensor.
+ * In "reflect" mode, the padded regions do not include the borders,
+ * while in "symmetric" mode the padded regions do include the borders.
+ * For example, if the input is `[1, 2, 3]` and paddings is `[0, 2]`,
+ * then the output is `[1, 2, 3, 2, 1]` in "reflect" mode, and
+ * `[1, 2, 3, 3, 2]` in "symmetric" mode.
+ * If `mode` is "reflect" then both `paddings[D, 0]` and `paddings[D, 1]`
+ * must be no greater than `x.shape[D] - 1`. If mode is "symmetric"
+ * then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than
+ * `x.shape[D]`
+ * @param mode String to specify padding mode. Can be `'reflect' | 'symmetric'`
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
function mirrorPad_(x, paddings, mode) {
    // Only the two kernel-supported modes are accepted.
    assert(mode === 'reflect' || mode === 'symmetric', function () { return "Invalid mode. Mode must be either reflect or symmetric. " +
        ("Got " + mode + "."); });
    var $x = convertToTensor(x, 'x', 'mirrorPad');
    if ($x.rank === 0) {
        throw new Error('mirrorPad(scalar) is not defined. ' +
            'Pass non-scalar to mirrorPad');
    }
    // Exactly one [padBefore, padAfter] pair is required per dimension.
    assert(paddings.length === $x.rank, function () { return "Padding doesn't match input. Must be " + $x.rank + ". " +
        ("Got " + paddings.length + "."); });
    // 'reflect' excludes the border element, so each side may pad at most
    // shape[i] - 1; 'symmetric' includes the border and allows shape[i].
    var shapeOffset = mode === 'reflect' ? 1 : 0;
    // _loop_1 is downleveled TypeScript output: it gives the assert-message
    // closures their own binding of `i` per iteration.
    var _loop_1 = function (i) {
        assert(paddings[i].length === 2, function () { return "Invalid number of paddings. Must be length of 2 each."; });
        assert(paddings[i][0] >= 0 && paddings[i][0] <= $x.shape[i] - shapeOffset &&
            paddings[i][1] >= 0 && paddings[i][1] <= $x.shape[i] - shapeOffset, function () { return "Padding in dimension " + i + " cannot be greater than or equal " +
            ("to " + ($x.shape[i] - shapeOffset) + " or less than 0 for input of ") +
            ("shape " + $x.shape); });
    };
    for (var i = 0; i < $x.rank; i++) {
        _loop_1(i);
    }
    var attrs = { paddings: paddings, mode: mode };
    var inputs = { x: $x };
    return ENGINE.runKernel(MirrorPad, inputs, attrs);
}
var mirrorPad = op({ mirrorPad_: mirrorPad_ });
+
+ /**
+ * Returns the mod of a and b element-wise.
+ * `floor(x / y) * y + mod(x, y) = x`
+ * Supports broadcasting.
+ *
+ * We also expose `tf.modStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
+ *
+ * a.mod(b).print(); // or tf.mod(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast a mod b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
+ *
+ * a.mod(b).print(); // or tf.mod(a, b)
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
function mod_(a, b) {
    var $a = convertToTensor(a, 'a', 'mod');
    var $b = convertToTensor(b, 'b', 'mod');
    // Bring both operands to a common dtype before dispatching the kernel.
    var matched = makeTypesMatch($a, $b);
    $a = matched[0];
    $b = matched[1];
    return ENGINE.runKernel(Mod, { a: $a, b: $b });
}
var mod = op({ mod_: mod_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes square of `x` element-wise: `x ^ 2`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]);
+ *
+ * x.square().print(); // or tf.square(x)
+ * ```
+ * @param x The input Tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function square_(x) {
    var $x = convertToTensor(x, 'x', 'square');
    // The Square kernel takes no attributes, but runKernel expects an object.
    return ENGINE.runKernel('Square', { x: $x }, {});
}
var square = op({ square_: square_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Calculates the mean and variance of `x`. The mean and variance are
+ * calculated by aggregating the contents of `x` across `axes`. If `x` is
+ * 1-D and `axes = [0]` this is just the mean and variance of a vector.
+ *
+ * @param x The input tensor.
+ * @param axis The dimension(s) along with to compute mean and
+ * variance. By default it reduces all dimensions.
+ * @param keepDims If true, the moments have the same dimensionality as the
+ * input.
+ * @return An object with two keys: `mean` and `variance`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
function moments_(x, axis, keepDims) {
    if (axis === void 0) { axis = null; }
    if (keepDims === void 0) { keepDims = false; }
    x = convertToTensor(x, 'x', 'moments');
    var axes = parseAxisParam(axis, x.shape);
    var xMean = mean(x, axes, keepDims);
    // If reduced axes were dropped, re-insert them as size-1 dims so the
    // mean broadcasts against the original input when subtracted below.
    var broadcastShape = keepDims
        ? xMean.shape
        : expandShapeToKeepDim(xMean.shape, axes);
    // variance = E[(x - mean)^2], computed in float32.
    var devSquared = square(sub(cast(x, 'float32'), reshape(xMean, broadcastShape)));
    var variance = mean(devSquared, axes, keepDims);
    return { mean: xMean, variance: variance };
}
var moments = op({ moments_: moments_ });
+
+ /**
+ * Computes the next states and outputs of a stack of LSTMCells.
+ *
+ * Each cell output is used as input to the next cell.
+ *
+ * Returns `[cellState, cellOutput]`.
+ *
* Derived from tf.contrib.rnn.MultiRNNCell.
+ *
+ * @param lstmCells Array of LSTMCell functions.
+ * @param data The input to the cell.
+ * @param c Array of previous cell states.
+ * @param h Array of previous cell outputs.
+ *
+ * @doc {heading: 'Operations', subheading: 'RNN'}
+ */
function multiRNNCell_(lstmCells, data, c, h) {
    var $data = convertToTensor(data, 'data', 'multiRNNCell');
    var $c = convertToTensorArray(c, 'c', 'multiRNNCell');
    var $h = convertToTensorArray(h, 'h', 'multiRNNCell');
    var nextC = [];
    var nextH = [];
    var input = $data;
    for (var i = 0; i < lstmCells.length; i++) {
        // Each cell returns [newCellState, newCellOutput].
        var result = lstmCells[i](input, $c[i], $h[i]);
        nextC.push(result[0]);
        nextH.push(result[1]);
        // The cell's output feeds the next cell in the stack.
        input = result[1];
    }
    return [nextC, nextH];
}
var multiRNNCell = op({ multiRNNCell_: multiRNNCell_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values drawn from a multinomial distribution.
+ *
+ * ```js
+ * const probs = tf.tensor([.75, .25]);
+ * tf.multinomial(probs, 3).print();
+ * ```
+ *
+ * @param logits 1D array with unnormalized log-probabilities, or
+ * 2D array of shape `[batchSize, numOutcomes]`. See the `normalized`
+ * parameter.
+ * @param numSamples Number of samples to draw for each row slice.
+ * @param seed The seed number.
+ * @param normalized Whether the provided `logits` are normalized true
+ * probabilities (sum to 1). Defaults to false.
+ * @return 1D array of shape `[numSamples]`, or 2D array of shape
+ * `[batchSize, numSamples]`, depending on the rank of the input.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
function multinomial_(logits, numSamples, seed, normalized) {
    if (normalized === void 0) { normalized = false; }
    var $logits = convertToTensor(logits, 'logits', 'multinomial');
    // NOTE(review): for rank-2 input `size` is batchSize * numOutcomes, so
    // this guard checks total element count rather than outcomes per row.
    var numOutcomes = $logits.size;
    var origRank = $logits.rank;
    if (numOutcomes < 2) {
        throw new Error("Error in multinomial: you need at least 2 outcomes, but got " +
            (numOutcomes + "."));
    }
    if (origRank > 2) {
        throw new Error("Rank of probabilities must be 1 or 2, but is " + origRank);
    }
    // TODO(lina128): Investigate correct seed behavior. The code seems not to
    // allow setting seed to 0 (any falsy seed falls through to Math.random()).
    seed = seed || Math.random();
    // The kernel only accepts (and returns) rank 2 tensors.
    var logits2D = origRank === 1 ? reshape($logits, [1, -1]) : $logits;
    var inputs = { logits: logits2D };
    var attrs = { numSamples: numSamples, seed: seed, normalized: normalized };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Multinomial, inputs, attrs);
    // Squeeze the batch dimension back out for rank-1 callers.
    // tslint:disable-next-line:no-unnecessary-type-assertion
    return origRank === 1 ? reshape(res, [res.size]) : res;
}
var multinomial = op({ multinomial_: multinomial_ });
+
+ /**
+ * Returns the truth value of (a != b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([0, 2, 3]);
+ *
+ * a.notEqual(b).print();
+ * ```
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
function notEqual_(a, b) {
    // 'string_or_numeric' lets string tensors through as well.
    var $a = convertToTensor(a, 'a', 'notEqual', 'string_or_numeric');
    var $b = convertToTensor(b, 'b', 'notEqual', 'string_or_numeric');
    var matched = makeTypesMatch($a, $b);
    $a = matched[0];
    $b = matched[1];
    // Validates broadcast compatibility; the computed shape is not needed.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(NotEqual, { a: $a, b: $b });
}
var notEqual = op({ notEqual_: notEqual_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take
+ * value `onValue` (defaults to 1), while all other locations take value
+ * `offValue` (defaults to 0). If `indices` is rank `R`, the output has rank
+ * `R+1` with the last axis of size `depth`.
+ *
+ * ```js
+ * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();
+ * ```
+ *
+ * @param indices `tf.Tensor` of indices with dtype `int32`.
+ * @param depth The depth of the one hot dimension.
+ * @param onValue A number used to fill in the output when the index matches
+ * the location.
+ * @param offValue A number used to fill in the output when the index does
+ * not match the location.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function oneHot_(indices, depth, onValue, offValue) {
    if (onValue === void 0) { onValue = 1; }
    if (offValue === void 0) { offValue = 0; }
    // Depth is validated before any tensor conversion work happens.
    if (depth < 2) {
        throw new Error("Error in oneHot: depth must be >=2, but it is " + depth);
    }
    var $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');
    return ENGINE.runKernel(OneHot, { indices: $indices }, { depth: depth, onValue: onValue, offValue: offValue });
}
var oneHot = op({ oneHot_: oneHot_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with all elements set to 1 with the same shape as the
+ * given tensor.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2]);
+ * tf.onesLike(x).print();
+ * ```
+ * @param x A tensor.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function onesLike_(x) {
    // The kernel produces a tensor of ones matching $x's shape and dtype.
    var $x = convertToTensor(x, 'x', 'onesLike');
    return ENGINE.runKernel(OnesLike, { x: $x });
}
var onesLike = op({ onesLike_: onesLike_ });
+
+ /**
+ * Computes the outer product of two vectors, `v1` and `v2`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([3, 4, 5]);
+ *
+ * tf.outerProduct(a, b).print();
+ * ```
+ * @param v1 The first vector in the outer product operation.
+ * @param v2 The second vector in the outer product operation.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
function outerProduct_(v1, v2) {
    var $v1 = convertToTensor(v1, 'v1', 'outerProduct');
    var $v2 = convertToTensor(v2, 'v2', 'outerProduct');
    assert($v1.rank === 1 && $v2.rank === 1, function () { return "Error in outerProduct: inputs must be rank 1, but got ranks " +
        ($v1.rank + " and " + $v2.rank + "."); });
    // Lift v1 to a column and v2 to a row; their matmul is the outer product.
    var column = reshape($v1, [-1, 1]);
    var row = reshape($v2, [1, -1]);
    return matMul$1(column, row);
}
var outerProduct = op({ outerProduct_: outerProduct_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Pads a `tf.Tensor` with a given value and paddings.
+ *
+ * This operation implements `CONSTANT` mode. For `REFLECT` and `SYMMETRIC`,
+ * refer to `tf.mirrorPad`
+ *
+ * Also available are stricter rank-specific methods with the same signature
+ * as this method that assert that `paddings` is of given length.
+ * - `tf.pad1d`
+ * - `tf.pad2d`
+ * - `tf.pad3d`
+ * - `tf.pad4d`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * x.pad([[1, 2]]).print();
+ * ```
+ * @param x The tensor to pad.
+ * @param paddings An array of length `R` (the rank of the tensor), where
+ * each element is a length-2 tuple of ints `[padBefore, padAfter]`,
+ * specifying how much to pad along each dimension of the tensor.
+ * @param constantValue The pad value to use. Defaults to 0.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
function pad_(x, paddings, constantValue) {
    if (constantValue === void 0) { constantValue = 0; }
    var $x = convertToTensor(x, 'x', 'pad');
    // A scalar has no axes to pad along.
    if ($x.rank === 0) {
        throw new Error('pad(scalar) is not defined. Pass non-scalar to pad');
    }
    return ENGINE.runKernel(PadV2, { x: $x }, { paddings: paddings, constantValue: constantValue });
}
var pad = op({ pad_: pad_ });
+
+ /**
+ * Pads a `tf.Tensor1D` with a given value and paddings. See `pad` for details.
+ */
function pad1d_(x, paddings, constantValue) {
    if (constantValue === void 0) { constantValue = 0; }
    var isPair = paddings.length === 2;
    assert(isPair, function () { return 'Invalid number of paddings. Must be length of 2.'; });
    // Wrap the single [before, after] pair so the rank-generic pad applies
    // it to the only axis.
    return pad(x, [paddings], constantValue);
}
var pad1d = op({ pad1d_: pad1d_ });
+
+ /**
+ * Pads a `tf.Tensor2D` with a given value and paddings. See `pad` for details.
+ */
function pad2d_(x, paddings, constantValue) {
    if (constantValue === void 0) { constantValue = 0; }
    // Two axes, each with a [before, after] pair.
    var shapeOk = paddings.length === 2 &&
        paddings.every(function (p) { return p.length === 2; });
    assert(shapeOk, function () { return 'Invalid number of paddings. Must be length of 2 each.'; });
    return pad(x, paddings, constantValue);
}
var pad2d = op({ pad2d_: pad2d_ });
+
+ /**
+ * Pads a `tf.Tensor3D` with a given value and paddings. See `pad` for details.
+ */
function pad3d_(x, paddings, constantValue) {
    if (constantValue === void 0) { constantValue = 0; }
    // Three axes, each with a [before, after] pair.
    var shapeOk = paddings.length === 3 &&
        paddings.every(function (p) { return p.length === 2; });
    assert(shapeOk, function () { return 'Invalid number of paddings. Must be length of 2 each.'; });
    return pad(x, paddings, constantValue);
}
var pad3d = op({ pad3d_: pad3d_ });
+
+ /**
+ * Pads a `tf.Tensor4D` with a given value and paddings. See `pad` for details.
+ */
function pad4d_(x, paddings, constantValue) {
    if (constantValue === void 0) { constantValue = 0; }
    // Four axes, each with a [before, after] pair.
    var shapeOk = paddings.length === 4 &&
        paddings.every(function (p) { return p.length === 2; });
    assert(shapeOk, function () { return 'Invalid number of paddings. Must be length of 2 each.'; });
    return pad(x, paddings, constantValue);
}
var pad4d = op({ pad4d_: pad4d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * This operation divides "spatial" dimensions `[1, ..., M]` of the input into
+ * a grid of blocks of shape `blockShape`, and interleaves these blocks with
+ * the "batch" dimension (0) such that in the output, the spatial
+ * dimensions `[1, ..., M]` correspond to the position within the grid,
+ * and the batch dimension combines both the position within a spatial block
+ * and the original batch position. Prior to division into blocks,
+ * the spatial dimensions of the input are optionally zero padded
+ * according to `paddings`. See below for a precise description.
+ *
+ * ```js
+ * const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
+ * const blockShape = [2, 2];
+ * const paddings = [[0, 0], [0, 0]];
+ *
+ * x.spaceToBatchND(blockShape, paddings).print();
+ * ```
+ *
+ * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
+ * remainingShape`, where spatialShape has `M` dimensions.
+ * @param blockShape A 1-D array. Must have shape `[M]`, all values must
+ * be >= 1.
+ * @param paddings A 2-D array. Must have shape `[M, 2]`, all values must be >=
+ * 0. `paddings[i] = [padStart, padEnd]` specifies the amount to zero-pad
+ * from input dimension `i + 1`, which corresponds to spatial dimension `i`. It
+ * is required that
+ * `(inputShape[i + 1] + padStart + padEnd) % blockShape[i] === 0`
+ *
+ * This operation is equivalent to the following steps:
+ *
+ * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input
+ * according to `paddings` to produce `padded` of shape paddedShape.
+ *
+ * 2. Reshape `padded` to `reshapedPadded` of shape:
+ * `[batch] + [paddedShape[1] / blockShape[0], blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1], blockShape[M-1]] + remainingShape`
+ *
+ * 3. Permute dimensions of `reshapedPadded` to produce `permutedReshapedPadded`
+ * of shape: `blockShape + [batch] + [paddedShape[1] / blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1]] + remainingShape`
+ *
+ * 4. Reshape `permutedReshapedPadded` to flatten `blockShape` into the
+ * batch dimension, producing an output tensor of shape:
+ * `[batch * prod(blockShape)] + [paddedShape[1] / blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1]] + remainingShape`
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
function spaceToBatchND_(x, blockShape, paddings) {
    var $x = convertToTensor(x, 'x', 'spaceToBatchND');
    // The input needs a batch dimension plus one dimension per block entry.
    assert($x.rank >= 1 + blockShape.length, function () { return "input rank " + $x.rank + " should be > than [blockShape] " + blockShape.length; });
    assert(paddings.length === blockShape.length, function () { return "paddings.shape[0] " + paddings.length + " must be equal to [blockShape] " + blockShape.length; });
    // Each padded spatial dimension (indices 1..M of the shape) must divide
    // evenly by its corresponding block size.
    assert($x.shape.reduce(function (a, b, i) {
        if (i > 0 && i <= blockShape.length) {
            return a &&
                ((b + paddings[i - 1][0] + paddings[i - 1][1]) %
                    blockShape[i - 1] ===
                    0);
        }
        return a;
    }, true), function () { return "input spatial dimensions " + $x.shape.slice(1) + " with paddings " + paddings.toString() + " must be divisible by blockShapes " + blockShape.toString(); });
    var inputs = { x: $x };
    var attrs = { blockShape: blockShape, paddings: paddings };
    return ENGINE.runKernel(SpaceToBatchND, inputs, attrs);
}
var spaceToBatchND = op({ spaceToBatchND_: spaceToBatchND_ });
+
+ /**
+ * Performs an N-D pooling operation
+ *
+ * @param input The input tensor, of rank 4 or rank 3 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param windowShape The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param poolingType The type of pooling, either 'max' or 'avg'.
+ * @param pad The type of padding algorithm:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_guides/python/nn#Convolution](
+ * https://www.tensorflow.org/api_guides/python/nn#Convolution)
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in dilated pooling. Defaults to `[1, 1]`. If `dilationRate` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
function pool_(input, windowShape, poolingType, pad, dilations, strides, dimRoundingMode) {
    if (dilations == null) {
        dilations = [1, 1];
    }
    if (strides == null) {
        strides = 1;
    }
    // Numeric pad of 0 is treated as 'valid'.
    if (pad === 0) {
        pad = 'valid';
    }
    var $x = convertToTensor(input, 'x', 'maxPool');
    var x4D = $x;
    var reshapedTo4D = false;
    // Rank-3 inputs are treated as a batch of 1.
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in pool: Either strides or dilations must be 1. ' +
        ("Got strides " + strides + " and dilations '" + dilations + "'"); });
    var convInfo = computePool2DInfo(x4D.shape, windowShape, strides, dilations, pad);
    var dilation = [convInfo.dilationHeight, convInfo.dilationWidth];
    // The following implementation does batchToSpace(pool(spaceToBatch(x)))
    // whenever dilation > 1 since the TF kernels do not support dilation > 1.
    // tslint:disable-next-line:max-line-length
    // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L1037
    var basePadding;
    if (pad === 'same') {
        basePadding = withSpaceToBatchBasePaddings([convInfo.filterHeight, convInfo.filterWidth], dilation);
    }
    else {
        basePadding = [[0, 0], [0, 0]];
    }
    var isDilationOne = dilation[0] === 1 && dilation[1] === 1;
    // Paddings make each spatial dim divisible by the dilation; crops undo
    // the extra padding after the pooled result is mapped back.
    var _a = __read(requiredSpaceToBatchPaddings([convInfo.inHeight, convInfo.inWidth], dilation, basePadding), 2), adjustedPadding = _a[0], adjustedCrops = _a[1];
    // When space-to-batch is used, padding was already applied, so pool with
    // 'valid'; otherwise pass the caller's pad mode through unchanged.
    var convertedPad = isDilationOne ? pad : 'valid';
    var convertedX = isDilationOne ? x4D : spaceToBatchND(x4D, dilation, adjustedPadding);
    var forwardOp = poolingType === 'avg' ?
        function () { return avgPool(convertedX, windowShape, strides, convertedPad, dimRoundingMode); } :
        function () { return maxPool(convertedX, windowShape, strides, convertedPad, dimRoundingMode); };
    var y = forwardOp();
    var res = isDilationOne ? y : batchToSpaceND(y, dilation, adjustedCrops);
    // Restore the caller's rank-3 view if we added a batch dimension.
    if (reshapedTo4D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
// Helper function to compute crops and paddings for pool with dilation > 1.
// tslint:disable-next-line:max-line-length
// https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/array_ops.py#L2184
function requiredSpaceToBatchPaddings(inputShape, blockShape, basePadding) {
    var padStart = [];
    var origPadEnd = [];
    for (var i = 0; i < basePadding.length; i++) {
        padStart.push(basePadding[i][0]);
        origPadEnd.push(basePadding[i][1]);
    }
    // NOTE(review): the Python reference adds these element-wise
    // (input + padStart + padEnd); `concat` only coincides with it because
    // the indices read below never exceed blockShape.length - 1, so only the
    // raw inputShape entries are consulted. Preserving existing behavior.
    var fullInputShape = inputShape.concat(padStart, origPadEnd);
    var paddings = [];
    var crops = [];
    for (var j = 0; j < blockShape.length; j++) {
        var block = blockShape[j];
        // Extra end-padding needed to make the dimension divisible by block.
        var extra = (block - fullInputShape[j] % block) % block;
        paddings.push([padStart[j], origPadEnd[j] + extra]);
        crops.push([0, extra]);
    }
    return [paddings, crops];
}
+ // Helper function to compute base paddings for pool with dilation > 1.
+ // tslint:disable-next-line:max-line-length
+ // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L524
+ function withSpaceToBatchBasePaddings(filterShape, dilation) {
+ // Spatial dimensions of the filters and the upsampled filters in which we
+ // introduce (rate - 1) zeros between consecutive filter values.
+ var dilatedFilterShape = filterShape.map(function (s, i) {
+ return s + (s - 1) * (dilation[i] - 1);
+ });
+ var padExtraShape = dilatedFilterShape.map(function (s) { return s - 1; });
+ // When padding is odd, we pad more at end, following the same
+ // convention as conv2d.
+ var padExtraStart = padExtraShape.map(function (s) { return Math.floor(s / 2); });
+ var padExtraEnd = padExtraShape.map(function (s, i) { return s - padExtraStart[i]; });
+ return padExtraShape.map(function (_, i) {
+ return [padExtraStart[i], padExtraEnd[i]];
+ });
+ }
    // Public `tf.pool` entry point: wraps pool_ via the op() helper used by
    // every op in this bundle.
    var pool = op({ pool_: pool_ });
+
+ /**
+ * Computes the power of one `tf.Tensor` to another. Supports broadcasting.
+ *
+ * Given a `tf.Tensor` x and a `tf.Tensor` y, this operation computes x^y for
+ * corresponding elements in x and y. The result's dtype will be the upcasted
+ * type of the `base` and `exp` dtypes.
+ *
+ * ```js
+ * const a = tf.tensor([[2, 3], [4, 5]])
+ * const b = tf.tensor([[1, 2], [3, 0]]).toInt();
+ *
+ * a.pow(b).print(); // or tf.pow(a, b)
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor([[1, 2], [3, 4]])
+ * const b = tf.tensor(2).toInt();
+ *
+ * a.pow(b).print(); // or tf.pow(a, b)
+ * ```
+ * We also expose `powStrict` which has the same signature as this op and
+ * asserts that `base` and `exp` are the same shape (does not broadcast).
+ *
+ * @param base The base `tf.Tensor` to pow element-wise.
+ * @param exp The exponent `tf.Tensor` to pow element-wise.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function pow_(base, exp) {
+ var _a;
+ var $base = convertToTensor(base, 'base', 'pow');
+ var $exp = convertToTensor(exp, 'exp', 'pow');
+ _a = __read(makeTypesMatch($base, $exp), 2), $base = _a[0], $exp = _a[1];
+ var inputs = { a: $base, b: $exp };
+ return ENGINE.runKernel(Pow, inputs);
+ }
+ var pow = op({ pow_: pow_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes leaky rectified linear element-wise with parametric alphas.
+ *
+ * `x < 0 ? alpha * x : f(x) = x`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ * const alpha = tf.scalar(0.1);
+ *
+ * x.prelu(alpha).print(); // or tf.prelu(x, alpha)
+ * ```
+ * @param x The input tensor.
+ * @param alpha Scaling factor for negative values.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function prelu_(x, alpha) {
+ var $x = convertToTensor(x, 'x', 'prelu');
+ var $alpha = convertToTensor(alpha, 'alpha', 'prelu');
+ var inputs = { x: $x, alpha: $alpha };
+ return ENGINE.runKernel(Prelu, inputs);
+ }
+ var prelu = op({ prelu_: prelu_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Prints information about the `tf.Tensor` including its data.
+ *
+ * ```js
+ * const verbose = true;
+ * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);
+ * ```
+ * @param x The tensor to be printed.
+ * @param verbose Whether to print verbose information about the ` Tensor`,
+ * including dtype and size.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function print(x, verbose) {
+ if (verbose === void 0) { verbose = false; }
+ console.log(x.toString(verbose));
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the product of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and a
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.prod().print(); // or tf.prod(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.prod(axis).print(); // or tf.prod(x, axis)
+ * ```
+ *
+ * @param x The input tensor to compute the product over. If the dtype is `bool`
+ * it will be converted to `int32` and the output dtype will be `int32`.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function prod_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'prod');
+ if ($x.dtype === 'bool') {
+ // bool is not an allowed type for the underlying kernel.
+ $x = cast($x, 'int32');
+ }
+ var inputs = { x: $x };
+ var attrs = { axis: axis, keepDims: keepDims };
+ return ENGINE.runKernel(Prod, inputs, attrs);
+ }
+ var prod = op({ prod_: prod_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a random number generator
+ * function defined by the user.
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param randFunction A random number generator function which is called
+ * for each element in the output tensor.
+ * @param dtype The data type of the output tensor. Defaults to 'float32'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
+ function rand_(shape, randFunction, dtype) {
+ var size = sizeFromShape(shape);
+ var values = null;
+ if (dtype == null || dtype === 'float32') {
+ values = new Float32Array(size);
+ }
+ else if (dtype === 'int32') {
+ values = new Int32Array(size);
+ }
+ else if (dtype === 'bool') {
+ values = new Uint8Array(size);
+ }
+ else {
+ throw new Error("Unknown data type " + dtype);
+ }
+ for (var i = 0; i < size; i++) {
+ values[i] = randFunction();
+ }
+ return ENGINE.makeTensor(values, shape, dtype);
+ }
+ var rand = op({ rand_: rand_ });
+
    // Resolve the global object across environments: modern runtimes via
    // globalThis, browsers via window, Node via global, workers via self.
    var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};
+ function getAugmentedNamespace(n) {
+ if (n.__esModule)
+ return n;
+ var a = Object.defineProperty({}, '__esModule', { value: true });
+ Object.keys(n).forEach(function (k) {
+ var d = Object.getOwnPropertyDescriptor(n, k);
+ Object.defineProperty(a, k, d.get ? d : {
+ enumerable: true,
+ get: function () {
+ return n[k];
+ }
+ });
+ });
+ return a;
+ }
+ function createCommonjsModule(fn) {
+ var module = { exports: {} };
+ return fn(module, module.exports), module.exports;
+ }
+
    // Bundled copy of the "alea" generator from the seedrandom package,
    // inlined here via createCommonjsModule.
    var alea = createCommonjsModule(function (module) {
        // A port of an algorithm by Johannes Baagøe <[email protected]>, 2010
        // http://baagoe.com/en/RandomMusings/javascript/
        // https://github.com/nquinlan/better-random-numbers-for-javascript-mirror
        // Original work is under MIT license -
        // Copyright (C) 2010 by Johannes Baagøe <[email protected]>
        //
        // Permission is hereby granted, free of charge, to any person obtaining a copy
        // of this software and associated documentation files (the "Software"), to deal
        // in the Software without restriction, including without limitation the rights
        // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
        // copies of the Software, and to permit persons to whom the Software is
        // furnished to do so, subject to the following conditions:
        //
        // The above copyright notice and this permission notice shall be included in
        // all copies or substantial portions of the Software.
        //
        // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
        // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
        // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
        // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
        // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
        // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
        // THE SOFTWARE.
        (function (global, module, define) {
            // Generator state: three fractional words s0..s2 plus a carry c.
            function Alea(seed) {
                var me = this, mash = Mash();
                me.next = function () {
                    var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32
                    me.s0 = me.s1;
                    me.s1 = me.s2;
                    return me.s2 = t - (me.c = t | 0);
                };
                // Apply the seeding algorithm from Baagoe.
                me.c = 1;
                me.s0 = mash(' ');
                me.s1 = mash(' ');
                me.s2 = mash(' ');
                me.s0 -= mash(seed);
                if (me.s0 < 0) {
                    me.s0 += 1;
                }
                me.s1 -= mash(seed);
                if (me.s1 < 0) {
                    me.s1 += 1;
                }
                me.s2 -= mash(seed);
                if (me.s2 < 0) {
                    me.s2 += 1;
                }
                mash = null;
            }
            // Copies generator state from f onto t (used for state save/restore).
            function copy(f, t) {
                t.c = f.c;
                t.s0 = f.s0;
                t.s1 = f.s1;
                t.s2 = f.s2;
                return t;
            }
            // Factory: builds a [0,1) prng with int32/double/quick variants and
            // optional state save/restore via opts.state.
            function impl(seed, opts) {
                var xg = new Alea(seed), state = opts && opts.state, prng = xg.next;
                prng.int32 = function () { return (xg.next() * 0x100000000) | 0; };
                prng.double = function () {
                    return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53
                };
                prng.quick = prng;
                if (state) {
                    if (typeof (state) == 'object')
                        copy(state, xg);
                    prng.state = function () { return copy(xg, {}); };
                }
                return prng;
            }
            // Baagøe's "Mash" string mixer used for seeding.
            function Mash() {
                var n = 0xefc8249d;
                var mash = function (data) {
                    data = String(data);
                    for (var i = 0; i < data.length; i++) {
                        n += data.charCodeAt(i);
                        var h = 0.02519603282416938 * n;
                        n = h >>> 0;
                        h -= n;
                        h *= n;
                        n = h >>> 0;
                        h -= n;
                        n += h * 0x100000000; // 2^32
                    }
                    return (n >>> 0) * 2.3283064365386963e-10; // 2^-32
                };
                return mash;
            }
            if (module && module.exports) {
                module.exports = impl;
            }
            else if (define && define.amd) {
                define(function () { return impl; });
            }
            else {
                this.alea = impl;
            }
        })(commonjsGlobal, module, // present in node.js
        // NOTE(review): the bundler rewrote `typeof define` to `typeof undefined`,
        // so this AMD check is always false and the CommonJS branch is taken.
        (typeof undefined) == 'function' // present with an AMD loader
        );
    });
+
    // Bundled copy of the "xor128" generator from the seedrandom package.
    var xor128 = createCommonjsModule(function (module) {
        // A Javascript implementation of the "xor128" prng algorithm by
        // George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper
        (function (global, module, define) {
            // 128-bit xorshift state held in four 32-bit words x, y, z, w.
            function XorGen(seed) {
                var me = this, strseed = '';
                me.x = 0;
                me.y = 0;
                me.z = 0;
                me.w = 0;
                // Set up generator function.
                me.next = function () {
                    var t = me.x ^ (me.x << 11);
                    me.x = me.y;
                    me.y = me.z;
                    me.z = me.w;
                    return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8);
                };
                if (seed === (seed | 0)) {
                    // Integer seed.
                    me.x = seed;
                }
                else {
                    // String seed.
                    strseed += seed;
                }
                // Mix in string seed, then discard an initial batch of 64 values.
                for (var k = 0; k < strseed.length + 64; k++) {
                    me.x ^= strseed.charCodeAt(k) | 0;
                    me.next();
                }
            }
            // Copies generator state from f onto t (used for state save/restore).
            function copy(f, t) {
                t.x = f.x;
                t.y = f.y;
                t.z = f.z;
                t.w = f.w;
                return t;
            }
            // Factory returning a [0,1) prng with int32/double/quick variants
            // and optional state save/restore via opts.state.
            function impl(seed, opts) {
                var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
                prng.double = function () {
                    // Combine two draws into a full-precision double; retry on
                    // the (rare) exact zero so the result is in (0, 1).
                    do {
                        var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                    } while (result === 0);
                    return result;
                };
                prng.int32 = xg.next;
                prng.quick = prng;
                if (state) {
                    if (typeof (state) == 'object')
                        copy(state, xg);
                    prng.state = function () { return copy(xg, {}); };
                }
                return prng;
            }
            if (module && module.exports) {
                module.exports = impl;
            }
            else if (define && define.amd) {
                define(function () { return impl; });
            }
            else {
                this.xor128 = impl;
            }
        })(commonjsGlobal, module, // present in node.js
        // NOTE(review): bundler rewrote `typeof define` to `typeof undefined`;
        // this AMD check is always false, so the CommonJS branch is taken.
        (typeof undefined) == 'function' // present with an AMD loader
        );
    });
+
    // Bundled copy of the "xorwow" generator from the seedrandom package.
    var xorwow = createCommonjsModule(function (module) {
        // A Javascript implementation of the "xorwow" prng algorithm by
        // George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper
        (function (global, module, define) {
            // xorshift state in x, y, z, w, v plus a Weyl counter d.
            function XorGen(seed) {
                var me = this, strseed = '';
                // Set up generator function.
                me.next = function () {
                    var t = (me.x ^ (me.x >>> 2));
                    me.x = me.y;
                    me.y = me.z;
                    me.z = me.w;
                    me.w = me.v;
                    return (me.d = (me.d + 362437 | 0)) +
                        (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0;
                };
                me.x = 0;
                me.y = 0;
                me.z = 0;
                me.w = 0;
                me.v = 0;
                if (seed === (seed | 0)) {
                    // Integer seed.
                    me.x = seed;
                }
                else {
                    // String seed.
                    strseed += seed;
                }
                // Mix in string seed, then discard an initial batch of 64 values.
                for (var k = 0; k < strseed.length + 64; k++) {
                    me.x ^= strseed.charCodeAt(k) | 0;
                    if (k == strseed.length) {
                        // Derive the initial Weyl counter once the seed is mixed in.
                        me.d = me.x << 10 ^ me.x >>> 4;
                    }
                    me.next();
                }
            }
            // Copies generator state from f onto t (used for state save/restore).
            function copy(f, t) {
                t.x = f.x;
                t.y = f.y;
                t.z = f.z;
                t.w = f.w;
                t.v = f.v;
                t.d = f.d;
                return t;
            }
            // Factory returning a [0,1) prng with int32/double/quick variants
            // and optional state save/restore via opts.state.
            function impl(seed, opts) {
                var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
                prng.double = function () {
                    do {
                        var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                    } while (result === 0);
                    return result;
                };
                prng.int32 = xg.next;
                prng.quick = prng;
                if (state) {
                    if (typeof (state) == 'object')
                        copy(state, xg);
                    prng.state = function () { return copy(xg, {}); };
                }
                return prng;
            }
            if (module && module.exports) {
                module.exports = impl;
            }
            else if (define && define.amd) {
                define(function () { return impl; });
            }
            else {
                this.xorwow = impl;
            }
        })(commonjsGlobal, module, // present in node.js
        // NOTE(review): bundler rewrote `typeof define` to `typeof undefined`;
        // this AMD check is always false, so the CommonJS branch is taken.
        (typeof undefined) == 'function' // present with an AMD loader
        );
    });
+
    // Bundled copy of the "xorshift7" generator from the seedrandom package.
    var xorshift7 = createCommonjsModule(function (module) {
        // A Javascript implementation of the "xorshift7" algorithm by
        // François Panneton and Pierre L'ecuyer:
        // "On the Xorshift Random Number Generators"
        // http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf
        (function (global, module, define) {
            // State is a circular buffer of eight 32-bit words (me.x) plus a
            // cursor (me.i).
            function XorGen(seed) {
                var me = this;
                // Set up generator function.
                me.next = function () {
                    // Update xor generator.
                    var X = me.x, i = me.i, t, v;
                    t = X[i];
                    t ^= (t >>> 7);
                    v = t ^ (t << 24);
                    t = X[(i + 1) & 7];
                    v ^= t ^ (t >>> 10);
                    t = X[(i + 3) & 7];
                    v ^= t ^ (t >>> 3);
                    t = X[(i + 4) & 7];
                    v ^= t ^ (t << 7);
                    t = X[(i + 7) & 7];
                    t = t ^ (t << 13);
                    v ^= t ^ (t << 9);
                    X[i] = v;
                    me.i = (i + 1) & 7;
                    return v;
                };
                // Seeds the eight-word state from an integer or string seed
                // and warms the generator up.
                function init(me, seed) {
                    var j, X = [];
                    if (seed === (seed | 0)) {
                        // Seed state array using a 32-bit integer.
                        X[0] = seed;
                    }
                    else {
                        // Seed state using a string.
                        seed = '' + seed;
                        for (j = 0; j < seed.length; ++j) {
                            X[j & 7] = (X[j & 7] << 15) ^
                                (seed.charCodeAt(j) + X[(j + 1) & 7] << 13);
                        }
                    }
                    // Enforce an array length of 8, not all zeroes.
                    while (X.length < 8)
                        X.push(0);
                    for (j = 0; j < 8 && X[j] === 0; ++j)
                        ;
                    if (j == 8)
                        X[7] = -1;
                    me.x = X;
                    me.i = 0;
                    // Discard an initial 256 values.
                    for (j = 256; j > 0; --j) {
                        me.next();
                    }
                }
                init(me, seed);
            }
            // Copies generator state from f onto t (used for state save/restore).
            function copy(f, t) {
                t.x = f.x.slice();
                t.i = f.i;
                return t;
            }
            // Factory returning a [0,1) prng with int32/double/quick variants
            // and optional state save/restore via opts.state.
            function impl(seed, opts) {
                if (seed == null)
                    seed = +(new Date);
                var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
                prng.double = function () {
                    do {
                        var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                    } while (result === 0);
                    return result;
                };
                prng.int32 = xg.next;
                prng.quick = prng;
                if (state) {
                    if (state.x)
                        copy(state, xg);
                    prng.state = function () { return copy(xg, {}); };
                }
                return prng;
            }
            if (module && module.exports) {
                module.exports = impl;
            }
            else if (define && define.amd) {
                define(function () { return impl; });
            }
            else {
                this.xorshift7 = impl;
            }
        })(commonjsGlobal, module, // present in node.js
        // NOTE(review): bundler rewrote `typeof define` to `typeof undefined`;
        // this AMD check is always false, so the CommonJS branch is taken.
        (typeof undefined) == 'function' // present with an AMD loader
        );
    });
+
    // Bundled copy of Richard Brent's "xor4096" generator from the seedrandom
    // package.
    var xor4096 = createCommonjsModule(function (module) {
        // A Javascript implementation of Richard Brent's Xorgens xor4096 algorithm.
        //
        // This fast non-cryptographic random number generator is designed for
        // use in Monte-Carlo algorithms. It combines a long-period xorshift
        // generator with a Weyl generator, and it passes all common batteries
        // of statistical tests for randomness while consuming only a few nanoseconds
        // for each prng generated. For background on the generator, see Brent's
        // paper: "Some long-period random number generators using shifts and xors."
        // http://arxiv.org/pdf/1004.3115v1.pdf
        //
        // Usage:
        //
        // var xor4096 = require('xor4096');
        // random = xor4096(1); // Seed with int32 or string.
        // assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits.
        // assert.equal(random.int32(), 1806534897); // signed int32, 32 bits.
        //
        // For nonzero numeric keys, this implementation provides a sequence
        // identical to that by Brent's xorgens 3 implementation in C. This
        // implementation also provides for initializing the generator with
        // string seeds, or for saving and restoring the state of the generator.
        //
        // On Chrome, this prng benchmarks about 2.1 times slower than
        // Javascript's built-in Math.random().
        (function (global, module, define) {
            // State: 128-word circular xorshift array X, cursor i, and Weyl
            // counter w.
            function XorGen(seed) {
                var me = this;
                // Set up generator function.
                me.next = function () {
                    var w = me.w, X = me.X, i = me.i, t, v;
                    // Update Weyl generator.
                    me.w = w = (w + 0x61c88647) | 0;
                    // Update xor generator.
                    v = X[(i + 34) & 127];
                    t = X[i = ((i + 1) & 127)];
                    v ^= v << 13;
                    t ^= t << 17;
                    v ^= v >>> 15;
                    t ^= t >>> 12;
                    // Update Xor generator array state.
                    v = X[i] = v ^ t;
                    me.i = i;
                    // Result is the combination.
                    return (v + (w ^ (w >>> 16))) | 0;
                };
                // Seeds the 128-word state from an integer or string seed and
                // warms the generator up.
                function init(me, seed) {
                    var t, v, i, j, w, X = [], limit = 128;
                    if (seed === (seed | 0)) {
                        // Numeric seeds initialize v, which is used to generate X.
                        v = seed;
                        seed = null;
                    }
                    else {
                        // String seeds are mixed into v and X one character at a time.
                        seed = seed + '\0';
                        v = 0;
                        limit = Math.max(limit, seed.length);
                    }
                    // Initialize circular array and weyl value.
                    for (i = 0, j = -32; j < limit; ++j) {
                        // Put the unicode characters into the array, and shuffle them.
                        if (seed)
                            v ^= seed.charCodeAt((j + 32) % seed.length);
                        // After 32 shuffles, take v as the starting w value.
                        if (j === 0)
                            w = v;
                        v ^= v << 10;
                        v ^= v >>> 15;
                        v ^= v << 4;
                        v ^= v >>> 13;
                        if (j >= 0) {
                            w = (w + 0x61c88647) | 0; // Weyl.
                            t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array.
                            i = (0 == t) ? i + 1 : 0; // Count zeroes.
                        }
                    }
                    // We have detected all zeroes; make the key nonzero.
                    if (i >= 128) {
                        X[(seed && seed.length || 0) & 127] = -1;
                    }
                    // Run the generator 512 times to further mix the state before using it.
                    // Factoring this as a function slows the main generator, so it is just
                    // unrolled here. The weyl generator is not advanced while warming up.
                    i = 127;
                    for (j = 4 * 128; j > 0; --j) {
                        v = X[(i + 34) & 127];
                        t = X[i = ((i + 1) & 127)];
                        v ^= v << 13;
                        t ^= t << 17;
                        v ^= v >>> 15;
                        t ^= t >>> 12;
                        X[i] = v ^ t;
                    }
                    // Storing state as object members is faster than using closure variables.
                    me.w = w;
                    me.X = X;
                    me.i = i;
                }
                init(me, seed);
            }
            // Copies generator state from f onto t (used for state save/restore).
            function copy(f, t) {
                t.i = f.i;
                t.w = f.w;
                t.X = f.X.slice();
                return t;
            }
            // Factory returning a [0,1) prng with int32/double/quick variants
            // and optional state save/restore via opts.state.
            function impl(seed, opts) {
                if (seed == null)
                    seed = +(new Date);
                var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
                prng.double = function () {
                    do {
                        var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                    } while (result === 0);
                    return result;
                };
                prng.int32 = xg.next;
                prng.quick = prng;
                if (state) {
                    if (state.X)
                        copy(state, xg);
                    prng.state = function () { return copy(xg, {}); };
                }
                return prng;
            }
            if (module && module.exports) {
                module.exports = impl;
            }
            else if (define && define.amd) {
                define(function () { return impl; });
            }
            else {
                this.xor4096 = impl;
            }
        })(commonjsGlobal, // window object or global
        module, // present in node.js
        // NOTE(review): bundler rewrote `typeof define` to `typeof undefined`;
        // this AMD check is always false, so the CommonJS branch is taken.
        (typeof undefined) == 'function' // present with an AMD loader
        );
    });
+
    // Bundled copy of the "Tyche-i" generator from the seedrandom package.
    var tychei = createCommonjsModule(function (module) {
        // A Javascript implementation of the "Tyche-i" prng algorithm by
        // Samuel Neves and Filipe Araujo.
        // See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf
        (function (global, module, define) {
            // State is four 32-bit words a, b, c, d.
            function XorGen(seed) {
                var me = this, strseed = '';
                // Set up generator function.
                me.next = function () {
                    var b = me.b, c = me.c, d = me.d, a = me.a;
                    b = (b << 25) ^ (b >>> 7) ^ c;
                    c = (c - d) | 0;
                    d = (d << 24) ^ (d >>> 8) ^ a;
                    a = (a - b) | 0;
                    me.b = b = (b << 20) ^ (b >>> 12) ^ c;
                    me.c = c = (c - d) | 0;
                    me.d = (d << 16) ^ (c >>> 16) ^ a;
                    return me.a = (a - b) | 0;
                };
                /* The following is non-inverted tyche, which has better internal
                 * bit diffusion, but which is about 25% slower than tyche-i in JS.
                me.next = function() {
                  var a = me.a, b = me.b, c = me.c, d = me.d;
                  a = (me.a + me.b | 0) >>> 0;
                  d = me.d ^ a; d = d << 16 ^ d >>> 16;
                  c = me.c + d | 0;
                  b = me.b ^ c; b = b << 12 ^ d >>> 20;
                  me.a = a = a + b | 0;
                  d = d ^ a; me.d = d = d << 8 ^ d >>> 24;
                  me.c = c = c + d | 0;
                  b = b ^ c;
                  return me.b = (b << 7 ^ b >>> 25);
                }
                */
                me.a = 0;
                me.b = 0;
                me.c = 2654435769 | 0;
                me.d = 1367130551;
                if (seed === Math.floor(seed)) {
                    // Integer seed: split across the a (high) and b (low) words.
                    me.a = (seed / 0x100000000) | 0;
                    me.b = seed | 0;
                }
                else {
                    // String seed.
                    strseed += seed;
                }
                // Mix in string seed, then discard an initial batch of values.
                for (var k = 0; k < strseed.length + 20; k++) {
                    me.b ^= strseed.charCodeAt(k) | 0;
                    me.next();
                }
            }
            // Copies generator state from f onto t (used for state save/restore).
            function copy(f, t) {
                t.a = f.a;
                t.b = f.b;
                t.c = f.c;
                t.d = f.d;
                return t;
            }
            // Factory returning a [0,1) prng with int32/double/quick variants
            // and optional state save/restore via opts.state.
            function impl(seed, opts) {
                var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
                prng.double = function () {
                    do {
                        var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                    } while (result === 0);
                    return result;
                };
                prng.int32 = xg.next;
                prng.quick = prng;
                if (state) {
                    if (typeof (state) == 'object')
                        copy(state, xg);
                    prng.state = function () { return copy(xg, {}); };
                }
                return prng;
            }
            if (module && module.exports) {
                module.exports = impl;
            }
            else if (define && define.amd) {
                define(function () { return impl; });
            }
            else {
                this.tychei = impl;
            }
        })(commonjsGlobal, module, // present in node.js
        // NOTE(review): bundler rewrote `typeof define` to `typeof undefined`;
        // this AMD check is always false, so the CommonJS branch is taken.
        (typeof undefined) == 'function' // present with an AMD loader
        );
    });
+
    // Stub produced by the bundler's node-module resolution: presumably a
    // Node-only dependency (likely 'crypto', which seedrandom requires) was
    // replaced with an empty module for the browser build — TODO confirm.
    var _nodeResolve_empty = {};

    // ES-module-shaped view of the empty stub (default export only).
    var _nodeResolve_empty$1 = {
        __proto__: null,
        'default': _nodeResolve_empty
    };

    // Namespace object handed to the bundled CommonJS code that required the
    // stubbed module.
    var require$$0 = /*@__PURE__*/ getAugmentedNamespace(_nodeResolve_empty$1);
+
+ /*
+ Copyright 2019 David Bau.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ */
+ var seedrandom$1 = createCommonjsModule(function (module) {
+ (function (global, pool, math) {
+ //
+ // The following constants are related to IEEE 754 limits.
+ //
+ var width = 256, // each RC4 output is 0 <= x < 256
+ chunks = 6, // at least six RC4 outputs for each double
+ digits = 52, // there are 52 significant digits in a double
+ rngname = 'random', // rngname: name for Math.random and Math.seedrandom
+ startdenom = math.pow(width, chunks), significance = math.pow(2, digits), overflow = significance * 2, mask = width - 1, nodecrypto; // node.js crypto module, initialized at the bottom.
+ //
+ // seedrandom()
+ // This is the seedrandom function described above.
+ //
+ function seedrandom(seed, options, callback) {
+ var key = [];
+ options = (options == true) ? { entropy: true } : (options || {});
+ // Flatten the seed string or build one from local entropy if needed.
+ var shortseed = mixkey(flatten(options.entropy ? [seed, tostring(pool)] :
+ (seed == null) ? autoseed() : seed, 3), key);
+ // Use the seed to initialize an ARC4 generator.
+ var arc4 = new ARC4(key);
+ // This function returns a random double in [0, 1) that contains
+ // randomness in every bit of the mantissa of the IEEE 754 value.
+ var prng = function () {
+ var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48
+ d = startdenom, // and denominator d = 2 ^ 48.
+ x = 0; // and no 'extra last byte'.
+ while (n < significance) { // Fill up all significant digits by
+ n = (n + x) * width; // shifting numerator and
+ d *= width; // denominator and generating a
+ x = arc4.g(1); // new least-significant-byte.
+ }
+ while (n >= overflow) { // To avoid rounding up, before adding
+ n /= 2; // last byte, shift everything
+ d /= 2; // right using integer math until
+ x >>>= 1; // we have exactly the desired bits.
+ }
+ return (n + x) / d; // Form the number within [0, 1).
+ };
+ prng.int32 = function () { return arc4.g(4) | 0; };
+ prng.quick = function () { return arc4.g(4) / 0x100000000; };
+ prng.double = prng;
+ // Mix the randomness into accumulated entropy.
+ mixkey(tostring(arc4.S), pool);
+ // Calling convention: what to return as a function of prng, seed, is_math.
+ return (options.pass || callback ||
+ function (prng, seed, is_math_call, state) {
+ if (state) {
+ // Load the arc4 state from the given state if it has an S array.
+ if (state.S) {
+ copy(state, arc4);
+ }
+ // Only provide the .state method if requested via options.state.
+ prng.state = function () { return copy(arc4, {}); };
+ }
+ // If called as a method of Math (Math.seedrandom()), mutate
+ // Math.random because that is how seedrandom.js has worked since v1.0.
+ if (is_math_call) {
+ math[rngname] = prng;
+ return seed;
+ }
+ // Otherwise, it is a newer calling convention, so return the
+ // prng directly.
+ else
+ return prng;
+ })(prng, shortseed, 'global' in options ? options.global : (this == math), options.state);
+ }
+ //
+ // ARC4
+ //
+ // An ARC4 implementation. The constructor takes a key in the form of
+ // an array of at most (width) integers that should be 0 <= x < (width).
+ //
+ // The g(count) method returns a pseudorandom integer that concatenates
+ // the next (count) outputs from ARC4. Its return value is a number x
+ // that is in the range 0 <= x < (width ^ count).
+ //
+ function ARC4(key) {
+ var t, keylen = key.length, me = this, i = 0, j = me.i = me.j = 0, s = me.S = [];
+ // The empty key [] is treated as [0].
+ if (!keylen) {
+ key = [keylen++];
+ }
+ // Set up S using the standard key scheduling algorithm.
+ while (i < width) {
+ s[i] = i++;
+ }
+ for (i = 0; i < width; i++) {
+ s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))];
+ s[j] = t;
+ }
+ // The "g" method returns the next (count) outputs as one number.
+ (me.g = function (count) {
+ // Using instance members instead of closure state nearly doubles speed.
+ var t, r = 0, i = me.i, j = me.j, s = me.S;
+ while (count--) {
+ t = s[i = mask & (i + 1)];
+ r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))];
+ }
+ me.i = i;
+ me.j = j;
+ return r;
+ // For robust unpredictability, the function call below automatically
+ // discards an initial batch of values. This is called RC4-drop[256].
+ // See http://google.com/search?q=rsa+fluhrer+response&btnI
+ })(width);
+ }
+ //
+ // copy()
+ // Copies internal state of ARC4 to or from a plain object.
+ //
+ function copy(f, t) {
+ t.i = f.i;
+ t.j = f.j;
+ t.S = f.S.slice();
+ return t;
+ }
+ //
+ // flatten()
+ // Converts an object tree to nested arrays of strings.
+ //
+ function flatten(obj, depth) {
+ var result = [], typ = (typeof obj), prop;
+ if (depth && typ == 'object') {
+ for (prop in obj) {
+ try {
+ result.push(flatten(obj[prop], depth - 1));
+ }
+ catch (e) { }
+ }
+ }
+ return (result.length ? result : typ == 'string' ? obj : obj + '\0');
+ }
+ //
+ // mixkey()
+ // Mixes a string seed into a key that is an array of integers, and
+ // returns a shortened string seed that is equivalent to the result key.
+ //
    // Mixes the string form of `seed` into `key` (an integer array indexed
    // modulo `mask + 1`), and returns a shortened string seed equivalent to
    // the resulting key. `mask` and tostring() come from the enclosing scope.
    function mixkey(seed, key) {
        var stringseed = seed + '', smear, j = 0;
        while (j < stringseed.length) {
            // `smear` starts undefined; the first `^=` coerces it through
            // ToInt32 to 0 before xor-ing in the scaled previous key byte,
            // so the loop is well-defined on the first iteration.
            key[mask & j] =
                mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++));
        }
        return tostring(key);
    }
+ //
+ // autoseed()
+ // Returns an object for autoseeding, using window.crypto and Node crypto
+ // module if available.
+ //
    // Produces an automatic seed: cryptographically random bytes from Node's
    // crypto module (if `nodecrypto` was resolved at load time) or the
    // browser's crypto.getRandomValues. If neither is available, falls back
    // to a weak but serviceable grab-bag of environment state.
    function autoseed() {
        try {
            var out;
            if (nodecrypto && (out = nodecrypto.randomBytes)) {
                // The use of 'out' to remember randomBytes makes tight minified code.
                out = out(width);
            }
            else {
                out = new Uint8Array(width);
                (global.crypto || global.msCrypto).getRandomValues(out);
            }
            return tostring(out);
        }
        catch (e) {
            // Best-effort fallback: the error is deliberately swallowed because
            // any entropy source is better than throwing during seeding.
            var browser = global.navigator, plugins = browser && browser.plugins;
            return [+new Date, global, plugins, global.screen, tostring(pool)];
        }
    }
+ //
+ // tostring()
+ // Converts an array of charcodes to a string
+ //
+ function tostring(a) {
+ return String.fromCharCode.apply(0, a);
+ }
    //
    // When seedrandom.js is loaded, we immediately mix a few bits
    // from the built-in RNG into the entropy pool. Because we do
    // not want to interfere with deterministic PRNG state later,
    // seedrandom will not call math.random on its own again after
    // initialization.
    //
    mixkey(math.random(), pool);
    //
    // Nodejs and AMD support: export the implementation as a module using
    // either convention.
    //
    if (module.exports) {
        module.exports = seedrandom;
        // When in node.js, try using crypto package for autoseeding.
        // NOTE(review): `require$$0` is presumably the bundler-resolved Node
        // 'crypto' module — confirm against the bundle's module header.
        try {
            nodecrypto = require$$0;
        }
        catch (ex) { }
    }
    else {
        // When included as a plain script, set up Math.seedrandom global.
        math['seed' + rngname] = seedrandom;
    }
+ // End anonymous scope, and pass initial values.
+ })(
+ // global: `self` in browsers (including strict mode and web workers),
+ // otherwise `this` in Node and other environments
+ (typeof self !== 'undefined') ? self : commonjsGlobal, [], // pool: entropy pool starts empty
+ Math // math: package containing random, pow, and seedrandom
+ );
+ });
+
    // A library of seedable RNGs implemented in Javascript.
    //
    // Usage:
    //
    // var seedrandom = require('seedrandom');
    // var random = seedrandom(1); // or any seed.
    // var x = random(); // 0 <= x < 1. Every bit is random.
    // var x = random.quick(); // 0 <= x < 1. 32 bits of randomness.
    //
    // Generators attached below (each defined earlier in this bundle):
    // alea, a 53-bit multiply-with-carry generator by Johannes Baagøe.
    // Period: ~2^116
    // Reported to pass all BigCrush tests.
    // xor128, a pure xor-shift generator by George Marsaglia.
    // Period: 2^128-1.
    // Reported to fail: MatrixRank and LinearComp.
    // xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl.
    // Period: 2^192-2^32
    // Reported to fail: CollisionOver, SimpPoker, and LinearComp.
    // xorshift7, by François Panneton and Pierre L'ecuyer, takes
    // a different approach: it adds robustness by allowing more shifts
    // than Marsaglia's original three. It is a 7-shift generator
    // with 256 bits, that passes BigCrush with no systematic failures.
    // Period 2^256-1.
    // No systematic BigCrush failures reported.
    // xor4096, by Richard Brent, is a 4096-bit xor-shift with a
    // very long period that also adds a Weyl generator. It also passes
    // BigCrush with no systematic failures. Its long period may
    // be useful if you have many generators and need to avoid
    // collisions.
    // Period: 2^4128-2^32.
    // No systematic BigCrush failures reported.
    // Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random
    // number generator derived from ChaCha, a modern stream cipher.
    // https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf
    // Period: ~2^127
    // No systematic BigCrush failures reported.
    // The original ARC4-based prng included in this library.
    // Period: ~2^1600
    seedrandom$1.alea = alea;
    seedrandom$1.xor128 = xor128;
    seedrandom$1.xorwow = xorwow;
    seedrandom$1.xorshift7 = xorshift7;
    seedrandom$1.xor4096 = xor4096;
    seedrandom$1.tychei = tychei;
    var seedrandom = seedrandom$1;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // https://en.wikipedia.org/wiki/Marsaglia_polar_method
    // Gaussian sampler using the Marsaglia polar method
    // (https://en.wikipedia.org/wiki/Marsaglia_polar_method). Each rejection
    // loop produces two independent samples; the second one is cached in
    // `nextVal` and handed out by the following call.
    var MPRandGauss = /** @class */ (function () {
        // mean/stdDeviation: distribution parameters.
        // dtype: null or 'float32' returns floats; anything else rounds.
        // truncated: when true, resample until within two standard deviations.
        // seed: optional numeric seed; defaults to Math.random().
        function MPRandGauss(mean, stdDeviation, dtype, truncated, seed) {
            this.mean = mean;
            this.stdDev = stdDeviation;
            this.dtype = dtype;
            this.nextVal = NaN;
            this.truncated = truncated;
            if (this.truncated) {
                // Truncation bounds: two standard deviations around the mean.
                this.upper = this.mean + this.stdDev * 2;
                this.lower = this.mean - this.stdDev * 2;
            }
            var seedValue = seed ? seed : Math.random();
            this.random = seedrandom.alea(seedValue.toString());
        }
        /** Returns next sample from a Gaussian distribution. */
        MPRandGauss.prototype.nextValue = function () {
            // Serve the cached second sample from the previous iteration, if any.
            if (!isNaN(this.nextVal)) {
                var value = this.nextVal;
                this.nextVal = NaN;
                return value;
            }
            var resultX, resultY;
            var isValid = false;
            while (!isValid) {
                var v1 = void 0, v2 = void 0, s = void 0;
                // Draw (v1, v2) uniformly inside the unit circle (excluding 0).
                do {
                    v1 = 2 * this.random() - 1;
                    v2 = 2 * this.random() - 1;
                    s = v1 * v1 + v2 * v2;
                } while (s >= 1 || s === 0);
                var mul = Math.sqrt(-2.0 * Math.log(s) / s);
                resultX = this.mean + this.stdDev * v1 * mul;
                resultY = this.mean + this.stdDev * v2 * mul;
                // Only resultX drives the rejection loop; resultY is opportunistic.
                if (!this.truncated || this.isValidTruncated(resultX)) {
                    isValid = true;
                }
            }
            // Cache resultY for the next call only if it also passes truncation.
            if (!this.truncated || this.isValidTruncated(resultY)) {
                this.nextVal = this.convertValue(resultY);
            }
            return this.convertValue(resultX);
        };
        /** Handles proper rounding for non-floating-point numbers. */
        MPRandGauss.prototype.convertValue = function (value) {
            if (this.dtype == null || this.dtype === 'float32') {
                return value;
            }
            return Math.round(value);
        };
        /** Returns true if less than 2-standard-deviations from the mean. */
        MPRandGauss.prototype.isValidTruncated = function (value) {
            return value <= this.upper && value >= this.lower;
        };
        return MPRandGauss;
    }());
+ // Marsaglia, George, and Wai Wan Tsang. 2000. "A Simple Method for Generating
+ // Gamma Variables."
    // Gamma sampler after Marsaglia & Tsang (2000), "A Simple Method for
    // Generating Gamma Variables": squeeze/rejection on d*(1 + c*x)^3 with a
    // standard-normal driver, plus the u^(1/alpha) boost for alpha < 1.
    var RandGamma = /** @class */ (function () {
        // alpha: shape parameter. beta: rate parameter (inverted to scale
        // below). dtype: 'float32' returns floats; otherwise rounds.
        function RandGamma(alpha, beta, dtype, seed) {
            this.alpha = alpha;
            this.beta = 1 / beta; // convert rate to scale parameter
            this.dtype = dtype;
            var seedValue = seed ? seed : Math.random();
            this.randu = seedrandom.alea(seedValue.toString());
            // Normal driver seeded deterministically from the uniform stream.
            this.randn = new MPRandGauss(0, 1, dtype, false, this.randu());
            if (alpha < 1) {
                // For alpha < 1, sample with shape alpha+1 and boost afterwards.
                this.d = alpha + (2 / 3);
            }
            else {
                this.d = alpha - (1 / 3);
            }
            this.c = 1 / Math.sqrt(9 * this.d);
        }
        /** Returns next sample from a gamma distribution. */
        RandGamma.prototype.nextValue = function () {
            var x2, v0, v1, x, u, v;
            while (true) {
                do {
                    x = this.randn.nextValue();
                    v = 1 + (this.c * x);
                } while (v <= 0);
                v *= v * v;
                x2 = x * x;
                // v0: fast squeeze acceptance; v1: exact log-density bound.
                v0 = 1 - (0.331 * x2 * x2);
                v1 = (0.5 * x2) + (this.d * (1 - v + Math.log(v)));
                u = this.randu();
                if (u < v0 || Math.log(u) < v1) {
                    break;
                }
            }
            // NOTE(review): since this.beta was set to 1/beta (scale), 1/this.beta
            // re-derives the original rate, so this multiplies by the rate rather
            // than the scale — verify against upstream tfjs rand_util before
            // relying on the distribution's scale being correct.
            v = (1 / this.beta) * this.d * v;
            if (this.alpha < 1) {
                v *= Math.pow(this.randu(), 1 / this.alpha);
            }
            return this.convertValue(v);
        };
        /** Handles proper rounding for non-floating-point numbers. */
        RandGamma.prototype.convertValue = function (value) {
            if (this.dtype === 'float32') {
                return value;
            }
            return Math.round(value);
        };
        return RandGamma;
    }());
+ var UniformRandom = /** @class */ (function () {
+ function UniformRandom(min, max, dtype, seed) {
+ var _this = this;
+ if (min === void 0) { min = 0; }
+ if (max === void 0) { max = 1; }
+ /** Handles proper rounding for non floating point numbers. */
+ this.canReturnFloat = function () { return (_this.dtype == null || _this.dtype === 'float32'); };
+ this.min = min;
+ this.range = max - min;
+ this.dtype = dtype;
+ if (seed == null) {
+ seed = Math.random();
+ }
+ if (typeof seed === 'number') {
+ seed = seed.toString();
+ }
+ if (!this.canReturnFloat() && this.range <= 1) {
+ throw new Error("The difference between " + min + " - " + max + " <= 1 and dtype is not float");
+ }
+ this.random = seedrandom.alea(seed);
+ }
+ UniformRandom.prototype.convertValue = function (value) {
+ if (this.canReturnFloat()) {
+ return value;
+ }
+ return Math.round(value);
+ };
+ UniformRandom.prototype.nextValue = function () {
+ return this.convertValue(this.min + this.range * this.random());
+ };
+ return UniformRandom;
+ }());
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a gamma distribution.
+ *
+ * ```js
+ * tf.randomGamma([2, 2], 1).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param alpha The shape parameter of the gamma distribution.
+ * @param beta The inverse scale parameter of the gamma distribution. Defaults
+ * to 1.
+ * @param dtype The data type of the output. Defaults to float32.
+ * @param seed The seed for the random number generator.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
+ function randomGamma_(shape, alpha, beta, dtype, seed) {
+ if (beta === void 0) { beta = 1; }
+ if (dtype === void 0) { dtype = 'float32'; }
+ if (beta == null) {
+ beta = 1;
+ }
+ if (dtype == null) {
+ dtype = 'float32';
+ }
+ if (dtype !== 'float32' && dtype !== 'int32') {
+ throw new Error("Unsupported data type " + dtype);
+ }
+ var rgamma = new RandGamma(alpha, beta, dtype, seed);
+ var res = buffer(shape, dtype);
+ for (var i = 0; i < res.values.length; i++) {
+ res.values[i] = rgamma.nextValue();
+ }
+ return res.toTensor();
+ }
+ var randomGamma = op({ randomGamma_: randomGamma_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a normal distribution.
+ *
+ * ```js
+ * tf.randomNormal([2, 2]).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param mean The mean of the normal distribution.
+ * @param stdDev The standard deviation of the normal distribution.
+ * @param dtype The data type of the output.
+ * @param seed The seed for the random number generator.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
+ function randomNormal_(shape, mean, stdDev, dtype, seed) {
+ if (mean === void 0) { mean = 0; }
+ if (stdDev === void 0) { stdDev = 1; }
+ if (dtype != null && dtype === 'bool') {
+ throw new Error("Unsupported data type " + dtype);
+ }
+ var randGauss = new MPRandGauss(mean, stdDev, dtype, false /* truncated */, seed);
+ var res = buffer(shape, dtype);
+ for (var i = 0; i < res.values.length; i++) {
+ res.values[i] = randGauss.nextValue();
+ }
+ return res.toTensor();
+ }
+ var randomNormal = op({ randomNormal_: randomNormal_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a uniform distribution.
+ *
+ * The generated values follow a uniform distribution in the range [minval,
+ * maxval). The lower bound minval is included in the range, while the upper
+ * bound maxval is excluded.
+ *
+ * ```js
+ * tf.randomUniform([2, 2]).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param minval The lower bound on the range of random values to generate.
+ * Defaults to 0.
+ * @param maxval The upper bound on the range of random values to generate.
+ * Defaults to 1.
+ * @param dtype The data type of the output tensor. Defaults to 'float32'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
+ function randomUniform_(shape, minval, maxval, dtype, seed) {
+ if (minval === void 0) { minval = 0; }
+ if (maxval === void 0) { maxval = 1; }
+ if (dtype === void 0) { dtype = 'float32'; }
+ var res = buffer(shape, dtype);
+ var random = new UniformRandom(minval, maxval, null, seed);
+ for (var i = 0; i < res.values.length; i++) {
+ res.values[i] = random.nextValue();
+ }
+ return res.toTensor();
+ }
+ var randomUniform = op({ randomUniform_: randomUniform_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new `tf.Tensor1D` filled with the numbers in the range provided.
+ *
     * The tensor is a half-open interval, meaning it includes start but
+ * excludes stop. Decrementing ranges and negative step values are also
     * supported.
+ *
+ *
+ * ```js
+ * tf.range(0, 9, 2).print();
+ * ```
+ *
+ * @param start An integer start value
+ * @param stop An integer stop value
+ * @param step An integer increment (will default to 1 or -1)
+ * @param dtype The data type of the output tensor. Defaults to 'float32'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function range(start, stop, step, dtype) {
+ if (step === void 0) { step = 1; }
+ if (dtype === void 0) { dtype = 'float32'; }
+ if (step === 0) {
+ throw new Error('Cannot have a step of zero');
+ }
+ var attrs = { start: start, stop: stop, step: step, dtype: dtype };
+ return ENGINE.runKernel(Range, {} /* inputs */, attrs);
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the real part of a complex (or real) tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of type float that is
+ * the real part of each element in input considered as a complex number.
+ *
+ * If the input is real, it simply makes a clone.
+ *
+ * ```js
+ * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);
+ * tf.real(x).print();
+ * ```
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function real_(input) {
+ var $input = convertToTensor(input, 'input', 'real');
+ var inputs = { input: $input };
+ return ENGINE.runKernel(Real, inputs);
+ }
+ var real = op({ real_: real_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes reciprocal of x element-wise: `1 / x`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, 2]);
+ *
+ * x.reciprocal().print(); // or tf.reciprocal(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function reciprocal_(x) {
+ var $x = convertToTensor(x, 'x', 'reciprocal');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Reciprocal, inputs);
+ }
+ var reciprocal = op({ reciprocal_: reciprocal_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes rectified linear element-wise: `max(x, 0)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.relu().print(); // or tf.relu(x)
+ * ```
+ * @param x The input tensor. If the dtype is `bool`, the output dtype will be
+ * `int32'.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function relu_(x) {
+ var $x = convertToTensor(x, 'x', 'relu');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Relu, inputs);
+ }
+ var relu = op({ relu_: relu_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes rectified linear 6 element-wise: `min(max(x, 0), 6)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 8]);
+ *
+ * x.relu6().print(); // or tf.relu6(x)
+ * ```
+ * @param x The input tensor. If the dtype is `bool`, the output dtype will be
+ * `int32'.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function relu6_(x) {
+ var $x = convertToTensor(x, 'x', 'relu6');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Relu6, inputs);
+ }
+ var relu6 = op({ relu6_: relu6_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor` along a specified axis.
+ *
+ * Also available are stricter rank-specific methods that assert that `x` is
+ * of the given rank:
+ * - `tf.reverse1d`
+ * - `tf.reverse2d`
+ * - `tf.reverse3d`
+ * - `tf.reverse4d`
+ *
+ * Except `tf.reverse1d` (which does not have axis param), all methods have
+ * same signature as this method.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * x.reverse().print();
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.reverse(axis).print();
+ * ```
+ * @param x The input tensor to be reversed.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
+ function reverse_(x, axis) {
+ var $x = convertToTensor(x, 'x', 'reverse');
+ var inputs = { x: $x };
+ var attrs = { dims: axis };
+ return ENGINE.runKernel(Reverse, inputs, attrs);
+ }
+ var reverse = op({ reverse_: reverse_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor1D`.
+ *
+ * @param x The input tensor.
+ */
+ function reverse1d_(x) {
+ var $x = convertToTensor(x, 'x', 'reverse');
+ assert($x.rank === 1, function () { return "Error in reverse1D: x must be rank 1 but got rank " + $x.rank + "."; });
+ return reverse($x, 0);
+ }
+ var reverse1d = op({ reverse1d_: reverse1d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor2D` along a specified axis.
+ *
+ * @param x The input tensor.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ */
+ function reverse2d_(x, axis) {
+ var $x = convertToTensor(x, 'x', 'reverse');
+ assert($x.rank === 2, function () { return "Error in reverse2D: x must be rank 2 but got rank " + $x.rank + "."; });
+ return reverse($x, axis);
+ }
+ var reverse2d = op({ reverse2d_: reverse2d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor3D` along a specified axis.
+ *
+ * @param x The input tensor.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ */
+ function reverse3d_(x, axis) {
+ var $x = convertToTensor(x, 'x', 'reverse');
+ assert($x.rank === 3, function () { return "Error in reverse3D: x must be rank 3 but got rank " + $x.rank + "."; });
+ return reverse($x, axis);
+ }
+ var reverse3d = op({ reverse3d_: reverse3d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor4D` along a specified axis.
+ *
+ * @param x The input tensor.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ */
+ function reverse4d_(x, axis) {
+ var $x = convertToTensor(x, 'x', 'reverse');
+ assert($x.rank === 4, function () { return "Error in reverse4D: x must be rank 4 but got rank " + $x.rank + "."; });
+ return reverse($x, axis);
+ }
+ var reverse4d = op({ reverse4d_: reverse4d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes round of input `tf.Tensor` element-wise: `round(x)`.
+ * It implements banker's rounding.
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
+ *
+ * x.round().print(); // or tf.round(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function round_(x) {
+ var $x = convertToTensor(x, 'x', 'round');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Round, inputs);
+ }
+ var round = op({ round_: round_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes reciprocal of square root of the input `tf.Tensor` element-wise:
+ * `y = 1 / sqrt(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 4, -1]);
+ *
+ * x.rsqrt().print(); // or tf.rsqrt(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function rsqrt_(x) {
+ var $x = convertToTensor(x, 'x', 'rsqrt', 'float32');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Rsqrt, inputs);
+ }
+ var rsqrt = op({ rsqrt_: rsqrt_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /** This is shared code across all tensor creation methods. */
    /**
     * This is shared code across all tensor creation methods.
     *
     * Validates `values` (scalar, array, or TypedArray) against the caller's
     * `shape` and the shape inferred from the nesting of `values`, infers the
     * dtype when not given, and hands the flattened data to the engine.
     * Throws on complex64 (use tf.complex), on non-tensor-like values, and on
     * any shape/size mismatch.
     */
    function makeTensor(values, shape, inferredShape, dtype) {
        if (dtype == null) {
            dtype = inferDtype(values);
        }
        if (dtype === 'complex64') {
            throw new Error("Cannot construct a complex64 tensor directly. " +
                "Please use tf.complex(real, imag).");
        }
        if (!isTypedArray(values) && !Array.isArray(values) &&
            typeof values !== 'number' && typeof values !== 'boolean' &&
            typeof values !== 'string') {
            throw new Error('values passed to tensor(values) must be a number/boolean/string or ' +
                'an array of numbers/booleans/strings, or a TypedArray');
        }
        if (shape != null) {
            assertNonNegativeIntegerDimensions(shape);
            // Total element counts must agree before per-dimension checks.
            var providedSize_1 = sizeFromShape(shape);
            var inferredSize_1 = sizeFromShape(inferredShape);
            assert(providedSize_1 === inferredSize_1, function () { return "Based on the provided shape, [" + shape + "], the tensor should have " +
                (providedSize_1 + " values but has " + inferredSize_1); });
            for (var i = 0; i < inferredShape.length; ++i) {
                var inferred = inferredShape[i];
                // The last inferred dimension may legitimately absorb the
                // remaining flat dimensions (e.g. a flat array reshaped), so
                // only flag it when it doesn't equal the product of the rest.
                var flatDimsDontMatch = i === inferredShape.length - 1 ?
                    inferred !== sizeFromShape(shape.slice(i)) :
                    true;
                assert(inferredShape[i] === shape[i] || !flatDimsDontMatch, function () { return "Error creating a new Tensor. Inferred shape " +
                    ("(" + inferredShape + ") does not match the provided ") +
                    ("shape (" + shape + "). "); });
            }
        }
        // Normalize scalars to a one-element array before flattening.
        if (!isTypedArray(values) && !Array.isArray(values)) {
            values = [values];
        }
        shape = shape || inferredShape;
        // Strings stay as a flat JS array; everything else becomes a TypedArray.
        values = dtype !== 'string' ?
            toTypedArray(values, dtype) :
            flatten(values, [], true);
        return ENGINE.makeTensor(values, shape, dtype);
    }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Creates a rank-0 `tf.Tensor` (scalar) with the provided value and dtype.
 *
 * The same functionality can be achieved with `tf.tensor`, but `tf.scalar`
 * makes the intent more readable.
 *
 * ```js
 * tf.scalar(3.14).print();
 * ```
 *
 * @param value The value of the scalar.
 * @param dtype The data type.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function scalar(value, dtype) {
    // A scalar must come from a primitive; arrays/TypedArrays are only
    // tolerated for complex64 (pair of floats) or encoded strings below.
    var looksLikeArray = (isTypedArray(value) && dtype !== 'string') || Array.isArray(value);
    if (looksLikeArray && dtype !== 'complex64') {
        throw new Error('Error creating a new Scalar: value must be a primitive ' +
            '(number|boolean|string)');
    }
    // Pre-encoded string scalars must arrive as raw bytes.
    if (dtype === 'string' && isTypedArray(value) &&
        !(value instanceof Uint8Array)) {
        throw new Error('When making a scalar from encoded string, ' +
            'the value must be `Uint8Array`.');
    }
    // Rank 0: both the requested and inferred shapes are empty.
    return makeTensor(value, [], [], dtype);
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the scaled exponential linear unit element-wise:
 * `x < 0 ? scale * alpha * (exp(x) - 1) : x`
 *
 * ```js
 * const x = tf.tensor1d([-1, 2, -3, 4]);
 *
 * x.selu().print(); // or tf.selu(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function selu_(x) {
    var xTensor = convertToTensor(x, 'x', 'selu');
    return ENGINE.runKernel(Selu, { x: xTensor });
}
var selu = op({ selu_: selu_ });
+
/**
 * 2-D convolution with separable filters.
 *
 * Performs a depthwise convolution that acts separately on channels followed
 * by a pointwise convolution that mixes channels. Note that this is
 * separability between dimensions [1, 2] and 3, not spatial separability
 * between dimensions 1 and 2.
 *
 * See
 * [https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d](
 *     https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d)
 * for more details.
 *
 * @param x The input tensor, of rank 4 or rank 3, of shape
 *     `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
 *     assumed.
 * @param depthwiseFilter The depthwise filter tensor, rank 4, of shape
 *     `[filterHeight, filterWidth, inChannels, channelMultiplier]`. This is
 *     the filter used in the first step.
 * @param pointwiseFilter The pointwise filter tensor, rank 4, of shape
 *     `[1, 1, inChannels * channelMultiplier, outChannels]`. This is
 *     the filter used in the second step.
 * @param strides The strides of the convolution: `[strideHeight,
 *     strideWidth]`. If strides is a single number, then `strideHeight ==
 *     strideWidth`.
 * @param pad The type of padding algorithm: `same` or `valid`.
 * @param dilation The dilation rates: `[dilationHeight, dilationWidth]`.
 *     Defaults to `[1, 1]`. If greater than 1, all `strides` values must be 1.
 * @param dataFormat An optional string from "NHWC" | "NCHW". Defaults to
 *     "NHWC". Only "NHWC" is currently supported.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function separableConv2d_(x, depthwiseFilter, pointwiseFilter, strides, pad, dilation, dataFormat) {
    if (dilation === void 0) { dilation = [1, 1]; }
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    var $x = convertToTensor(x, 'x', 'separableConv2d');
    var $depthwiseFilter = convertToTensor(depthwiseFilter, 'depthwiseFilter', 'separableConv2d');
    var $pointwiseFilter = convertToTensor(pointwiseFilter, 'pointwiseFilter', 'separableConv2d');
    // A rank-3 input is treated as a batch of 1; undo the reshape on return.
    var x4D = $x;
    var reshapedTo4D = false;
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    if (dataFormat === 'NCHW') {
        throw new Error('separableConv2d currently does not support dataFormat NCHW; only ' +
            'NHWC is supported');
    }
    assert(x4D.rank === 4, function () { return "Error in separableConv2d: input must be rank 4, but got " +
        ("rank " + x4D.rank + "."); });
    assert($depthwiseFilter.rank === 4, function () { return "Error in separableConv2d: depthwise filter must be rank 4, but " +
        ("got rank " + $depthwiseFilter.rank + "."); });
    // Fix: this message previously reported $depthwiseFilter.rank, so a bad
    // pointwise filter produced a misleading error.
    assert($pointwiseFilter.rank === 4, function () { return "Error in separableConv2d: pointwise filter must be rank 4, but " +
        ("got rank " + $pointwiseFilter.rank + "."); });
    assert($pointwiseFilter.shape[0] === 1, function () { return "Error in separableConv2d: the first dimension of pointwise filter " +
        (" must be 1, but got " + $pointwiseFilter.shape[0] + "."); });
    assert($pointwiseFilter.shape[1] === 1, function () { return "Error in separableConv2d: the second dimension of pointwise " +
        ("filter must be 1, but got " + $pointwiseFilter.shape[1] + "."); });
    // The pointwise filter must consume every depthwise output channel.
    var inChannels = $depthwiseFilter.shape[2];
    var channelMultiplier = $depthwiseFilter.shape[3];
    assert($pointwiseFilter.shape[2] === inChannels * channelMultiplier, function () { return "Error in separableConv2d: the third dimension of pointwise filter " +
        ("must be " + inChannels * channelMultiplier + ", ") +
        ("but got " + $pointwiseFilter.shape[2] + "."); });
    // Step 1: per-channel spatial filtering.
    var depthwise = depthwiseConv2d$1(x4D, $depthwiseFilter, strides, pad, dataFormat, dilation);
    // Step 2: 1x1 convolution that mixes channels; stride is always 1.
    var pointwiseStride = 1;
    var res = conv2d$1(depthwise, $pointwiseFilter, pointwiseStride, 'valid', dataFormat);
    if (reshapedTo4D) {
        // Drop the synthetic batch dimension added above.
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var separableConv2d = op({ separableConv2d_: separableConv2d_ });
+
/**
 * Computes the difference between two lists of numbers.
 *
 * Given a Tensor `x` and a Tensor `y`, this operation returns a Tensor `out`
 * that represents all values that are in `x` but not in `y`. The returned
 * Tensor `out` is sorted in the same order that the numbers appear in `x`
 * (duplicates are preserved). This operation also returns a Tensor indices that
 * represents the position of each out element in `x`. In other words:
 *
 * `out[i] = x[idx[i]] for i in [0, 1, ..., out.length - 1]`
 *
 * ```js
 * const x = [1, 2, 3, 4, 5, 6];
 * const y = [1, 3, 5];
 *
 * const [out, indices] = await tf.setdiff1dAsync(x, y);
 * out.print(); // [2, 4, 6]
 * indices.print(); // [1, 3, 5]
 * ```
 *
 * @param x 1-D Tensor. Values to keep.
 * @param y 1-D Tensor. Must have the same type as x. Values to exclude in the
 *     output.
 * @returns Promise of Tensor tuple [out, indices].
 *     out: Tensor with the same type as x.
 *     indices: A Tensor of type int32.
 *
 * @doc {heading: 'Tensors', subheading: 'Transformations'}
 */
function setdiff1dAsync_(x, y) {
    // Transpiled async function: __awaiter/__generator implement the await
    // state machine. Labels 0..2 correspond to: validate inputs, await
    // x.data(), await y.data() then compute the diff synchronously.
    return __awaiter(this, void 0, void 0, function () {
        var $x, $y, xVals, yVals, ySet, outputSize, i, buffer, indices, i, p;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    $x = convertToTensor(x, 'x', 'setdiff1d');
                    $y = convertToTensor(y, 'y', 'setdiff1d');
                    // Both inputs must be 1-D and share a dtype.
                    assert($x.dtype === $y.dtype, function () { return "x and y should have the same dtype, but got x (" + $x.dtype + ") and y (" + $y.dtype + ")."; });
                    assert($x.rank === 1, function () { return "x should be 1D tensor, but got x (" + $x.shape + ")."; });
                    assert($y.rank === 1, function () { return "y should be 1D tensor, but got y (" + $y.shape + ")."; });
                    return [4 /*yield*/, $x.data()];
                case 1:
                    xVals = _a.sent();
                    return [4 /*yield*/, $y.data()];
                case 2:
                    yVals = _a.sent();
                    // Set gives O(1) membership tests against y's values.
                    ySet = new Set(yVals);
                    // First pass: count the survivors so the output buffers can
                    // be allocated at their exact size.
                    outputSize = 0;
                    for (i = 0; i < xVals.length; i++) {
                        if (!ySet.has(xVals[i])) {
                            outputSize++;
                        }
                    }
                    buffer = new TensorBuffer([outputSize], $x.dtype);
                    indices = new TensorBuffer([outputSize], 'int32');
                    // Second pass: copy kept values and their positions in x.
                    for (i = 0, p = 0; i < xVals.length; i++) {
                        if (!ySet.has(xVals[i])) {
                            buffer.values[p] = xVals[i];
                            indices.values[p] = i;
                            p++;
                        }
                    }
                    return [2 /*return*/, [buffer.toTensor(), indices.toTensor()]];
            }
        });
    });
}
var setdiff1dAsync = setdiff1dAsync_;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns an element-wise indication of the sign of a number.
 *
 * ```js
 * const x = tf.tensor1d([.6, 1.1, -3.3, NaN, 0]);
 *
 * x.sign().print(); // or tf.sign(x)
 * ```
 * @param x The input Tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function sign_(x) {
    var xTensor = convertToTensor(x, 'x', 'sign');
    return ENGINE.runKernel(Sign, { x: xTensor });
}
var sign = op({ sign_: sign_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes sin of the input Tensor element-wise: `sin(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
 *
 * x.sin().print(); // or tf.sin(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function sin_(x) {
    // Upcast to float32 so the kernel always receives real-valued input.
    var xTensor = convertToTensor(x, 'x', 'sin', 'float32');
    return ENGINE.runKernel(Sin, { x: xTensor });
}
var sin = op({ sin_: sin_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the hyperbolic sin of the input `tf.Tensor` element-wise:
 * `sinh(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.sinh().print(); // or tf.sinh(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function sinh_(x) {
    var xTensor = convertToTensor(x, 'x', 'sinh');
    return ENGINE.runKernel(Sinh, { x: xTensor });
}
var sinh = op({ sinh_: sinh_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Extracts a 1D slice from a 1D array starting at coordinate `begin` with
 * length `size`. See `slice` for details.
 *
 * @param x The rank-1 input tensor.
 * @param begin Start coordinate.
 * @param size Number of elements to extract.
 */
function slice1d_(x, begin, size) {
    var t = convertToTensor(x, 'x', 'slice1d');
    assert(t.rank === 1, function () {
        return 'slice1d expects a rank-1 tensor, but got a rank-' + t.rank + ' tensor';
    });
    // slice takes arrays of coordinates; wrap the scalars.
    return slice(t, [begin], [size]);
}
var slice1d = op({ slice1d_: slice1d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Extracts a 2D slice from a 2D array starting at coordinates `begin` and
 * of size `size`. See `slice` for details.
 *
 * @param x The rank-2 input tensor.
 * @param begin Start coordinates `[row, col]`.
 * @param size Extent of the slice per dimension.
 */
function slice2d_(x, begin, size) {
    var t = convertToTensor(x, 'x', 'slice2d');
    assert(t.rank === 2, function () {
        return 'slice2d expects a rank-2 tensor, but got a rank-' + t.rank + ' tensor';
    });
    return slice(t, begin, size);
}
var slice2d = op({ slice2d_: slice2d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Extracts a 3D slice from a 3D array starting at coordinates `begin` and
 * of size `size`. See `slice` for details.
 *
 * @param x The rank-3 input tensor.
 * @param begin Start coordinates per dimension.
 * @param size Extent of the slice per dimension.
 */
function slice3d_(x, begin, size) {
    var t = convertToTensor(x, 'x', 'slice3d');
    assert(t.rank === 3, function () {
        return 'slice3d expects a rank-3 tensor, but got a rank-' + t.rank + ' tensor';
    });
    return slice(t, begin, size);
}
var slice3d = op({ slice3d_: slice3d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Extracts a 4D slice from a 4D array starting at coordinates `begin` and
 * of size `size`. See `slice` for details.
 *
 * @param x The rank-4 input tensor.
 * @param begin Start coordinates per dimension.
 * @param size Extent of the slice per dimension.
 */
function slice4d_(x, begin, size) {
    var t = convertToTensor(x, 'x', 'slice4d');
    assert(t.rank === 4, function () {
        return 'slice4d expects a rank-4 tensor, but got a rank-' + t.rank + ' tensor';
    });
    return slice(t, begin, size);
}
var slice4d = op({ slice4d_: slice4d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the softmax normalized vector given the logits.
 *
 * ```js
 * const a = tf.tensor1d([1, 2, 3]);
 *
 * a.softmax().print(); // or tf.softmax(a)
 * ```
 *
 * ```js
 * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);
 *
 * a.softmax().print(); // or tf.softmax(a)
 * ```
 *
 * @param logits The logits array.
 * @param dim The dimension softmax would be performed on. Defaults to `-1`
 *     which indicates the last dimension. Only the last dimension is
 *     currently supported.
 *
 * @doc {heading: 'Operations', subheading: 'Normalization'}
 */
function softmax_(logits, dim) {
    if (dim === void 0) { dim = -1; }
    var $logits = convertToTensor(logits, 'logits', 'softmax', 'float32');
    // Resolve the -1 sentinel to the concrete last axis.
    var lastAxis = $logits.rank - 1;
    if (dim === -1) {
        dim = lastAxis;
    }
    if (dim !== lastAxis) {
        throw Error('Softmax along a non-last dimension is not yet supported. ' +
            'Logits was rank ' + $logits.rank + ' and dim was ' + dim);
    }
    return ENGINE.runKernel(Softmax, { logits: $logits }, { dim: dim });
}
var softmax = op({ softmax_: softmax_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Fast Fourier transform.
 *
 * Computes the 1-dimensional discrete Fourier transform over the inner-most
 * dimension of input.
 *
 * ```js
 * const real = tf.tensor1d([1, 2, 3]);
 * const imag = tf.tensor1d([1, 2, 3]);
 * const x = tf.complex(real, imag);
 *
 * x.fft().print(); // tf.spectral.fft(x).print();
 * ```
 * @param input The complex input to compute an fft over.
 *
 * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
 */
function fft_(input) {
    // The FFT kernel only operates on complex64 data.
    assert(input.dtype === 'complex64', function () {
        return 'The dtype for tf.spectral.fft() must be complex64 but got ' + input.dtype + '.';
    });
    return ENGINE.runKernel(FFT, { input: input });
}
var fft = op({ fft_: fft_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Inverse fast Fourier transform.
 *
 * Computes the inverse 1-dimensional discrete Fourier transform over the
 * inner-most dimension of input.
 *
 * ```js
 * const real = tf.tensor1d([1, 2, 3]);
 * const imag = tf.tensor1d([1, 2, 3]);
 * const x = tf.complex(real, imag);
 *
 * x.ifft().print(); // tf.spectral.ifft(x).print();
 * ```
 * @param input The complex input to compute an ifft over.
 *
 * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
 */
function ifft_(input) {
    // The IFFT kernel only operates on complex64 data.
    assert(input.dtype === 'complex64', function () {
        return 'The dtype for tf.spectral.ifft() must be complex64 but got ' + input.dtype + '.';
    });
    return ENGINE.runKernel(IFFT, { input: input });
}
var ifft = op({ ifft_: ifft_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Inversed real value input fast Fourier transform.
 *
 * Computes the 1-dimensional inversed discrete Fourier transform over the
 * inner-most dimension of the real input.
 *
 * ```js
 * const real = tf.tensor1d([1, 2, 3]);
 * const imag = tf.tensor1d([0, 0, 0]);
 * const x = tf.complex(real, imag);
 *
 * x.irfft().print();
 * ```
 * @param input The real value input to compute an irfft over.
 *
 * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
 */
function irfft_(input) {
    // Treat all leading dims as a flat batch over the innermost dimension.
    var innerDimensionSize = input.shape[input.shape.length - 1];
    var batch = input.size / innerDimensionSize;
    var ret;
    if (innerDimensionSize <= 2) {
        // Length-1/2 spectra carry no dropped conjugate half; run the
        // inverse FFT directly.
        var complexInput = reshape(input, [batch, innerDimensionSize]);
        ret = ifft(complexInput);
    }
    else {
        // The length of unique components of the DFT of a real-valued signal
        // is 2 * (input_len - 1)
        var outputShape = [batch, 2 * (innerDimensionSize - 1)];
        var realInput = reshape(real(input), [batch, innerDimensionSize]);
        var imagInput = reshape(imag(input), [batch, innerDimensionSize]);
        // Rebuild the conjugate-symmetric half dropped by rfft: reversed
        // interior of the real part, negated reversed interior of the imag
        // part (endpoints excluded, hence the [0, 1] / size-2 slice).
        var realConjugate = reverse(slice(realInput, [0, 1], [batch, innerDimensionSize - 2]), 1);
        var imagConjugate = mul(reverse(slice(imagInput, [0, 1], [batch, innerDimensionSize - 2]), 1), scalar(-1));
        var r = concat([realInput, realConjugate], 1);
        var i = concat([imagInput, imagConjugate], 1);
        var complexInput = reshape(complex(r, i), [outputShape[0], outputShape[1]]);
        ret = ifft(complexInput);
    }
    // The inverse transform of a conjugate-symmetric spectrum is real-valued;
    // drop the imaginary part.
    ret = real(ret);
    // reshape the result if the input is 3D tensor.
    if (input.rank === 3 && input.shape[0] !== 0) {
        var temp = ret;
        var batch_1 = input.shape[0];
        ret = reshape(ret, [batch_1, ret.shape[0] / batch_1, ret.shape[1]]);
        // Free the pre-reshape intermediate explicitly.
        temp.dispose();
    }
    return ret;
}
var irfft = op({ irfft_: irfft_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Splits a `tf.Tensor` into sub tensors.
 *
 * If `numOrSizeSplits` is a number, splits `x` along dimension `axis`
 * into `numOrSizeSplits` smaller tensors.
 * Requires that `numOrSizeSplits` evenly divides `x.shape[axis]`.
 *
 * If `numOrSizeSplits` is a number array, splits `x` into
 * `numOrSizeSplits.length` pieces. The shape of the `i`-th piece has the
 * same size as `x` except along dimension `axis` where the size is
 * `numOrSizeSplits[i]`.
 *
 * ```js
 * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [2, 4]);
 * const [a, b] = tf.split(x, 2, 1);
 * a.print();
 * b.print();
 *
 * const [c, d, e] = tf.split(x, [1, 2, 1], 1);
 * c.print();
 * d.print();
 * e.print();
 * ```
 *
 * @param x The input tensor to split.
 * @param numOrSizeSplits Either an integer indicating the number of
 *     splits along the axis or an array of integers containing the sizes of
 *     each output tensor along the axis. If a number then it must evenly
 *     divide `x.shape[axis]`; otherwise the sum of sizes must match
 *     `x.shape[axis]`. Can contain one -1 indicating that dimension is to
 *     be inferred.
 * @param axis The dimension along which to split. Defaults to 0 (the first
 *     dim).
 *
 * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
 */
function split_(x, numOrSizeSplits, axis) {
    if (axis === void 0) { axis = 0; }
    var xTensor = convertToTensor(x, 'x', 'split');
    // All splitting logic (count vs. explicit sizes, -1 inference) lives in
    // the SplitV kernel.
    return ENGINE.runKernel(SplitV, { x: xTensor }, { numOrSizeSplits: numOrSizeSplits, axis: axis });
}
var split$1 = op({ split_: split_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Real value input fast Fourier transform.
 *
 * Computes the 1-dimensional discrete Fourier transform over the
 * inner-most dimension of the real input.
 *
 * ```js
 * const real = tf.tensor1d([1, 2, 3]);
 *
 * real.rfft().print();
 * ```
 * @param input The real value input to compute an rfft over.
 * @param fftLength Optional target length of the innermost dimension; the
 *     input is cropped or zero-padded to this length before the transform.
 *
 * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
 */
function rfft_(input, fftLength) {
    assert(input.dtype === 'float32', function () { return "The dtype for rfft() must be real value but got " + input.dtype; });
    var innerDimensionSize = input.shape[input.shape.length - 1];
    var batch = input.size / innerDimensionSize;
    var adjustedInput;
    if (fftLength != null && fftLength < innerDimensionSize) {
        // Need to crop
        var begin = input.shape.map(function (v) { return 0; });
        var size = input.shape.map(function (v) { return v; });
        size[input.shape.length - 1] = fftLength;
        adjustedInput = slice(input, begin, size);
        innerDimensionSize = fftLength;
    }
    else if (fftLength != null && fftLength > innerDimensionSize) {
        // Need to pad with zeros
        var zerosShape = input.shape.map(function (v) { return v; });
        zerosShape[input.shape.length - 1] = fftLength - innerDimensionSize;
        adjustedInput = concat([input, zeros(zerosShape)], input.shape.length - 1);
        innerDimensionSize = fftLength;
    }
    else {
        // fftLength absent or equal to the inner dimension: use input as-is.
        adjustedInput = input;
    }
    // Complement the input with zero imaginary numbers.
    var zerosInput = zerosLike(adjustedInput);
    var complexInput = reshape(complex(adjustedInput, zerosInput), [batch, innerDimensionSize]);
    var ret = fft(complexInput);
    // Exclude complex conjugations. These conjugations are put symmetrically.
    // A real signal's spectrum is conjugate-symmetric, so only the first
    // floor(n/2)+1 bins are unique.
    var half = Math.floor(innerDimensionSize / 2) + 1;
    var realValues = real(ret);
    var imagValues = imag(ret);
    // Split into [unique half, redundant half] and keep only the former.
    var realComplexConjugate = split$1(realValues, [half, innerDimensionSize - half], realValues.shape.length - 1);
    var imagComplexConjugate = split$1(imagValues, [half, innerDimensionSize - half], imagValues.shape.length - 1);
    // Output keeps the (possibly cropped/padded) input shape with the last
    // dimension shrunk to `half`.
    var outputShape = adjustedInput.shape.slice();
    outputShape[adjustedInput.shape.length - 1] = half;
    return reshape(complex(realComplexConjugate[0], imagComplexConjugate[0]), outputShape);
}
var rfft = op({ rfft_: rfft_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes square root of the input `tf.Tensor` element-wise: `y = sqrt(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 4, -1]);
+ *
+ * x.sqrt().print(); // or tf.sqrt(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
// Element-wise square root: y = sqrt(x). Delegates to the Sqrt kernel.
function sqrt_(x) {
    var input = convertToTensor(x, 'x', 'sqrt', 'float32');
    return ENGINE.runKernel(Sqrt, { x: input });
}
var sqrt = op({ sqrt_: sqrt_ });
+
+ /**
+ * Returns (a - b) * (a - b) element-wise.
+ * Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
+ *
+ * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast squared difference a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
+ *
+ * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b)
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
// Element-wise (a - b)^2 with broadcasting; dtypes are upcast to match first.
function squaredDifference_(a, b) {
    var $a = convertToTensor(a, 'a', 'squaredDifference');
    var $b = convertToTensor(b, 'b', 'squaredDifference');
    var matched = __read(makeTypesMatch($a, $b), 2);
    $a = matched[0];
    $b = matched[1];
    // Fails fast if the two shapes cannot broadcast against each other.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(SquaredDifference, { a: $a, b: $b }, {});
}
var squaredDifference = op({ squaredDifference_: squaredDifference_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Removes dimensions of size 1 from the shape of a `tf.Tensor`.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]);
+ * x.squeeze().print();
+ * ```
+ *
+ * @param x The input tensor to be squeezed.
+ * @param axis An optional list of numbers. If specified, only
+ * squeezes the dimensions listed. The dimension index starts at 0. It
+ * is an error to squeeze a dimension that is not 1.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
// Drops size-1 dimensions (all of them, or only those listed in `axis`)
// by reshaping to the squeezed shape.
function squeeze_(x, axis) {
    var input = convertToTensor(x, 'x', 'squeeze');
    var squeezed = squeezeShape(input.shape, axis).newShape;
    return reshape(input, squeezed);
}
var squeeze = op({ squeeze_: squeeze_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Stacks a list of rank-`R` `tf.Tensor`s into one rank-`(R+1)` `tf.Tensor`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.stack([a, b, c]).print();
+ * ```
+ *
+ * @param tensors A list of tensor objects with the same shape and dtype.
+ * @param axis The axis to stack along. Defaults to 0 (the first dim).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
// Stacks rank-R tensors into one rank-(R+1) tensor along `axis` (default 0)
// via the Pack kernel.
function stack_(tensors, axis) {
    if (axis === void 0) { axis = 0; }
    var tensorList = convertToTensorArray(tensors, 'tensors', 'stack', 'string_or_numeric');
    assert(tensorList.length >= 1, function () { return 'Pass at least one tensor to tf.stack'; });
    if (tensorList.length > 0) {
        assert(axis <= tensorList[0].rank, function () { return 'Axis must be <= rank of the tensor'; });
    }
    return ENGINE.runKernel(Pack, tensorList, { axis: axis });
}
var stack = op({ stack_: stack_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes step of the input `tf.Tensor` element-wise: `x > 0 ? 1 : alpha * x`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 2, -1, -3]);
+ *
+ * x.step(.5).print(); // or tf.step(x, .5)
+ * ```
+ * @param x The input tensor.
+ * @param alpha The gradient when input is negative.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
// Element-wise step function; `alpha` (default 0) is the value used for
// negative inputs by the Step kernel.
function step_(x, alpha) {
    if (alpha === void 0) { alpha = 0.0; }
    var input = convertToTensor(x, 'x', 'step');
    return ENGINE.runKernel(Step, { x: input }, { alpha: alpha });
}
var step = op({ step_: step_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a strided slice of a tensor.
+ *
+ * Roughly speaking, this op extracts a slice of size (end-begin)/stride from
+ * the given input tensor (x). Starting at the location specified by begin the
+ * slice continues by adding stride to the index until all dimensions are not
+ * less than end. Note that a stride can be negative, which causes a reverse
+ * slice.
+ *
+ * ```js
+ * const t = tf.tensor3d([1, 1, 1 ,2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+ * [3, 2, 3]);
+ * t.stridedSlice([1, 0, 0], [2, 1, 3], [1, 1, 1]).print() // [[[3, 3, 3]]]
+ * t.stridedSlice([1, 0, 0], [2, 2, 3], [1, 1, 1]).print() // [[[3, 3, 3],
+ * // [4, 4, 4]]]
+ * t.stridedSlice([1, -1, 0], [2, -3, 3], [1, -1, 1]).print() // [[[4, 4, 4],
+ * // [3, 3, 3]]]
+ * ```
+ *
+ * @param x The tensor to stride slice.
+ * @param begin The coordinates to start the slice from.
+ * @param end: The coordinates to end the slice at.
+ * @param strides: The size of the slice.
+ * @param beginMask: If the ith bit of beginMask is set, begin[i] is ignored
+ * and the fullest possible range in that dimension is used instead.
+ * @param endMask: If the ith bit of endMask is set, end[i] is ignored
+ * and the fullest possible range in that dimension is used instead.
+ * @param shrinkAxisMask: a bitmask where bit i implies that
+ * the ith specification should shrink the dimensionality. begin and end must
+ * imply a slice of size 1 in the dimension.
+ *
+ * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
+ */
// Extracts a strided slice of `x`; all mask arguments default to 0 and are
// forwarded verbatim to the StridedSlice kernel.
function stridedSlice_(x, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask) {
    if (beginMask === void 0) { beginMask = 0; }
    if (endMask === void 0) { endMask = 0; }
    if (ellipsisMask === void 0) { ellipsisMask = 0; }
    if (newAxisMask === void 0) { newAxisMask = 0; }
    if (shrinkAxisMask === void 0) { shrinkAxisMask = 0; }
    var input = convertToTensor(x, 'x', 'stridedSlice', 'string_or_numeric');
    var kernelAttrs = {
        begin: begin,
        end: end,
        strides: strides,
        beginMask: beginMask,
        endMask: endMask,
        ellipsisMask: ellipsisMask,
        newAxisMask: newAxisMask,
        shrinkAxisMask: shrinkAxisMask
    };
    return ENGINE.runKernel(StridedSlice, { x: input }, kernelAttrs);
}
var stridedSlice = op({ stridedSlice_: stridedSlice_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes tan of the input `tf.Tensor` element-wise, `tan(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ *
+ * x.tan().print(); // or tf.tan(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
// Element-wise tangent: tan(x). Delegates to the Tan kernel.
function tan_(x) {
    var input = convertToTensor(x, 'x', 'tan', 'float32');
    return ENGINE.runKernel(Tan, { x: input });
}
var tan = op({ tan_: tan_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * ```js
+ * // Pass an array of values to create a vector.
+ * tf.tensor([1, 2, 3, 4]).print();
+ * ```
+ *
+ * ```js
+ * // Pass a nested array of values to make a matrix or a higher
+ * // dimensional tensor.
+ * tf.tensor([[1, 2], [3, 4]]).print();
+ * ```
+ *
+ * ```js
+ * // Pass a flat array and specify a shape yourself.
+ * tf.tensor([1, 2, 3, 4], [2, 2]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`. If the values are strings,
+ * they will be encoded as utf-8 and kept as `Uint8Array[]`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates a tensor from `values`; the shape inferred from the nested array
// structure is passed alongside any caller-supplied `shape`.
function tensor(values, shape, dtype) {
    return makeTensor(values, shape, inferShape(values, dtype), dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-1 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor1d` as it makes the code more readable.
+ *
+ * ```js
+ * tf.tensor1d([1, 2, 3]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be array of numbers,
+ * or a `TypedArray`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates a rank-1 tensor; `values` must be a flat array or TypedArray.
// The shape is always inferred (never caller-supplied).
function tensor1d(values, dtype) {
    assertNonNull(values);
    var inferredShape = inferShape(values, dtype);
    if (inferredShape.length !== 1) {
        throw new Error('tensor1d() requires values to be a flat/TypedArray');
    }
    return makeTensor(values, null, inferredShape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-2 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor2d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor2d([[1, 2], [3, 4]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor2d([1, 2, 3, 4], [2, 2]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. If not provided, it is inferred from
+ * `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates a rank-2 tensor from nested arrays, or from a flat array plus an
// explicit 2-element shape.
function tensor2d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 2) {
        throw new Error('tensor2d() requires shape to have two numbers');
    }
    var inferred = inferShape(values, dtype);
    var rank = inferred.length;
    if (rank !== 2 && rank !== 1) {
        throw new Error('tensor2d() requires values to be number[][] or flat/TypedArray');
    }
    // Flat values carry no shape information, so the caller must supply one.
    if (rank === 1 && shape == null) {
        throw new Error('tensor2d() requires shape to be provided when `values` are a flat/TypedArray');
    }
    return makeTensor(values, shape, inferred, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-3 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor3d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. If not provided, it is inferred from
+ * `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates a rank-3 tensor from nested arrays, or from a flat array plus an
// explicit 3-element shape.
function tensor3d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 3) {
        throw new Error('tensor3d() requires shape to have three numbers');
    }
    var inferred = inferShape(values, dtype);
    var rank = inferred.length;
    if (rank !== 3 && rank !== 1) {
        throw new Error('tensor3d() requires values to be number[][][] or flat/TypedArray');
    }
    // Flat values carry no shape information, so the caller must supply one.
    if (rank === 1 && shape == null) {
        throw new Error('tensor3d() requires shape to be provided when `values` are a flat array');
    }
    return makeTensor(values, shape, inferred, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-4 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor4d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates a rank-4 tensor from nested arrays, or from a flat array plus an
// explicit 4-element shape.
function tensor4d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 4) {
        throw new Error('tensor4d() requires shape to have four numbers');
    }
    var inferred = inferShape(values, dtype);
    var rank = inferred.length;
    if (rank !== 4 && rank !== 1) {
        throw new Error('tensor4d() requires values to be number[][][][] or flat/TypedArray');
    }
    // Flat values carry no shape information, so the caller must supply one.
    if (rank === 1 && shape == null) {
        throw new Error('tensor4d() requires shape to be provided when `values` are a flat array');
    }
    return makeTensor(values, shape, inferred, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-5 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor5d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor5d([[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates a rank-5 tensor from nested arrays, or from a flat array plus an
// explicit 5-element shape.
function tensor5d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 5) {
        throw new Error('tensor5d() requires shape to have five numbers');
    }
    var inferred = inferShape(values, dtype);
    var rank = inferred.length;
    if (rank !== 5 && rank !== 1) {
        throw new Error('tensor5d() requires values to be number[][][][][] or flat/TypedArray');
    }
    // Flat values carry no shape information, so the caller must supply one.
    if (rank === 1 && shape == null) {
        throw new Error('tensor5d() requires shape to be provided when `values` are a flat array');
    }
    return makeTensor(values, shape, inferred, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-6 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor6d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor6d([[[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor6d([1, 2, 3, 4, 5, 6, 7, 8], [1, 1, 2, 2, 2, 1]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Creates a rank-6 tensor from nested arrays, or from a flat array plus an
// explicit 6-element shape.
function tensor6d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 6) {
        throw new Error('tensor6d() requires shape to have six numbers');
    }
    var inferred = inferShape(values, dtype);
    var rank = inferred.length;
    if (rank !== 6 && rank !== 1) {
        throw new Error('tensor6d() requires values to be number[][][][][][] or flat/TypedArray');
    }
    // Flat values carry no shape information, so the caller must supply one.
    if (rank === 1 && shape == null) {
        throw new Error('tensor6d() requires shape to be provided when `values` are a flat array');
    }
    // Unlike the lower-rank factories, a missing shape falls back to the
    // inferred one before construction (preserved from the original).
    return makeTensor(values, shape || inferred, inferred, dtype);
}
+
+ /**
+ * Finds the values and indices of the `k` largest entries along the last
+ * dimension.
+ *
+ * If the input is a vector (rank=1), finds the k largest entries in the vector
+ * and outputs their values and indices as vectors. Thus values[j] is the j-th
+ * largest entry in input, and its index is indices[j].
+ * For higher rank inputs, computes the top k entries along the last dimension.
+ *
+ * If two elements are equal, the lower-index element appears first.
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 5], [4, 3]]);
+ * const {values, indices} = tf.topk(a);
+ * values.print();
+ * indices.print();
+ * ```
+ * @param x 1-D or higher `tf.Tensor` with last dimension being at least `k`.
+ * @param k Number of top elements to look for along the last dimension.
+ * @param sorted If true, the resulting `k` elements will be sorted by the
+ * values in descending order.
+ *
+ * @doc {heading: 'Operations', subheading: 'Evaluation'}
+ */
// Returns the values and indices of the k largest entries along the last
// dimension; ties go to the lower index.
function topk_(x, k, sorted) {
    if (k === void 0) { k = 1; }
    if (sorted === void 0) { sorted = true; }
    var input = convertToTensor(x, 'x', 'topk');
    if (input.rank === 0) {
        throw new Error('topk() expects the input to be of rank 1 or higher');
    }
    var lastDim = input.shape[input.shape.length - 1];
    if (k < 0) {
        throw new Error("'k' passed to topk() must be >= 0 but got " + k);
    }
    // k cannot exceed the number of entries along the last dimension.
    if (k > lastDim) {
        throw new Error("'k' passed to topk() must be <= the last dimension (" + lastDim + ") but got " + k);
    }
    var outputs = __read(ENGINE.runKernel(TopK, { x: input }, { k: k, sorted: sorted }), 2);
    return { values: outputs[0], indices: outputs[1] };
}
var topk = op({ topk_: topk_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a truncated normal
+ * distribution.
+ *
+ * ```js
+ * tf.truncatedNormal([2, 2]).print();
+ * ```
+ *
+ * The generated values follow a normal distribution with specified mean and
+ * standard deviation, except that values whose magnitude is more than 2
+ * standard deviations from the mean are dropped and re-picked.
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param mean The mean of the normal distribution.
+ * @param stdDev The standard deviation of the normal distribution.
+ * @param dtype The data type of the output tensor.
+ * @param seed The seed for the random number generator.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Creates a tensor sampled from a truncated normal distribution: values more
 * than 2 standard deviations from the mean are dropped and re-picked by
 * MPRandGauss.
 *
 * @param shape Output tensor shape.
 * @param mean Mean of the distribution (default 0).
 * @param stdDev Standard deviation (default 1).
 * @param dtype Output dtype; 'bool' is rejected.
 * @param seed Seed for the random number generator.
 * @throws Error if dtype is 'bool'.
 */
function truncatedNormal_(shape, mean, stdDev, dtype, seed) {
    if (mean === void 0) { mean = 0; }
    if (stdDev === void 0) { stdDev = 1; }
    // Bug fix: the original threw the literal string
    // "Unsupported data type $ { dtype }" (a broken template literal that
    // never interpolated the dtype). Also dropped the redundant
    // `dtype != null &&` guard — the strict comparison already handles it.
    if (dtype === 'bool') {
        throw new Error("Unsupported data type " + dtype);
    }
    var randGauss = new MPRandGauss(mean, stdDev, dtype, true /* truncated */, seed);
    var res = buffer(shape, dtype);
    // Fill the buffer sample-by-sample, then materialize it as a tensor.
    for (var i = 0; i < res.values.length; i++) {
        res.values[i] = randGauss.nextValue();
    }
    return res.toTensor();
}
var truncatedNormal = op({ truncatedNormal_: truncatedNormal_ });
+
+ /**
+ * Finds unique elements along an axis of a tensor.
+ *
+ * It returns a tensor `values` containing all of the unique elements along the
+ * `axis` of the given tensor `x` in the same order that they occur along the
+ * `axis` in `x`; `x` does not need to be sorted. It also returns a tensor
+ * `indices` the same size as the number of the elements in `x` along the `axis`
+ * dimension. It contains the index in the unique output `values`.
+ *
+ * ```js
+ * // A 1-D tensor
+ * const a = tf.tensor1d([1, 1, 2, 4, 4, 4, 7, 8, 8]);
+ * const {values, indices} = tf.unique(a);
+ * values.print(); // [1, 2, 4, 7, 8,]
+ * indices.print(); // [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ * ```
+ *
+ * ```js
+ * // A 2-D tensor with axis=0
+ * //
+ * // 'a' is: [[1, 0, 0],
+ * // [1, 0, 0],
+ * // [2, 0, 0]]
+ * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]);
+ * const {values, indices} = tf.unique(a, 0)
+ * values.print(); // [[1, 0, 0],
+ * // [2, 0, 0]]
+ * indices.print(); // [0, 0, 1]
+ * ```
+ *
+ * ```js
+ * // A 2-D tensor with axis=1
+ * //
+ * // 'a' is: [[1, 0, 0],
+ * // [1, 0, 0],
+ * // [2, 0, 0]]
+ * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]);
+ * const {values, indices} = tf.unique(a, 1)
+ * values.print(); // [[1, 0],
+ * // [1, 0],
+ * // [2, 0]]
+ * indices.print(); // [0, 1, 1]
+ * ```
+ * @param x A tensor (int32, string, bool).
+ * @param axis The axis of the tensor to find the unique elements.
+ * @returns [uniqueElements, indices] (see above for details)
+ *
+ * @doc {heading: 'Operations', subheading: 'Evaluation'}
+ */
// Finds unique elements along `axis` (default 0), returning them in order of
// first occurrence plus an index tensor mapping inputs to unique values.
function unique_(x, axis) {
    if (axis === void 0) { axis = 0; }
    var input = convertToTensor(x, 'x', 'unique', 'string_or_numeric');
    assert(input.rank > 0, function () { return 'The input tensor must be at least 1D'; });
    var outputs = __read(ENGINE.runKernel(Unique, { x: input }, { axis: axis }), 2);
    return { values: outputs[0], indices: outputs[1] };
}
var unique = op({ unique_: unique_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the sum along segments of a `tf.Tensor`.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const segmentIds = tf.tensor1d([1, 2, 0, 1], 'int32');
+ * const numSegments = 3;
+ *
+ * x.unsortedSegmentSum(segmentIds, numSegments).print()
+ * //or tf.unsortedSegmentSum(x, segmentIds, numSegments)
+ * ```
+ * @param x The `tf.Tensor` that will be summed along its segments.
+ * @param segmentIds A `tf.Tensor1D` whose rank is equal to the rank of `x`'s
+ * dimension along the `axis`. Maps each element of `x` to a segment.
+ * @param numSegments The number of distinct `segmentIds`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Segment'}
+ */
// Sums elements of `x` grouped by `segmentIds` into `numSegments` buckets
// via the UnsortedSegmentSum kernel.
function unsortedSegmentSum_(x, segmentIds, numSegments) {
    var input = convertToTensor(x, 'x', 'unsortedSegmentSum');
    var ids = convertToTensor(segmentIds, 'segmentIds', 'unsortedSegmentSum', 'int32');
    // The segment count must be an integer; it fixes the output's first dim.
    assert(isInt(numSegments), function () { return 'numSegments must be of dtype int'; });
    return ENGINE.runKernel(UnsortedSegmentSum, { x: input, segmentIds: ids }, { numSegments: numSegments });
}
var unsortedSegmentSum = op({ unsortedSegmentSum_: unsortedSegmentSum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Unstacks a `tf.Tensor` of rank-`R` into a list of rank-`(R-1)` `tf.Tensor`s.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * tf.unstack(a).forEach(tensor => tensor.print());
+ * ```
+ *
+ * @param x A tensor object.
+ * @param axis The axis to unstack along. Defaults to 0 (the first dim).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
/**
 * Unstacks a rank-R tensor into a list of rank-(R-1) tensors along `axis`.
 *
 * @param x The tensor to unstack (numeric or string).
 * @param axis The axis to unstack along; defaults to 0. Negative values
 *     count from the end.
 */
function unstack_(x, axis) {
    if (axis === void 0) { axis = 0; }
    var $x = convertToTensor(x, 'x', 'unstack', 'string_or_numeric');
    var rank = $x.shape.length;
    assert(axis >= -rank && axis < rank, function () { return "Axis = " + axis + " is not in [-" + rank + ", " + rank + ")"; });
    return ENGINE.runKernel(Unpack, { value: $x }, { axis: axis });
}
var unstack = op({ unstack_: unstack_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new variable with the provided initial value.
+ * ```js
+ * const x = tf.variable(tf.tensor([1, 2, 3]));
+ * x.assign(tf.tensor([4, 5, 6]));
+ *
+ * x.print();
+ * ```
+ *
+ * @param initialValue Initial value for the tensor.
+ * @param trainable If true, optimizers are allowed to update it.
+ * @param name Name of the variable. Defaults to a unique id.
+ * @param dtype If set, initialValue will be converted to the given type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Creates a new variable tensor initialized to `initialValue`.
 *
 * @param initialValue Initial value for the tensor.
 * @param trainable Whether optimizers may update it; defaults to true.
 * @param name Optional variable name; defaults to a unique id.
 * @param dtype Optional dtype to convert the initial value to.
 */
function variable(initialValue, trainable, name, dtype) {
    var isTrainable = trainable === void 0 ? true : trainable;
    return ENGINE.makeVariable(initialValue, isTrainable, name, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * CPU implementation of `where(cond)`: returns an int32 tensor of shape
 * [numTrueElems, condShape.length] whose rows are the multi-dimensional
 * coordinates of the truthy entries of `condVals`, in row-major order.
 *
 * @param condShape Shape of the condition tensor.
 * @param condVals Flat (row-major) condition values.
 */
function whereImpl(condShape, condVals) {
    // Collect the flat positions of all truthy condition values.
    var truthyPositions = [];
    for (var flat = 0; flat < condVals.length; flat++) {
        if (condVals[flat]) {
            truthyPositions.push(flat);
        }
    }
    var locator = buffer(condShape, 'int32');
    var rank = condShape.length;
    var out = buffer([truthyPositions.length, rank], 'int32');
    truthyPositions.forEach(function (flatIndex, row) {
        // Convert each flat index back to multi-dimensional coordinates.
        out.values.set(locator.indexToLoc(flatIndex), row * rank);
    });
    return out.toTensor();
}
+
+ /**
+ * Returns the coordinates of true elements of condition.
+ *
+ * The coordinates are returned in a 2-D tensor where the first dimension (rows)
+ * represents the number of true elements, and the second dimension (columns)
+ * represents the coordinates of the true elements. Keep in mind, the shape of
+ * the output tensor can vary depending on how many true values there are in
+ * input. Indices are output in row-major order. The resulting tensor has the
+ * shape `[numTrueElems, condition.rank]`.
+ *
+ * This is analogous to calling the python `tf.where(cond)` without an x or y.
+ *
+ * ```js
+ * const cond = tf.tensor1d([false, false, true], 'bool');
+ * const result = await tf.whereAsync(cond);
+ * result.print();
+ * ```
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
/**
 * Returns the coordinates of the true elements of `condition` as a 2-D
 * int32 tensor of shape [numTrueElems, condition.rank], in row-major order.
 * Async because it must first download the condition values to the CPU.
 */
function whereAsync_(condition) {
    // Compiled TypeScript async state machine: label 0 starts the data
    // download, label 1 resumes once the values are available.
    return __awaiter(this, void 0, void 0, function () {
        var $condition, vals, res;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    $condition = convertToTensor(condition, 'condition', 'whereAsync', 'bool');
                    // Await the condition tensor's bool values.
                    return [4 /*yield*/, $condition.data()];
                case 1:
                    vals = _a.sent();
                    res = whereImpl($condition.shape, vals);
                    // Dispose the intermediate only if conversion created it.
                    if (condition !== $condition) {
                        $condition.dispose();
                    }
                    return [2 /*return*/, res];
            }
        });
    });
}
var whereAsync = whereAsync_;
+
+ /**
+ * Apply boolean mask to tensor.
+ *
+ * ```js
+ * const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
+ * const mask = tf.tensor1d([1, 0, 1], 'bool');
+ * const result = await tf.booleanMaskAsync(tensor, mask);
+ * result.print();
+ * ```
+ *
+ * @param tensor N-D tensor.
+ * @param mask K-D boolean tensor, K <= N and K must be known statically.
+ * @param axis A 0-D int Tensor representing the axis in tensor to mask from.
+ * By default, axis is 0 which will mask from the first dimension.
+ * Otherwise K + axis <= N.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
/**
 * Applies a boolean mask to `tensor`, gathering the slices whose mask value
 * is true. Async because the mask must be read on the CPU (via whereAsync)
 * to determine the output size.
 */
function booleanMaskAsync_(tensor, mask, axis) {
    // Compiled TypeScript async state machine (__awaiter/__generator).
    return __awaiter(this, void 0, void 0, function () {
        var $tensor, $mask, axisFrom, maskDim, tensorShape, leadingSize, i, targetTensorShape, reshapedTensor, reshapedMask, positivePositions, indices, res;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    $tensor = convertToTensor(tensor, 'tensor', 'boolMask');
                    $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool');
                    axisFrom = axis == null ? 0 : axis;
                    maskDim = $mask.rank;
                    tensorShape = $tensor.shape;
                    assert(maskDim > 0, function () { return 'mask cannot be scalar'; });
                    assertShapesMatch(tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape, "mask's shape must match the first K dimensions of tensor's shape,");
                    // Collapse the masked dimensions into one leading axis so a
                    // flat mask + 1-D gather can select the kept slices.
                    leadingSize = 1;
                    for (i = axisFrom; i < axisFrom + maskDim; i++) {
                        leadingSize *= tensorShape[i];
                    }
                    targetTensorShape = tensorShape.slice(0, axisFrom)
                        .concat([leadingSize], tensorShape.slice(axisFrom + maskDim));
                    reshapedTensor = reshape($tensor, targetTensorShape);
                    reshapedMask = reshape($mask, [-1]);
                    // Await the coordinates of the true mask entries.
                    return [4 /*yield*/, whereAsync(reshapedMask)];
                case 1:
                    positivePositions = _a.sent();
                    // whereAsync returns shape [n, 1]; squeeze to a 1-D index list.
                    indices = squeeze(positivePositions, [1]);
                    res = gather(reshapedTensor, indices, axisFrom);
                    // Ensure no memory leak.
                    if (tensor !== $tensor) {
                        $tensor.dispose();
                    }
                    if (mask !== $mask) {
                        $mask.dispose();
                    }
                    indices.dispose();
                    reshapedTensor.dispose();
                    reshapedMask.dispose();
                    positivePositions.dispose();
                    return [2 /*return*/, res];
            }
        });
    });
}
var booleanMaskAsync = booleanMaskAsync_;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.
+ *
+ * The returned `tf.Tensor`'s dimension `i` will correspond to the input
+ * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,
+ * where `n` is the rank of the input `tf.Tensor`. Hence by default, this
+ * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);
+ *
+ * a.transpose().print(); // or tf.transpose(a)
+ * ```
+ *
+ * @param x The tensor to transpose.
+ * @param perm The permutation of the dimensions of a.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
/**
 * Transposes `x`, permuting its dimensions according to `perm`. When `perm`
 * is omitted the dimension order is fully reversed (matrix transpose for
 * 2-D inputs). Rank <= 1 tensors are returned as clones unchanged.
 *
 * @param x The tensor to transpose.
 * @param perm Permutation of the dimensions of `x`.
 */
function transpose_(x, perm) {
    var $x = convertToTensor(x, 'x', 'transpose');
    if (perm == null) {
        // Default permutation reverses the axes: [n-1, ..., 1, 0].
        perm = [];
        for (var i = $x.rank - 1; i >= 0; i--) {
            perm.push(i);
        }
    }
    assert($x.rank === perm.length, function () { return "Error in transpose: rank of input " + $x.rank + " " +
        ("must match length of perm " + perm + "."); });
    perm.forEach(function (axis) {
        assert(axis >= 0 && axis < $x.rank, function () { return "All entries in 'perm' must be between 0 and " + ($x.rank - 1) +
            (" but got " + perm); });
    });
    // Transposing a scalar or vector is a no-op.
    if ($x.rank <= 1) {
        return $x.clone();
    }
    return ENGINE.runKernel(Transpose, { x: $x }, { perm: perm });
}
var transpose = op({ transpose_: transpose_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the norm of scalar, vectors, and matrices.
+ * This function can compute several different vector norms (the 1-norm, the
+ * Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0)
+ * and matrix norms (Frobenius, 1-norm, and inf-norm).
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * x.norm().print(); // or tf.norm(x)
+ * ```
+ *
+ * @param x The input array.
+ * @param ord Optional. Order of the norm. Supported norm types are
+ * following:
+ *
+ * | ord | norm for matrices | norm for vectors
+ * |------------|---------------------------|---------------------
+ * |'euclidean' |Frobenius norm |2-norm
+ * |'fro' |Frobenius norm |
+ * |Infinity |max(sum(abs(x), axis=1)) |max(abs(x))
+ * |-Infinity |min(sum(abs(x), axis=1)) |min(abs(x))
+ * |1 |max(sum(abs(x), axis=0)) |sum(abs(x))
+ * |2 | |sum(abs(x)^2)^1/2*
+ *
+ * @param axis Optional. If axis is null (the default), the input is
+ * considered a vector and a single vector norm is computed over the entire
+ * set of values in the Tensor, i.e. norm(x, ord) is equivalent
+ * to norm(x.reshape([-1]), ord). If axis is an integer, the input
+ * is considered a batch of vectors, and axis determines the axis in x
+ * over which to compute vector norms. If axis is a 2-tuple of integers it is
+ * considered a batch of matrices and axis determines the axes in NDArray
+ * over which to compute a matrix norm.
+ * @param keepDims Optional. If true, the norm has the same dimensionality
+ * as the input.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
/**
 * Computes vector and matrix norms (see normImpl for the supported `ord`
 * values), optionally keeping the reduced dimensions as size-1 axes.
 *
 * @param x The input tensor.
 * @param ord Order of the norm; defaults to 'euclidean'.
 * @param axis Axis/axes to reduce over; null treats `x` as one flat vector.
 * @param keepDims If true, retains reduced dimensions with size 1.
 */
function norm_(x, ord, axis, keepDims) {
    if (ord === void 0) { ord = 'euclidean'; }
    if (axis === void 0) { axis = null; }
    if (keepDims === void 0) { keepDims = false; }
    x = convertToTensor(x, 'x', 'norm');
    var result = normImpl(x, ord, axis);
    var outShape = result.shape;
    if (keepDims) {
        // Re-insert the reduced axes as size-1 dimensions.
        outShape = expandShapeToKeepDim(result.shape, parseAxisParam(axis, x.shape));
    }
    return reshape(result, outShape);
}
/**
 * Dispatches to the scalar, vector, or matrix norm computation depending
 * on the rank of `x` and the form of `axis`. Throws on unsupported `p`
 * (ord) values or malformed axes.
 */
function normImpl(x, p, axis) {
    if (axis === void 0) { axis = null; }
    // Scalar: the norm is simply the absolute value.
    if (x.rank === 0) {
        return abs(x);
    }
    // consider vector when no axis is specified
    if (x.rank !== 1 && axis === null) {
        // Flatten and recurse so the vector branch below handles it.
        return normImpl(reshape(x, [-1]), p, axis);
    }
    // vector
    if (x.rank === 1 || typeof axis === 'number' ||
        Array.isArray(axis) && axis.length === 1) {
        if (p === 1) {
            return sum(abs(x), axis);
        }
        if (p === Infinity) {
            return max(abs(x), axis);
        }
        if (p === -Infinity) {
            return min(abs(x), axis);
        }
        if (p === 'euclidean' || p === 2) {
            // norm(x, 2) = sum(abs(xi) ^ 2) ^ 1/2
            return sqrt(sum(pow(abs(x), scalar(2, 'int32')), axis));
        }
        throw new Error("Error in norm: invalid ord value: " + p);
    }
    // matrix (assumption axis[0] < axis[1])
    if (Array.isArray(axis) && axis.length === 2) {
        if (p === 1) {
            // Summing over axis[0] removes that dimension, so the original
            // axis[1] shifts down by one (valid since axis[0] < axis[1]).
            return max(sum(abs(x), axis[0]), axis[1] - 1);
        }
        if (p === Infinity) {
            // Reducing over axis[1] first leaves axis[0] unshifted.
            return max(sum(abs(x), axis[1]), axis[0]);
        }
        if (p === -Infinity) {
            return min(sum(abs(x), axis[1]), axis[0]);
        }
        if (p === 'fro' || p === 'euclidean') {
            // norm(x) = sqrt(sum(pow(x, 2)))
            return sqrt(sum(square(x), axis));
        }
        throw new Error("Error in norm: invalid ord value: " + p);
    }
    throw new Error("Error in norm: invalid axis: " + axis);
}
var norm = op({ norm_: norm_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Compute the moving average of a variable.
+ *
+ * Without zeroDebias, the moving average operation is defined by:
+ * `v += delta`
+ * where
+ * `delta = (1 - decay) * (x - v)`
+ *
+ * With zeroDebias (default), the `delta` term is scaled to debias the
+ * effect of the (assumed) zero-initialization of `v`.
+ * `delta /= (1 - decay ^ step)`
+ *
+ * For more details on the zero-debiasing algorithm, see:
+ * https://arxiv.org/abs/1412.6980
+ *
+ * Note that this function is completely stateless and does not keep track of
+ * step count. The step count needs to be maintained by the caller and passed
+ * in as `step`.
+ *
+ * @param v The current moving average value.
+ * @param x New input value, must have the same shape and dtype as `v`.
+ * @param decay The decay factor. Typical values are 0.95 and 0.99.
+ * @param step Step count.
+ * @param zeroDebias: Whether zeroDebias is to be performed (default: `true`).
+ * @returns The new moving average value.
+ *
+ * @doc {heading: 'Operations', subheading: 'Moving Average'}
+ */
/**
 * Computes the moving average of `v` given a new observation `x`:
 * v' = v + (1 - decay) * (x - v), optionally zero-debiased by
 * dividing the delta by (1 - decay^step). Stateless: callers maintain
 * the step count.
 *
 * @param v Current moving-average value.
 * @param x New input; must match `v` in shape and dtype.
 * @param decay Decay factor (e.g. 0.95 or 0.99).
 * @param step Step count; required when zeroDebias is true.
 * @param zeroDebias Whether to apply zero-debiasing (default true).
 * @returns The updated moving-average value.
 */
function movingAverage_(v, x, decay, step, zeroDebias) {
    if (zeroDebias === void 0) { zeroDebias = true; }
    var $v = convertToTensor(v, 'v', 'movingAverage');
    var $x = convertToTensor(x, 'x', 'movingAverage');
    var $decay = convertToTensor(decay, 'decay', 'movingAverage');
    assertTypesMatch($v, $x);
    assert(arraysEqual($v.shape, $x.shape), function () { return 'Shape mismatch in v and x'; });
    var one = scalar(1);
    // delta = (1 - decay) * (x - v)
    var delta = mul(sub($x, $v), sub(one, $decay));
    if (zeroDebias) {
        assert(step != null, function () { return 'When using zeroDebias: true, step is required.'; });
        var $step = convertToTensor(step, 'step', 'movingAverage');
        // Scale the delta to undo the bias of a zero-initialized average.
        delta = div(delta, sub(one, pow($decay, $step)));
    }
    return add($v, delta);
}
var movingAverage = op({ movingAverage_: movingAverage_ });
+
+ /**
+ * Check whether updates.shape = indices.shape[:batchDim] +
+ * shape[sliceDim:]
+ *
+ * @param x The input tensor.
+ */
/**
 * Checks that updates.shape = indices.shape[:batchDim] + shape[sliceDim:],
 * throwing a descriptive Error when the constraint is violated.
 *
 * @param shape The output shape of the scatter op.
 * @param indices The indices tensor-like ({rank, shape}).
 * @param updates The updates tensor-like ({rank, shape}).
 */
function validateUpdateShape(shape, indices, updates) {
    // For rank > 1 indices, the innermost dimension indexes into `shape`
    // and the outer dimensions are batch dimensions.
    var sliceDim = (indices.rank > 1) ? indices.shape[indices.rank - 1] : 1;
    var batchDim = (indices.rank > 1) ? indices.rank - 1 : 1;
    var shapeError = 'Must have updates.shape = indices.shape[:batchDim] + ' +
        ("shape[sliceDim:], got updates.shape: " + updates.shape) +
        (", indices.shape: " + indices.shape + ", shape: " + shape) +
        (", sliceDim: " + sliceDim + ", and batchDim: " + batchDim + ".");
    if (updates.rank < batchDim) {
        throw new Error(shapeError + (" update.rank < " + batchDim + ". "));
    }
    if (shape.length < sliceDim + (updates.rank - batchDim)) {
        throw new Error(shapeError +
            (" Output shape length < " + (sliceDim + (updates.rank - batchDim))));
    }
    if (updates.rank !== batchDim + shape.length - sliceDim) {
        throw new Error(shapeError + (" update.rank != " + (batchDim + shape.length - sliceDim)));
    }
    // Batch dimensions of updates must match those of indices.
    for (var d = 0; d < batchDim; ++d) {
        if (updates.shape[d] !== indices.shape[d]) {
            throw new Error(shapeError +
                (" updates.shape[" + d + "] (" + updates.shape[d] + ") != indices.shape[" + d + "] (" + indices.shape[d] + ")."));
        }
    }
    // Trailing dimensions of updates must match shape[sliceDim:].
    // Bug fix: the original message printed shape[d + batchDim] even though
    // the comparison reads shape[d + sliceDim], producing a misleading
    // diagnostic whenever batchDim != sliceDim.
    for (var d = 0; d < updates.rank - batchDim; ++d) {
        if (updates.shape[d + batchDim] !== shape[d + sliceDim]) {
            throw new Error(shapeError +
                (" updates.shape[" + (d + batchDim) + "] (" + updates.shape[d + batchDim] + ") != shape[" + (d + sliceDim) + "] (" + shape[d + sliceDim] + ")"));
        }
    }
}
+ /**
+ * Validate scatter nd inputs.
+ *
+ * @param update The tensor contains the update values.
+ * @param indices The tensor contains the indices for the update values.
+ * @param shape The shape of the output tensor.
+ */
/**
 * Validates scatterND inputs, throwing on bad ranks, dtypes, or shapes.
 *
 * @param updates The tensor containing the update values.
 * @param indices The int32 tensor containing the indices for the updates.
 * @param shape The shape of the output tensor.
 */
function validateInput$1(updates, indices, shape) {
    if (indices.rank < 1) {
        throw new Error('tf.scatterND() expects the indices to be rank 1 or higher,' +
            (" but the rank was " + indices.rank + "."));
    }
    if (updates.rank < 1) {
        throw new Error('tf.scatterND() expects the updates to be rank 1 or higher,' +
            (" but the rank was " + updates.rank + "."));
    }
    if (indices.dtype !== 'int32') {
        throw new Error("The dtype of 'indices' should be int32, but got dtype: " + indices.dtype);
    }
    if (shape.length < 1) {
        throw new Error("Output rank must be greater or equal to 1, but got shape: " + shape);
    }
    // NOTE(review): this branch is unreachable — shape.length === 0 already
    // throws in the `shape.length < 1` check above. Preserved as-is to keep
    // control flow identical to the original.
    if (shape.length === 0) {
        if (indices.size === 0) {
            throw new Error("Indices specified for empty output. indices shape: " + indices.shape);
        }
        if (updates.size === 0) {
            throw new Error("Updates specified for empty output. updates shape: " + updates.shape);
        }
    }
    // Finally check the updates/indices/shape consistency constraint.
    validateUpdateShape(shape, indices, updates);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new tensor by applying sparse updates to individual
+ * values or slices within a zero tensor of the given shape tensor according to
+ * indices. This operator is the inverse of the `tf.gatherND` operator which
+ * extracts values or slices from a given tensor.
+ *
+ * ```js
+ * const indices = tf.tensor2d([4, 3, 1, 7], [4, 1], 'int32');
+ * const updates = tf.tensor1d([9, 10, 11, 12]);
+ * const shape = [8];
+ * tf.scatterND(indices, updates, shape).print() //[0, 11, 0, 10, 9, 0, 0, 12]
+ * ```
+ *
+ * @param indices The tensor contains the indices into the output tensor.
+ * @param updates The tensor contains the value for the indices.
+ * @param shape: The shape of the output tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
+ */
/**
 * Scatters `updates` into a zero tensor of the given `shape` at the
 * positions named by `indices` (inverse of gatherND).
 *
 * @param indices int32 tensor of indices into the output tensor.
 * @param updates Tensor of values to place at those indices.
 * @param shape Shape of the output tensor.
 */
function scatterND_(indices, updates, shape) {
    var $indices = convertToTensor(indices, 'indices', 'scatterND', 'int32');
    var $updates = convertToTensor(updates, 'updates', 'scatterND');
    validateInput$1($updates, $indices, shape);
    // tslint:disable-next-line: no-unnecessary-type-assertion
    return ENGINE.runKernel(ScatterNd, { indices: $indices, updates: $updates }, { shape: shape });
}
var scatterND = op({ scatterND_: scatterND_ });
+
+ /**
+ * Validate sparseToDense inputs.
+ *
+ * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32.
+ * sparseIndices[i] contains the complete index where sparseValues[i] will be
+ * placed.
+ * @param sparseValues A 0-D or 1-D Tensor. Values
+ * corresponding to each row of sparseIndices, or a scalar value to be used for
+ * all sparse indices.
+ * @param outputShape number[]. Shape of the dense output tensor.
+ * @param validateIndices boolean. indice validation is not supported, error
+ * will be thrown if it is set.
+ */
/**
 * Validates sparseToDense inputs, throwing on bad dtypes or shapes.
 *
 * @param sparseIndices 0-D, 1-D, or 2-D int32 tensor-like of indices.
 * @param sparseValues 0-D or 1-D tensor-like of values (scalar applies to
 *     all indices; vector length must equal the number of index rows).
 * @param outputShape Shape of the dense output tensor.
 * @param defaultValues Scalar default; dtype must match sparseValues.
 */
function validateInput(sparseIndices, sparseValues, outputShape, defaultValues) {
    if (sparseIndices.dtype !== 'int32') {
        throw new Error('tf.sparseToDense() expects the indices to be int32 type,' +
            (" but the dtype was " + sparseIndices.dtype + "."));
    }
    if (sparseIndices.rank > 2) {
        throw new Error('sparseIndices should be a scalar, vector, or matrix,' +
            (" but got shape " + sparseIndices.shape + "."));
    }
    // A scalar index is one element in one dimension; a vector is N
    // elements in one dimension; a matrix is N elements in M dimensions.
    var numElems = sparseIndices.rank > 0 ? sparseIndices.shape[0] : 1;
    var numDims = sparseIndices.rank > 1 ? sparseIndices.shape[1] : 1;
    if (outputShape.length !== numDims) {
        throw new Error('outputShape has incorrect number of elements:,' +
            (" " + outputShape.length + ", should be: " + numDims + "."));
    }
    var valuesAreScalar = sparseValues.rank === 0;
    var valuesMatchRows = sparseValues.rank === 1 && sparseValues.size === numElems;
    if (!valuesAreScalar && !valuesMatchRows) {
        throw new Error('sparseValues has incorrect shape ' +
            (sparseValues.shape + ", should be [] or [" + numElems + "]"));
    }
    if (sparseValues.dtype !== defaultValues.dtype) {
        throw new Error('sparseValues.dtype must match defaultValues.dtype');
    }
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Converts a sparse representation into a dense tensor.
+ *
+ * Builds an array dense with shape outputShape such that:
+ *
+ * // If sparseIndices is scalar
+ * dense[i] = (i == sparseIndices ? sparseValues : defaultValue)
+ *
+ * // If sparseIndices is a vector, then for each i
+ * dense[sparseIndices[i]] = sparseValues[i]
+ *
+ * // If sparseIndices is an n by d matrix, then for each i in [0, n)
+ * dense[sparseIndices[i][0], ..., sparseIndices[i][d-1]] = sparseValues[i]
+ * All other values in dense are set to defaultValue. If sparseValues is a
+ * scalar, all sparse indices are set to this single value.
+ *
+ * If indices are repeated the final value is summed over all values for those
+ * indices.
+ *
+ * ```js
+ * const indices = tf.tensor1d([4, 5, 6, 1, 2, 3], 'int32');
+ * const values = tf.tensor1d([10, 11, 12, 13, 14, 15], 'float32');
+ * const shape = [8];
+ * tf.sparseToDense(indices, values, shape).print();
+ * ```
+ *
+ * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32.
+ * sparseIndices[i] contains the complete index where sparseValues[i] will be
+ * placed.
+ * @param sparseValues A 0-D or 1-D Tensor. Values
+ * corresponding to each row of sparseIndices, or a scalar value to be used for
+ * all sparse indices.
+ * @param outputShape Shape of the dense output tensor. the type is inferred.
+ * @param defaultValue Scalar. Value to set for indices not specified in
+ * sparseIndices. Defaults to zero.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
/**
 * Converts a sparse (indices, values) representation into a dense tensor
 * of shape `outputShape`, filling unspecified positions with
 * `defaultValue` (0 by default).
 *
 * @param sparseIndices 0-D, 1-D, or 2-D int32 tensor of indices.
 * @param sparseValues 0-D or 1-D tensor of values for those indices.
 * @param outputShape Shape of the dense output tensor.
 * @param defaultValue Scalar fill value for unspecified positions.
 */
function sparseToDense_(sparseIndices, sparseValues, outputShape, defaultValue) {
    if (defaultValue === void 0) { defaultValue = 0; }
    var $sparseIndices = convertToTensor(sparseIndices, 'sparseIndices', 'sparseToDense', 'int32');
    var $sparseValues = convertToTensor(sparseValues, 'sparseValues', 'sparseToDense');
    // The default value adopts the values' dtype.
    var $defaultValue = convertToTensor(defaultValue, 'defaultValue', 'sparseToDense', $sparseValues.dtype);
    validateInput($sparseIndices, $sparseValues, outputShape, $defaultValue);
    return ENGINE.runKernel(SparseToDense, {
        sparseIndices: $sparseIndices,
        sparseValues: $sparseValues,
        defaultValue: $defaultValue
    }, { outputShape: outputShape });
}
var sparseToDense = op({ sparseToDense_: sparseToDense_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Gather slices from input tensor into a Tensor with shape specified by
+ * `indices`.
+ *
+ * `indices` is an K-dimensional integer tensor, best thought of as a
+ * (K-1)-dimensional tensor of indices into input, where each element defines a
+ * slice of input:
+ * output[\\(i_0, ..., i_{K-2}\\)] = input[indices[\\(i_0, ..., i_{K-2}\\)]]
+ *
+ * Whereas in `tf.gather`, `indices` defines slices into the first dimension of
+ * input, in `tf.gatherND`, `indices` defines slices into the first N dimensions
+ * of input, where N = indices.shape[-1].
+ *
+ * The last dimension of indices can be at most the rank of input:
+ * indices.shape[-1] <= input.rank
+ *
+ * The last dimension of `indices` corresponds to elements
+ * (if indices.shape[-1] == input.rank) or slices
+ * (if indices.shape[-1] < input.rank) along dimension indices.shape[-1] of
+ * input.
+ * The output tensor has shape
+ * indices.shape[:-1] + input.shape[indices.shape[-1]:]
+ *
+ * Note that on CPU, if an out of bound index is found, an error is returned. On
+ * GPU, if an out of bound index is found, a 0 is stored in the corresponding
+ * output value.
+ *
+ * ```js
+ * const indices = tf.tensor2d([0, 1, 1, 0], [2,2], 'int32');
+ * const input = tf.tensor2d([9, 10, 11, 12], [2, 2]);
+ * tf.gatherND(input, indices).print() // [10, 11]
+ * ```
+ *
+ * @param x The tensor from which to gather values.
+ * @param indices Index tensor, must be of type int32.
+ *
+ * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
+ */
/**
 * Gathers slices from `x` into a tensor whose leading shape comes from
 * `indices` (see tf.gatherND): each innermost row of `indices` selects an
 * element or slice of `x`.
 *
 * @param x The tensor from which to gather values.
 * @param indices Index tensor; must be int32.
 */
function gatherND_(x, indices) {
    var $indices = convertToTensor(indices, 'indices', 'gatherND', 'int32');
    var $x = convertToTensor(x, 'x', 'gatherND', 'string_or_numeric');
    return ENGINE.runKernel(GatherNd, { params: $x, indices: $indices });
}
var gatherND = op({ gatherND_: gatherND_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Normalize noise shape based on provided tensor and noise shape.
+ *
+ * @param x Tensor.
+ * @param noiseShape The shape for the randomly generated keep/drop flags, as
+ * an array of numbers. Optional.
+ * @returns Normalized noise shape.
+ */
/**
 * Normalizes a dropout noise shape against tensor `x`: a null noiseShape
 * becomes a copy of x's shape, and when the lengths match, null entries of
 * noiseShape are replaced by x's corresponding dimension size.
 *
 * @param x Tensor whose shape supplies the defaults.
 * @param noiseShape Optional array of numbers (entries may be null).
 * @returns Normalized noise shape.
 */
function getNoiseShape(x, noiseShape) {
    if (noiseShape == null) {
        return x.shape.slice();
    }
    if (arraysEqual(x.shape, noiseShape)) {
        return noiseShape;
    }
    if (x.shape.length === noiseShape.length) {
        // Substitute null entries with x's dimension size at that axis.
        return noiseShape.map(function (dim, i) {
            return (dim == null && x.shape[i] != null) ? x.shape[i] : dim;
        });
    }
    return noiseShape;
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes dropout.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 2, 1]);
+ * const rate = 0.75;
+ * const output = tf.dropout(x, rate);
+ * output.print();
+ * ```
+ *
+ * @param x A floating point Tensor or TensorLike.
+ * @param rate A float in the range [0, 1). The probability that each element
+ * of x is discarded.
+ * @param noiseShape An array of numbers of type int32, representing the
+ * shape for randomly generated keep/drop flags. If the noiseShape has null
+ * value, it will be automatically replaced with the x's relative dimension
+ * size. Optional.
+ * @param seed Used to create random seeds. Optional.
+ * @returns A Tensor of the same shape of x.
+ *
+ * @doc {heading: 'Operations', subheading: 'Dropout'}
+ */
function dropout_(x, rate, noiseShape, seed) {
    var $x = convertToTensor(x, 'x', 'dropout');
    assert($x.dtype === 'float32', function () { return "x has to be a floating point tensor since it's going to be " +
        ("scaled, but got a " + $x.dtype + " tensor instead."); });
    assert(rate >= 0 && rate < 1, function () { return "rate must be a float in the range [0, 1), but got " + rate + "."; });
    if (rate === 0) {
        // Nothing is dropped. Clone when the caller handed us a Tensor so
        // they never receive an alias of their own input.
        return x instanceof Tensor ? $x.clone() : $x;
    }
    var $noiseShape = getNoiseShape($x, noiseShape);
    var keepProb = 1 - rate;
    // floor(uniform[0,1) + keepProb) yields 1 with probability keepProb and
    // 0 otherwise; dividing by keepProb rescales the surviving activations
    // so the expected value is preserved (inverted dropout).
    var keepMask = floor(add(randomUniform($noiseShape, 0, 1, 'float32', seed), keepProb));
    var multiplier = div(keepMask, keepProb);
    return mul($x, multiplier);
}
var dropout = op({ dropout_: dropout_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
function enclosingPowerOfTwo(value) {
    // Smallest 2**N for integer N such that 2**N >= value.
    var exponent = Math.ceil(Math.log(value) / Math.log(2.0));
    return Math.floor(Math.pow(2, exponent));
}
/**
 * Builds a generalized cosine window (e.g. Hann/Hamming, depending on the
 * a/b coefficients) of the given length as a float32 1-D tensor.
 */
function cosineWindow(windowLength, a, b) {
    // 1 for even lengths, 0 for odd; keeps the window symmetric.
    var even = 1 - windowLength % 2;
    var denominator = windowLength + even - 1;
    var values = new Float32Array(windowLength);
    for (var idx = 0; idx < windowLength; ++idx) {
        values[idx] = a - b * Math.cos((2.0 * Math.PI * idx) / denominator);
    }
    return tensor1d(values, 'float32');
}
+
+ /**
+ * Returns whether the targets are in the top K predictions.
+ *
+ * ```js
+ * const predictions = tf.tensor2d([[20, 10, 40, 30], [30, 50, -20, 10]]);
+ * const targets = tf.tensor1d([2, 0]);
+ * const precision = await tf.inTopKAsync(predictions, targets);
+ * precision.print();
+ * ```
+ * @param predictions 2-D or higher `tf.Tensor` with last dimension being
+ * at least `k`.
+ * @param targets 1-D or higher `tf.Tensor`.
+ * @param k Optional Number of top elements to look at for computing precision,
+ * default to 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Evaluation'}
+ */
function inTopKAsync_(predictions, targets, k) {
    if (k === void 0) { k = 1; }
    // Transpiled async function: __awaiter/__generator implement the await
    // points (the tensor .data() downloads) as a labeled state machine.
    return __awaiter(this, void 0, void 0, function () {
        var $predictions, $targets, lastDim, predictionsVals, targetsVals, _a, batch, size, precision, b, offset, vals, valAndInd, i, i;
        return __generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    // State 0: validate ranks/shapes/k, then start
                    // downloading the prediction values.
                    $predictions = convertToTensor(predictions, 'predictions', 'inTopK');
                    $targets = convertToTensor(targets, 'targets', 'inTopK');
                    assert($predictions.rank > 1, function () { return 'inTopK() expects the predictions to be of rank 2 or higher, ' +
                        ("but got " + $predictions.rank); });
                    assert($predictions.rank - 1 === $targets.rank, function () { return "predictions rank should be 1 larger than " +
                        "targets rank, but got predictions rank " +
                        ($predictions.rank + " and targets rank " + $targets.rank); });
                    assertShapesMatch($predictions.shape.slice(0, $predictions.shape.length - 1), $targets.shape, "predictions's shape should be align with the targets' shape, " +
                        'except the last dimension.');
                    lastDim = $predictions.shape[$predictions.shape.length - 1];
                    assert(k > 0 && k <= lastDim, function () { return "'k' passed to inTopK() must be > 0 && <= the predictions last " +
                        ("dimension (" + lastDim + "), but got " + k); });
                    return [4 /*yield*/, $predictions.data()];
                case 1:
                    // State 1: prediction values downloaded; download targets.
                    predictionsVals = _b.sent();
                    return [4 /*yield*/, $targets.data()];
                case 2:
                    // State 2: both downloads done; compute the result on CPU.
                    targetsVals = _b.sent();
                    // Flatten all leading dims into `batch`; each row has
                    // `size` (= lastDim) candidate values.
                    _a = __read([predictionsVals.length / lastDim, lastDim], 2), batch = _a[0], size = _a[1];
                    precision = getTypedArrayFromDType('bool', batch);
                    for (b = 0; b < batch; b++) {
                        offset = b * size;
                        vals = predictionsVals.subarray(offset, offset + size);
                        valAndInd = [];
                        for (i = 0; i < vals.length; i++) {
                            valAndInd.push({ value: vals[i], index: i });
                        }
                        // Sort this row descending by value, keeping each
                        // value's original index.
                        valAndInd.sort(function (a, b) { return b.value - a.value; });
                        // precision[b] = 1 iff the target index appears among
                        // the top-k sorted entries.
                        precision[b] = 0;
                        for (i = 0; i < k; i++) {
                            if (valAndInd[i].index === targetsVals[b]) {
                                precision[b] = 1;
                                break;
                            }
                        }
                    }
                    // Dispose tensors we created from TensorLike inputs.
                    if (predictions !== $predictions) {
                        $predictions.dispose();
                    }
                    if (targets !== $targets) {
                        $targets.dispose();
                    }
                    // Output precision has the same shape as targets.
                    return [2 /*return*/, tensor(precision, $targets.shape, 'bool')];
            }
        });
    });
}
var inTopKAsync = inTopKAsync_;
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the derivative of the filter of a 2D convolution.
+ *
+ * @param x The input tensor, of rank 4 or rank 3 of shape
+ * [batch, height, width, inChannels]. If rank 3, batch of 1 is assumed.
+ * @param dy The dy image, of rank 4 or rank 3, of shape
+ * [batch, height, width, outDepth]. If rank 3, batch of 1 is assumed.
+ * @param filterShape The shape of the filter, length 4,
+ * [filterHeight, filterWidth, inDepth, outDepth].
+ * @param strides The strides of the convolution: [strideHeight,
+ * strideWidth].
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ */
function conv2DBackpropFilter_(x, dy, filterShape, strides, pad, dataFormat, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    // Promote rank-3 inputs (no batch dimension) to rank 4 with batch = 1.
    var x4D = x;
    if (x.rank === 3) {
        x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
    }
    var dy4D = dy;
    if (dy4D.rank === 3) {
        dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
    }
    assert(x4D.rank === 4, function () { return "Error in conv2dDerFilter: input must be rank 4, but got shape " +
        (x4D.shape + "."); });
    assert(dy4D.rank === 4, function () { return "Error in conv2dDerFilter: dy must be rank 4, but got shape " +
        (dy4D.shape + "."); });
    assert(filterShape.length === 4, function () { return "Error in conv2dDerFilter: filterShape must be length 4, but got " +
        (filterShape + "."); });
    // The channel axis depends on the data format: last for NHWC, second
    // for NCHW.
    var inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];
    var outDepth = dataFormat === 'NHWC' ? dy4D.shape[3] : dy4D.shape[1];
    // Bug fix: the original message read "depth of input X) must match
    // input depth in filter (Y." — the parentheses around both values were
    // malformed.
    assert(inDepth === filterShape[2], function () { return "Error in conv2dDerFilter: depth of input (" + inDepth + ") must " +
        ("match input depth in filter (" + filterShape[2] + ")."); });
    assert(outDepth === filterShape[3], function () { return "Error in conv2dDerFilter: depth of dy (" + outDepth + ") must " +
        ("match output depth for filter (" + filterShape[3] + ")."); });
    checkPadOnDimRoundingMode('conv2dDerFilter', pad, dimRoundingMode);
    var inputs = { x: x4D, dy: dy4D };
    var attrs = { strides: strides, pad: pad, dataFormat: dataFormat, dimRoundingMode: dimRoundingMode, filterShape: filterShape };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    return ENGINE.runKernel(Conv2DBackpropFilter, inputs, attrs);
}
var conv2DBackpropFilter = op({ conv2DBackpropFilter_: conv2DBackpropFilter_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Returns the upstream gradient adjusted for the fused activation:
// identity for a missing/linear activation, masked by step(y) for relu;
// any other activation has no fused-gradient support and throws.
function getFusedDyActivation(dy, y, activation) {
    if (activation == null || activation === 'linear') {
        return dy;
    }
    if (activation === 'relu') {
        // d/dx relu(x) is 1 where y > 0, 0 elsewhere — exactly step(y).
        return mul(dy, step(y));
    }
    throw new Error("Cannot compute gradient for fused activation " + activation + ".");
}
// Gradient of the fused bias-add: sum the activation gradient over the
// axes along which the bias was broadcast, then reshape back to the
// bias's own shape.
function getFusedBiasGradient(bias, dyActivation) {
    var reduceAxes = getReductionAxes(bias.shape, dyActivation.shape);
    var grad = reduceAxes.length > 0 ? sum(dyActivation, reduceAxes) : dyActivation;
    return reshape(grad, bias.shape);
}
// Applies the named activation to `x`. `preluActivationWeights` and
// `leakyreluAlpha` are only consulted for the 'prelu' and 'leakyrelu'
// activations respectively; an unrecognized name throws.
function applyActivation(x, activation, preluActivationWeights, leakyreluAlpha) {
    switch (activation) {
        case 'linear':
            return x;
        case 'relu':
            return relu(x);
        case 'elu':
            return elu(x);
        case 'relu6':
            return relu6(x);
        case 'prelu':
            return prelu(x, preluActivationWeights);
        case 'leakyrelu':
            return leakyRelu(x, leakyreluAlpha);
        case 'sigmoid':
            return sigmoid(x);
        default:
            throw new Error("Unknown fused activation " + activation + ".");
    }
}
// Whether the fused kernel path may be used. Fused kernels only support
// gradients for the linear activation, so once a gradient tape is active
// (gradientDepth > 0) any non-linear activation must fall back to the
// unfused ops.
var shouldFuse = function (gradientDepth, activation) {
    var insideGradient = gradientDepth > 0;
    return !insideGradient || activation === 'linear';
};
+
+ /**
+ * Computes a 2D convolution over the input x, optionally fused with adding a
+ * bias and applying an activation.
+ *
+ * ```js
+ * const inputDepth = 2;
+ * const inShape = [2, 2, 2, inputDepth];
+ * const outputDepth = 2;
+ * const fSize = 1;
+ * const pad = 0;
+ * const strides = 1;
+ *
+ * const x = tf.tensor4d( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ * 16], inShape);
+ * const w = tf.tensor4d([-1, 1, -2, 0.5], [fSize, fSize, inputDepth,
+ * outputDepth]);
+ *
+ * tf.fused.conv2d({ x, filter: w, strides, pad, dataFormat: 'NHWC',
+ * dilations: [1, 1], bias: tf.scalar(5), activation: 'relu' }).print();
+ * ```
+ *
+ * @param obj An object with the following properties:
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter, rank 4, of shape
+ * `[filterHeight, filterWidth, inDepth, outDepth]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid` output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels]. Only "NHWC" is currently supported.
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param bias Tensor to be added to the result.
+ * @param activation Name of activation kernel (defaults to `linear`) to be
+ * applied
+ * after biasAdd.
+ * @param preluActivationWeights Tensor of prelu weights to be applied as part
+ * of a `prelu` activation, typically the same shape as `x`.
+ * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`
+ * activation.
+ */
function fusedConv2d_(_a) {
    var _b;
    // Destructure the options object; dataFormat defaults to 'NHWC',
    // dilations to [1, 1], activation to 'linear'.
    var x = _a.x, filter = _a.filter, strides = _a.strides, pad = _a.pad, _c = _a.dataFormat, dataFormat = _c === void 0 ? 'NHWC' : _c, _d = _a.dilations, dilations = _d === void 0 ? [1, 1] : _d, dimRoundingMode = _a.dimRoundingMode, bias = _a.bias, _e = _a.activation, activation = _e === void 0 ? 'linear' : _e, preluActivationWeights = _a.preluActivationWeights, leakyreluAlpha = _a.leakyreluAlpha;
    activation = activation || 'linear';
    // Inside a gradient tape with a non-linear activation the fused kernel
    // cannot provide a gradient, so run the unfused ops instead.
    if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {
        var result = conv2d$1(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);
        if (bias != null) {
            result = add(result, bias);
        }
        return applyActivation(result, activation, preluActivationWeights, leakyreluAlpha);
    }
    var $x = convertToTensor(x, 'x', 'conv2d', 'float32');
    var $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32');
    // Promote a rank-3 input (no batch dimension) to rank 4 with batch = 1,
    // remembering to strip it again before returning.
    var x4D = $x;
    var reshapedTo4D = false;
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(x4D.rank === 4, function () { return "Error in fused conv2d: input must be rank 4, but got rank " +
        (x4D.rank + "."); });
    assert($filter.rank === 4, function () { return "Error in fused conv2d: filter must be rank 4, but got rank " +
        ($filter.rank + "."); });
    checkPadOnDimRoundingMode('fused conv2d', pad, dimRoundingMode);
    assert(x4D.shape[3] === $filter.shape[2], function () { return "Error in conv2d: depth of input (" + x4D.shape[3] + ") must match " +
        ("input depth for filter " + $filter.shape[2] + "."); });
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in conv2D: Either strides or dilations must be 1. ' +
        ("Got strides " + strides + " and dilations '" + dilations + "'"); });
    assert(dataFormat === 'NHWC', function () { return "Error in conv2d: got dataFormat of " + dataFormat + " but only NHWC is currently supported."; });
    var convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode);
    var $bias;
    if (bias != null) {
        $bias = convertToTensor(bias, 'bias', 'fused conv2d');
        _b = __read(makeTypesMatch($bias, $x), 1), $bias = _b[0];
        // The bias must be broadcastable onto the convolution output.
        assertAndGetBroadcastShape(convInfo.outShape, $bias.shape);
    }
    var $preluActivationWeights;
    if (preluActivationWeights != null) {
        $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused conv2d');
    }
    // Custom gradient: `saved` holds [filter, x4D, y(, bias)] as stored by
    // the forward pass below.
    var grad = function (dy, saved) {
        var _a = __read(saved, 4), $filter = _a[0], x4D = _a[1], y = _a[2], $bias = _a[3];
        // Undo the fused activation before computing the conv gradients.
        var dyActivation = getFusedDyActivation(dy, y, activation);
        assert(tupleValuesAreOne(dilations), function () { return 'Error in gradient of fused conv2D: ' +
            "dilation rates greater than 1 " +
            ("are not yet supported in gradients. Got dilations '" + dilations + "'"); });
        var xDer = conv2DBackpropInput(x4D.shape, dyActivation, $filter, strides, pad);
        var filterDer = conv2DBackpropFilter(x4D, dyActivation, $filter.shape, strides, pad);
        var der = [xDer, filterDer];
        if ($bias != null) {
            var biasDer = getFusedBiasGradient($bias, dyActivation);
            der.push(biasDer);
        }
        return der;
    };
    var inputs = {
        x: x4D,
        filter: $filter,
        bias: $bias,
        preluActivationWeights: $preluActivationWeights
    };
    var attrs = {
        strides: strides,
        pad: pad,
        dataFormat: dataFormat,
        dilations: dilations,
        dimRoundingMode: dimRoundingMode,
        activation: activation,
        leakyreluAlpha: leakyreluAlpha
    };
    // Depending on the params passed in we will have a different number of
    // inputs and thus a different number of elements in the gradient.
    if (bias == null) {
        var customOp = customGrad(function (x4D, filter, save) {
            var res =
            // tslint:disable-next-line: no-unnecessary-type-assertion
            ENGINE.runKernel(FusedConv2D, inputs, attrs);
            save([filter, x4D, res]);
            if (reshapedTo4D) {
                // Strip the synthetic batch dimension added above.
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOp(x4D, $filter);
    }
    else {
        var customOpWithBias = customGrad(function (x4D, filter, bias, save) {
            var res = ENGINE.runKernel(FusedConv2D, inputs, attrs);
            save([filter, x4D, res, bias]);
            if (reshapedTo4D) {
                // Strip the synthetic batch dimension added above.
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOpWithBias(x4D, $filter, $bias);
    }
}
var conv2d = op({ fusedConv2d_: fusedConv2d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Computes the filter gradient of a native depthwise conv2d by delegating
// to the DepthwiseConv2dNativeBackpropFilter kernel.
function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, strides, pad, dilations, dimRoundingMode) {
    if (dilations === void 0) { dilations = [1, 1]; }
    // Promote rank-3 tensors (no batch dimension) to rank 4 with batch = 1.
    var x4D = x.rank === 3 ?
        reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]) :
        x;
    var dy4D = dy.rank === 3 ?
        reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]) :
        dy;
    var inputs = { x: x4D, dy: dy4D };
    var attrs = { strides: strides, pad: pad, dimRoundingMode: dimRoundingMode, dilations: dilations, filterShape: filterShape };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    return ENGINE.runKernel(DepthwiseConv2dNativeBackpropFilter, inputs, attrs);
}
var depthwiseConv2dNativeBackpropFilter = op({ depthwiseConv2dNativeBackpropFilter_: depthwiseConv2dNativeBackpropFilter_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Computes the input gradient of a native depthwise conv2d by delegating
// to the DepthwiseConv2dNativeBackpropInput kernel.
function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, strides, pad, dilations, dimRoundingMode) {
    if (dilations === void 0) { dilations = [1, 1]; }
    // Promote a rank-3 dy (no batch dimension) to rank 4 with batch = 1.
    var hadNoBatchDim = dy.rank === 3;
    var dy4D = hadNoBatchDim ?
        reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]) :
        dy;
    var inputs = { dy: dy4D, filter: filter };
    var attrs = { strides: strides, pad: pad, dimRoundingMode: dimRoundingMode, dilations: dilations, inputShape: xShape };
    var res =
        // tslint:disable-next-line: no-unnecessary-type-assertion
        ENGINE.runKernel(DepthwiseConv2dNativeBackpropInput, inputs, attrs);
    // Strip the synthetic batch dimension again so the caller gets back the
    // same rank it passed in.
    if (hadNoBatchDim) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var depthwiseConv2dNativeBackpropInput = op({ depthwiseConv2dNativeBackpropInput_: depthwiseConv2dNativeBackpropInput_ });
+
+ /**
+ * Computes depthwise 2D convolution, optionally fused with adding a
+ * bias and applying an activation.
+ *
+ * Given a 4D `input` array and a `filter` array of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing
+ * `inChannels` convolutional filters of depth 1, this op applies a
+ * different filter to each input channel (expanding from 1 channel to
+ * `channelMultiplier` channels for each), then concatenates the results
+ * together. The output has `inChannels * channelMultiplier` channels.
+ *
+ * See
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
+ * for more details.
+ *
+ * @param obj An object with the following properties:
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter tensor, rank 4, of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`. If strides is a single number, then `strideHeight ==
+ * strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels]. Only "NHWC" is currently supported.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param bias Tensor to be added to the result.
+ * @param activation Name of activation kernel (defaults to `linear`).
+ * @param preluActivationWeights Tensor of prelu weights to be applied as part
+ * of a `prelu` activation, typically the same shape as `x`.
+ * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`
+ * activation.
+ */
function fusedDepthwiseConv2d_(_a) {
    var _b;
    // Destructure the options object; dataFormat defaults to 'NHWC',
    // dilations to [1, 1], activation to 'linear'.
    var x = _a.x, filter = _a.filter, strides = _a.strides, pad = _a.pad, _c = _a.dataFormat, dataFormat = _c === void 0 ? 'NHWC' : _c, _d = _a.dilations, dilations = _d === void 0 ? [1, 1] : _d, dimRoundingMode = _a.dimRoundingMode, bias = _a.bias, _e = _a.activation, activation = _e === void 0 ? 'linear' : _e, preluActivationWeights = _a.preluActivationWeights, leakyreluAlpha = _a.leakyreluAlpha;
    // Inside a gradient tape with a non-linear activation the fused kernel
    // cannot provide a gradient, so run the unfused ops instead.
    if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {
        var result = depthwiseConv2d$1(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);
        if (bias != null) {
            result = add(result, bias);
        }
        return applyActivation(result, activation, preluActivationWeights, leakyreluAlpha);
    }
    var $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32');
    var $filter = convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32');
    // Promote a rank-3 input (no batch dimension) to rank 4 with batch = 1,
    // remembering to strip it again before returning.
    var x4D = $x;
    var reshapedTo4D = false;
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(x4D.rank === 4, function () { return "Error in fused depthwiseConv2d: input must be rank 4, but got " +
        ("rank " + x4D.rank + "."); });
    assert($filter.rank === 4, function () { return "Error in fused depthwiseConv2d: filter must be rank 4, " +
        ("but got rank " + $filter.rank + "."); });
    assert(x4D.shape[3] === $filter.shape[2], function () { return "Error in fused depthwiseConv2d: number of input channels " +
        ("(" + x4D.shape[3] + ") must match the inChannels dimension in ") +
        ("filter " + $filter.shape[2] + "."); });
    if (dilations == null) {
        dilations = [1, 1];
    }
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in fused depthwiseConv2d: Either strides or dilations must ' +
        ("be 1. Got strides " + strides + " and dilations '" + dilations + "'"); });
    checkPadOnDimRoundingMode('fused depthwiseConv2d', pad, dimRoundingMode);
    var convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode, true /* depthwise */);
    var $bias;
    if (bias != null) {
        // NOTE(review): the op label below says 'fused conv2d'; presumably
        // it was meant to read 'fused depthwiseConv2d' — it only affects
        // error-message text, verify against upstream before changing.
        $bias = convertToTensor(bias, 'bias', 'fused conv2d');
        _b = __read(makeTypesMatch($bias, $x), 1), $bias = _b[0];
        // The bias must be broadcastable onto the convolution output.
        assertAndGetBroadcastShape(convInfo.outShape, $bias.shape);
    }
    var $preluActivationWeights;
    if (preluActivationWeights != null) {
        $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused depthwiseConv2d');
    }
    // Custom gradient: `saved` holds [filter, x4D, y(, bias)] as stored by
    // the forward pass below. The saved `bias` is the same tensor as the
    // closure's `$bias`, which is what getFusedBiasGradient receives.
    var grad = function (dy, saved) {
        assert(tupleValuesAreOne(dilations), function () { return 'Error in gradient of fused depthwiseConv2d: dilation rates ' +
            "greater than 1 are not yet supported. Got dilations " +
            ("'" + dilations + "'"); });
        var _a = __read(saved, 4), $filter = _a[0], x4D = _a[1], y = _a[2], bias = _a[3];
        // Undo the fused activation before computing the conv gradients.
        var dyActivation = getFusedDyActivation(dy, y, activation);
        var xDer = depthwiseConv2dNativeBackpropInput(x4D.shape, dyActivation, $filter, strides, pad, dilations, dimRoundingMode);
        var filterDer = depthwiseConv2dNativeBackpropFilter(x4D, dyActivation, $filter.shape, strides, pad, dilations, dimRoundingMode);
        if (bias != null) {
            var biasDer = getFusedBiasGradient($bias, dyActivation);
            return [xDer, filterDer, biasDer];
        }
        return [xDer, filterDer];
    };
    var inputs = {
        x: x4D,
        filter: $filter,
        bias: $bias,
        preluActivationWeights: $preluActivationWeights
    };
    var attrs = {
        strides: strides,
        pad: pad,
        dataFormat: dataFormat,
        dilations: dilations,
        dimRoundingMode: dimRoundingMode,
        activation: activation,
        leakyreluAlpha: leakyreluAlpha
    };
    // Depending on the params passed in we will have a different number of
    // inputs and thus a different number of elements in the gradient.
    if (bias == null) {
        var customOp = customGrad(function (x4D, filter, save) {
            // tslint:disable-next-line: no-unnecessary-type-assertion
            var res = ENGINE.runKernel(FusedDepthwiseConv2D, inputs, attrs);
            save([filter, x4D, res]);
            if (reshapedTo4D) {
                // Strip the synthetic batch dimension added above.
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOp(x4D, $filter);
    }
    else {
        var customOpWithBias = customGrad(function (x4D, filter, bias, save) {
            // tslint:disable-next-line: no-unnecessary-type-assertion
            var res = ENGINE.runKernel(FusedDepthwiseConv2D, inputs, attrs);
            save([filter, x4D, res, bias]);
            if (reshapedTo4D) {
                // Strip the synthetic batch dimension added above.
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOpWithBias(x4D, $filter, $bias);
    }
}
var depthwiseConv2d = op({ fusedDepthwiseConv2d_: fusedDepthwiseConv2d_ });
+
+ /**
+ * Computes the dot product of two matrices with optional activation and bias.
+ *
+ * ```js
+ * const a = tf.tensor2d([-1, -2], [1, 2]);
+ * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const bias = tf.tensor2d([1, 2], [1, 2]);
+ *
+ * tf.fused.matMul({a, b, bias, activation: 'relu'}).print();
+ * ```
+ *
+ * @param obj An object with the following properties:
+ * - `a` First matrix in dot product operation.
+ * - `b` Second matrix in dot product operation.
+ * - `transposeA` If true, `a` is transposed before multiplication.
+ * - `transposeB` If true, `b` is transposed before multiplication.
+ * - `bias` Matrix to be added to the result.
+ * - `activation` Name of activation kernel (defaults to `linear`).
+ * - `preluActivationWeights` Tensor of prelu weights.
+ * - `leakyreluAlpha` Alpha of leakyrelu.
+ */
+ function fusedMatMul_(_a) {
+ var _b, _c;
+ var a = _a.a, b = _a.b, _d = _a.transposeA, transposeA = _d === void 0 ? false : _d, _e = _a.transposeB, transposeB = _e === void 0 ? false : _e, bias = _a.bias, _f = _a.activation, activation = _f === void 0 ? 'linear' : _f, preluActivationWeights = _a.preluActivationWeights, leakyreluAlpha = _a.leakyreluAlpha;
+ if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {
+ var result = matMul$1(a, b, transposeA, transposeB);
+ if (bias != null) {
+ result = add(result, bias);
+ }
+ return applyActivation(result, activation, preluActivationWeights, leakyreluAlpha);
+ }
+ var $a = convertToTensor(a, 'a', 'fused matMul');
+ var $b = convertToTensor(b, 'b', 'fused matMul');
+ _b = __read(makeTypesMatch($a, $b), 2), $a = _b[0], $b = _b[1];
+ var innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];
+ var innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];
+ var outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];
+ var outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];
+ var outerDimsA = $a.shape.slice(0, -2);
+ var outerDimsB = $b.shape.slice(0, -2);
+ var batchDimA = sizeFromShape(outerDimsA);
+ var batchDimB = sizeFromShape(outerDimsB);
+ assert(innerShapeA === innerShapeB, function () { return "Error in fused matMul: inner shapes (" + innerShapeA + ") and (" +
+ (innerShapeB + ") of Tensors with shapes " + $a.shape + " and ") +
+ ($b.shape + " and transposeA=" + transposeA) +
+ (" and transposeB=" + transposeB + " must match."); });
+ var outShapeOuterDims = assertAndGetBroadcastShape($a.shape.slice(0, -2), $b.shape.slice(0, -2));
+ var outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);
+ var a3D = transposeA ?
+ reshape($a, [batchDimA, innerShapeA, outerShapeA]) :
+ reshape($a, [batchDimA, outerShapeA, innerShapeA]);
+ var b3D = transposeB ?
+ reshape($b, [batchDimB, outerShapeB, innerShapeB]) :
+ reshape($b, [batchDimB, innerShapeB, outerShapeB]);
+ var $bias;
+ if (bias != null) {
+ $bias = convertToTensor(bias, 'bias', 'fused matMul');
+ _c = __read(makeTypesMatch($bias, $a), 1), $bias = _c[0];
+ assertAndGetBroadcastShape(outShape, $bias.shape);
+ }
+ var $preluActivationWeights;
+ if (preluActivationWeights != null) {
+ $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused matMul');
+ }
+ var grad = function (dy, saved) {
+ var _a = __read(saved, 4), a3D = _a[0], b3D = _a[1], y = _a[2], $bias = _a[3];
+ // we reshape dy because the result of the forward is not
+ // necessarily going to be a 3d tensor due to a reshape done at the end of
+ // the customOp.
+ var dyActivation = getFusedDyActivation(reshape(dy, y.shape), y, activation);
+ var aDer;
+ var bDer;
+ if (!transposeA && !transposeB) {
+ aDer = matMul$1(dyActivation, b3D, false, true);
+ bDer = matMul$1(a3D, dyActivation, true, false);
+ }
+ else if (!transposeA && transposeB) {
+ aDer = matMul$1(dyActivation, b3D, false, false);
+ bDer = matMul$1(dyActivation, a3D, true, false);
+ }
+ else if (transposeA && !transposeB) {
+ aDer = matMul$1(b3D, dyActivation, false, true);
+ bDer = matMul$1(a3D, dyActivation, false, false);
+ }
+ else {
+ aDer = matMul$1(b3D, dyActivation, true, true);
+ bDer = matMul$1(dyActivation, a3D, true, true);
+ }
+ if (bias != null) {
+ var biasDer = getFusedBiasGradient($bias, dyActivation);
+ return [aDer, bDer, biasDer];
+ }
+ else {
+ return [aDer, bDer];
+ }
+ };
+ var inputs = {
+ a: a3D,
+ b: b3D,
+ bias: $bias,
+ preluActivationWeights: $preluActivationWeights
+ };
+ var attrs = { transposeA: transposeA, transposeB: transposeB, activation: activation, leakyreluAlpha: leakyreluAlpha };
+ // Depending on the params passed in we will have different number of
+ // inputs and thus a different number of elements in the gradient.
+ if (bias == null) {
+ var customOp = customGrad(function (a3D, b3D, save) {
+ var res =
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ ENGINE.runKernel(_FusedMatMul, inputs, attrs);
+ save([a3D, b3D, res]);
+ return { value: reshape(res, outShape), gradFunc: grad };
+ });
+ return customOp(a3D, b3D);
+ }
+ else {
+ var customOpWithBias = customGrad(function (a3D, b3D, $bias, save) {
+ var res =
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ ENGINE.runKernel(_FusedMatMul, inputs, attrs);
+ save([a3D, b3D, res, $bias]);
+ return { value: reshape(res, outShape), gradFunc: grad };
+ });
+ return customOpWithBias(a3D, b3D, $bias);
+ }
+ }
+ var matMul = op({ fusedMatMul_: fusedMatMul_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+ // Namespace object collecting the fused (op + bias + activation) variants.
+ // `__proto__: null` gives the object no prototype, so only these keys exist.
+ var fused_ops = {
+ __proto__: null,
+ conv2d: conv2d,
+ depthwiseConv2d: depthwiseConv2d,
+ matMul: matMul
+ };
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Generate a Hamming window.
+ *
+ * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
+ *
+ * ```js
+ * tf.signal.hammingWindow(10).print();
+ * ```
+ * @param windowLength The length of the window.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
+ function hammingWindow_(windowLength) {
+ // Hamming window = 0.54 - 0.46 * cos(2*pi*n / (N-1)); delegates to the
+ // shared cosineWindow helper with a=0.54, b=0.46.
+ return cosineWindow(windowLength, 0.54, 0.46);
+ }
+ var hammingWindow = op({ hammingWindow_: hammingWindow_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Generate a Hann window.
+ *
+ * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
+ *
+ * ```js
+ * tf.signal.hannWindow(10).print();
+ * ```
+ * @param windowLength The length of the window.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
+ function hannWindow_(windowLength) {
+ // Hann window = 0.5 - 0.5 * cos(2*pi*n / (N-1)); delegates to the shared
+ // cosineWindow helper with a=0.5, b=0.5.
+ return cosineWindow(windowLength, 0.5, 0.5);
+ }
+ var hannWindow = op({ hannWindow_: hannWindow_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Expands input into frames of frameLength.
+ * Slides a window size with frameStep.
+ *
+ * ```js
+ * tf.signal.frame([1, 2, 3], 2, 1).print();
+ * ```
+ * @param signal The input tensor to be expanded
+ * @param frameLength Length of each frame
+ * @param frameStep The frame hop size in samples.
+ * @param padEnd Whether to pad the end of signal with padValue.
+ * @param padValue An number to use where the input signal does
+ * not exist when padEnd is True.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
+ function frame_(signal, frameLength, frameStep, padEnd, padValue) {
+ if (padEnd === void 0) { padEnd = false; }
+ if (padValue === void 0) { padValue = 0; }
+ var start = 0;
+ var output = [];
+ // Collect every full frame that fits entirely inside the signal.
+ while (start + frameLength <= signal.size) {
+ output.push(slice(signal, start, frameLength));
+ start += frameStep;
+ }
+ if (padEnd) {
+ // Emit the trailing partial frames, right-padding each with padValue
+ // so every frame is exactly frameLength samples long.
+ while (start < signal.size) {
+ var padLen = (start + frameLength) - signal.size;
+ var pad = concat([
+ slice(signal, start, frameLength - padLen), fill([padLen], padValue)
+ ]);
+ output.push(pad);
+ start += frameStep;
+ }
+ }
+ if (output.length === 0) {
+ // No frames produced: return an empty tensor of shape [0, frameLength].
+ return tensor2d([], [0, frameLength]);
+ }
+ // Stack the collected 1-D frames into a [numFrames, frameLength] tensor.
+ return reshape(concat(output), [output.length, frameLength]);
+ }
+ var frame = op({ frame_: frame_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the Short-time Fourier Transform of signals
+ * See: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
+ *
+ * ```js
+ * const input = tf.tensor1d([1, 1, 1, 1, 1])
+ * tf.signal.stft(input, 3, 1).print();
+ * ```
+ * @param signal 1-dimensional real value tensor.
+ * @param frameLength The window length of samples.
+ * @param frameStep The number of samples to step.
+ * @param fftLength The size of the FFT to apply.
+ * @param windowFn A callable that takes a window length and returns 1-d tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
+ function stft_(signal, frameLength, frameStep, fftLength, windowFn) {
+ if (windowFn === void 0) { windowFn = hannWindow; }
+ if (fftLength == null) {
+ // Default the FFT size to the smallest power of two >= frameLength.
+ fftLength = enclosingPowerOfTwo(frameLength);
+ }
+ // Split into (possibly overlapping) frames, apply the window function to
+ // each frame, then take the real FFT of the windowed frames.
+ var framedSignal = frame(signal, frameLength, frameStep);
+ var windowedSignal = mul(framedSignal, windowFn(frameLength));
+ return rfft(windowedSignal, fftLength);
+ }
+ var stft = op({ stft_: stft_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts crops from the input image tensor and resizes them using bilinear
+ * sampling or nearest neighbor sampling (possibly with aspect ratio change)
+ * to a common output size specified by cropSize.
+ *
+ * @param image 4d tensor of shape `[batch,imageHeight,imageWidth, depth]`,
+ * where imageHeight and imageWidth must be positive, specifying the
+ * batch of images from which to take crops
+ * @param boxes 2d float32 tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the normalized
+ * coordinates of the box in the boxInd[i]'th image in the batch
+ * @param boxInd 1d int32 tensor of shape `[numBoxes]` with values in range
+ * `[0, batch)` that specifies the image that the `i`-th box refers to.
+ * @param cropSize 1d int32 tensor of 2 elements `[cropHeight, cropWidth]`
+ * specifying the size to which all crops are resized to.
+ * @param method Optional string from `'bilinear' | 'nearest'`,
+ * defaults to bilinear, which specifies the sampling method for resizing
+ * @param extrapolationValue The value used for extrapolation when sampling
+ * falls outside the image boundary. Defaults to 0.
+ * @return A 4D tensor of the shape `[numBoxes,cropHeight,cropWidth,depth]`
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function cropAndResize_(image, boxes, boxInd, cropSize, method, extrapolationValue) {
+ if (method === void 0) { method = 'bilinear'; }
+ if (extrapolationValue === void 0) { extrapolationValue = 0; }
+ // Normalize all three inputs to tensors with the dtypes the kernel expects.
+ var $image = convertToTensor(image, 'image', 'cropAndResize');
+ var $boxes = convertToTensor(boxes, 'boxes', 'cropAndResize', 'float32');
+ var $boxInd = convertToTensor(boxInd, 'boxInd', 'cropAndResize', 'int32');
+ var numBoxes = $boxes.shape[0];
+ // Validate ranks/shapes and options before dispatching to the kernel.
+ assert($image.rank === 4, function () { return 'Error in cropAndResize: image must be rank 4,' +
+ ("but got rank " + $image.rank + "."); });
+ assert($boxes.rank === 2 && $boxes.shape[1] === 4, function () { return "Error in cropAndResize: boxes must be have size [" + numBoxes + ",4] " +
+ ("but had shape " + $boxes.shape + "."); });
+ // NOTE(review): this message interpolates $boxes.shape; it likely was meant
+ // to report $boxInd.shape — confirm against upstream before changing.
+ assert($boxInd.rank === 1 && $boxInd.shape[0] === numBoxes, function () { return "Error in cropAndResize: boxInd must be have size [" + numBoxes + "] " +
+ ("but had shape " + $boxes.shape + "."); });
+ assert(cropSize.length === 2, function () { return "Error in cropAndResize: cropSize must be of length 2, but got " +
+ ("length " + cropSize.length + "."); });
+ assert(cropSize[0] >= 1 && cropSize[1] >= 1, function () { return "cropSize must be atleast [1,1], but was " + cropSize; });
+ assert(method === 'bilinear' || method === 'nearest', function () { return "method must be bilinear or nearest, but was " + method; });
+ var inputs = { image: $image, boxes: $boxes, boxInd: $boxInd };
+ var attrs = { method: method, extrapolationValue: extrapolationValue, cropSize: cropSize };
+ var res = ENGINE.runKernel(CropAndResize, inputs, attrs);
+ return res;
+ }
+ var cropAndResize = op({ cropAndResize_: cropAndResize_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Flips the image left to right. Currently available in the CPU, WebGL, and
+ * WASM backends.
+ *
+ * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} */
+ function flipLeftRight_(image) {
+ // Convert the input to a float32 tensor; 'image' names it in error messages.
+ var $image = convertToTensor(image, 'image', 'flipLeftRight', 'float32');
+ assert($image.rank === 4, function () { return 'Error in flipLeftRight: image must be rank 4,' +
+ ("but got rank " + $image.rank + "."); });
+ // The kernel takes no attributes — the flip is fully determined by the input.
+ var inputs = { image: $image };
+ var res = ENGINE.runKernel(FlipLeftRight, inputs, {});
+ return res;
+ }
+ var flipLeftRight = op({ flipLeftRight_: flipLeftRight_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Converts images from grayscale to RGB format.
+ *
+ * @param image A grayscale tensor to convert. The `image`'s last dimension must
+ * be size 1 with at least a two-dimensional shape.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function grayscaleToRGB_(image) {
+ var $image = convertToTensor(image, 'image', 'grayscaleToRGB');
+ var lastDimsIdx = $image.rank - 1;
+ var lastDims = $image.shape[lastDimsIdx];
+ assert($image.rank >= 2, function () { return 'Error in grayscaleToRGB: images must be at least rank 2, ' +
+ ("but got rank " + $image.rank + "."); });
+ assert(lastDims === 1, function () { return 'Error in grayscaleToRGB: last dimension of a grayscale image ' +
+ ("should be size 1, but got size " + lastDims + "."); });
+ // Build a tile multiplier of [1, 1, ..., 1, 3]: replicate only the channel
+ // (last) dimension three times to produce R, G and B from the single channel.
+ var reps = new Array($image.rank);
+ reps.fill(1, 0, lastDimsIdx);
+ reps[lastDimsIdx] = 3;
+ return tile($image, reps);
+ }
+ var grayscaleToRGB = op({ grayscaleToRGB_: grayscaleToRGB_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Rotates the input image tensor counter-clockwise with an optional offset
+ * center of rotation. Currently available in the CPU, WebGL, and WASM backends.
+ *
+ * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
+ * @param radians The amount of rotation.
+ * @param fillValue The value to fill in the empty space leftover
+ * after rotation. Can be either a single grayscale value (0-255), or an
+ * array of three numbers `[red, green, blue]` specifying the red, green,
+ * and blue channels. Defaults to `0` (black).
+ * @param center The center of rotation. Can be either a single value (0-1), or
+ * an array of two numbers `[centerX, centerY]`. Defaults to `0.5` (rotates
+ * the image around its center).
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function rotateWithOffset_(image, radians, fillValue, center) {
+ // Defaults: fill the uncovered area with black and rotate about the center.
+ if (fillValue === void 0) { fillValue = 0; }
+ if (center === void 0) { center = 0.5; }
+ var $image = convertToTensor(image, 'image', 'rotateWithOffset', 'float32');
+ assert($image.rank === 4, function () { return 'Error in rotateWithOffset: image must be rank 4,' +
+ ("but got rank " + $image.rank + "."); });
+ var inputs = { image: $image };
+ var attrs = { radians: radians, fillValue: fillValue, center: center };
+ var res = ENGINE.runKernel(RotateWithOffset, inputs, attrs);
+ return res;
+ }
+ var rotateWithOffset = op({ rotateWithOffset_: rotateWithOffset_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Validates and normalizes the arguments shared by all nonMaxSuppression
+ // variants. Returns the (possibly defaulted / clamped) option values.
+ function nonMaxSuppSanityCheck(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
+ // Default: 50% IOU overlap suppresses a box.
+ if (iouThreshold == null) {
+ iouThreshold = 0.5;
+ }
+ // Default: accept any score.
+ if (scoreThreshold == null) {
+ scoreThreshold = Number.NEGATIVE_INFINITY;
+ }
+ // Default: 0 disables soft-NMS (hard suppression).
+ if (softNmsSigma == null) {
+ softNmsSigma = 0.0;
+ }
+ var numBoxes = boxes.shape[0];
+ // Never ask for more outputs than there are boxes.
+ maxOutputSize = Math.min(maxOutputSize, numBoxes);
+ assert(0 <= iouThreshold && iouThreshold <= 1, function () { return "iouThreshold must be in [0, 1], but was '" + iouThreshold + "'"; });
+ assert(boxes.rank === 2, function () { return "boxes must be a 2D tensor, but was of rank '" + boxes.rank + "'"; });
+ assert(boxes.shape[1] === 4, function () { return "boxes must have 4 columns, but 2nd dimension was " + boxes.shape[1]; });
+ assert(scores.rank === 1, function () { return 'scores must be a 1D tensor'; });
+ assert(scores.shape[0] === numBoxes, function () { return "scores has incompatible shape with boxes. Expected " + numBoxes + ", " +
+ ("but was " + scores.shape[0]); });
+ assert(0 <= softNmsSigma && softNmsSigma <= 1, function () { return "softNmsSigma must be in [0, 1], but was '" + softNmsSigma + "'"; });
+ return { maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, scoreThreshold: scoreThreshold, softNmsSigma: softNmsSigma };
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union).
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @return A 1D tensor with the selected box indices.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function nonMaxSuppression_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
+ if (iouThreshold === void 0) { iouThreshold = 0.5; }
+ if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
+ var $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression', 'float32');
+ var $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression', 'float32');
+ // Shared validation; also clamps maxOutputSize to the number of boxes and
+ // fills in defaulted thresholds.
+ var inputs = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);
+ maxOutputSize = inputs.maxOutputSize;
+ iouThreshold = inputs.iouThreshold;
+ scoreThreshold = inputs.scoreThreshold;
+ var attrs = { maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, scoreThreshold: scoreThreshold };
+ return ENGINE.runKernel(NonMaxSuppressionV3, { boxes: $boxes, scores: $scores }, attrs);
+ }
+ var nonMaxSuppression = op({ nonMaxSuppression_: nonMaxSuppression_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Inserts a value into a sorted array. This method allows duplicate, meaning it
+ * allows inserting duplicate value, in which case, the element will be inserted
+ * at the lowest index of the value.
+ * @param arr The array to modify.
+ * @param element The element to insert.
+ * @param comparator Optional. If no comparator is specified, elements are
+ * compared using array_util.defaultComparator, which is suitable for Strings
+ * and Numbers in ascending arrays. If the array contains multiple instances of
+ * the target value, the left-most instance will be returned. To provide a
+ * comparator, it should take 2 arguments to compare and return a negative,
+ * zero, or a positive number.
+ */
+ function binaryInsert(arr, element, comparator) {
+ var index = binarySearch(arr, element, comparator);
+ // binarySearch encodes "not found" as (-insertionPoint - 1); decode it back
+ // to the raw insertion index. Found values insert at the left-most match.
+ var insertionPoint = index < 0 ? -(index + 1) : index;
+ // Mutates arr in place.
+ arr.splice(insertionPoint, 0, element);
+ }
+ /**
+ * Searches the array for the target using binary search, returns the index
+ * of the found element, or position to insert if element not found. If no
+ * comparator is specified, elements are compared using array_
+ * util.defaultComparator, which is suitable for Strings and Numbers in
+ * ascending arrays. If the array contains multiple instances of the target
+ * value, the left-most instance will be returned.
+ * @param arr The array to be searched in.
+ * @param target The target to be searched for.
+ * @param comparator Should take 2 arguments to compare and return a negative,
+ * zero, or a positive number.
+ * @return Lowest index of the target value if found, otherwise the insertion
+ * point where the target should be inserted, in the form of
+ * (-insertionPoint - 1).
+ */
+ function binarySearch(arr, target, comparator) {
+ // Fall back to the natural-ordering comparator when none is supplied.
+ return binarySearch_(arr, target, comparator || defaultComparator);
+ }
+ /**
+ * Compares its two arguments for order.
+ * @param a The first element to be compared.
+ * @param b The second element to be compared.
+ * @return A negative number, zero, or a positive number as the first
+ * argument is less than, equal to, or greater than the second.
+ */
+ function defaultComparator(a, b) {
+ // Three-way compare using the values' natural < / > ordering.
+ return a > b ? 1 : a < b ? -1 : 0;
+ }
+ // Core binary search. Returns the left-most index of target if found,
+ // otherwise (-insertionPoint - 1). See binarySearch for the public contract.
+ function binarySearch_(arr, target, comparator) {
+ var left = 0;
+ var right = arr.length;
+ var middle = 0;
+ var found = false;
+ while (left < right) {
+ // Unsigned shift avoids overflow of (left + right) / 2 on large arrays.
+ middle = left + ((right - left) >>> 1);
+ var compareResult = comparator(target, arr[middle]);
+ if (compareResult > 0) {
+ left = middle + 1;
+ }
+ else {
+ right = middle;
+ // If compareResult is 0, the value is found. We record it is found,
+ // and then keep looking because there may be duplicate.
+ found = !compareResult;
+ }
+ }
+ // `left` is now the left-most match (if found) or the insertion point.
+ return found ? left : -left - 1;
+ }
+
+ // V3: plain hard NMS — indices only, softNmsSigma fixed to 0.
+ function nonMaxSuppressionV3Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
+ return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0 /* softNmsSigma */);
+ }
+ // V4: hard NMS with optional zero-padding of the output to maxOutputSize,
+ // plus the count of valid (unpadded) outputs.
+ function nonMaxSuppressionV4Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) {
+ return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0 /* softNmsSigma */, false /* returnScoresTensor */, padToMaxOutputSize /* padToMaxOutputSize */, true
+ /* returnValidOutputs */ );
+ }
+ // V5: soft-NMS (Gaussian score decay via softNmsSigma); also returns the
+ // possibly-decayed scores of the selected boxes.
+ function nonMaxSuppressionV5Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
+ return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, true /* returnScoresTensor */);
+ }
+ // Shared CPU implementation backing the V3/V4/V5 NMS variants above.
+ // `boxes` is a flat array of [y1, x1, y2, x2] quadruples; `scores` is the
+ // per-box score array. Returns { selectedIndices } plus, depending on the
+ // flags, selectedScores (V5) and validOutputs (V4).
+ function nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, returnScoresTensor, padToMaxOutputSize, returnValidOutputs) {
+ if (returnScoresTensor === void 0) { returnScoresTensor = false; }
+ if (padToMaxOutputSize === void 0) { padToMaxOutputSize = false; }
+ if (returnValidOutputs === void 0) { returnValidOutputs = false; }
+ // The list is sorted in ascending order, so that we can always pop the
+ // candidate with the largest score in O(1) time.
+ var candidates = [];
+ for (var i = 0; i < scores.length; i++) {
+ if (scores[i] > scoreThreshold) {
+ candidates.push({ score: scores[i], boxIndex: i, suppressBeginIndex: 0 });
+ }
+ }
+ candidates.sort(ascendingComparator);
+ // If softNmsSigma is 0, the outcome of this algorithm is exactly same as
+ // before.
+ var scale = softNmsSigma > 0 ? (-0.5 / softNmsSigma) : 0.0;
+ var selectedIndices = [];
+ var selectedScores = [];
+ while (selectedIndices.length < maxOutputSize && candidates.length > 0) {
+ var candidate = candidates.pop();
+ var originalScore = candidate.score, boxIndex = candidate.boxIndex, suppressBeginIndex = candidate.suppressBeginIndex;
+ // Candidates are popped best-first, so once one falls below the
+ // threshold every remaining candidate does too.
+ if (originalScore < scoreThreshold) {
+ break;
+ }
+ // Overlapping boxes are likely to have similar scores, therefore we
+ // iterate through the previously selected boxes backwards in order to
+ // see if candidate's score should be suppressed. We use
+ // suppressBeginIndex to track and ensure a candidate can be suppressed
+ // by a selected box no more than once. Also, if the overlap exceeds
+ // iouThreshold, we simply ignore the candidate.
+ var ignoreCandidate = false;
+ for (var j = selectedIndices.length - 1; j >= suppressBeginIndex; --j) {
+ var iou = intersectionOverUnion(boxes, boxIndex, selectedIndices[j]);
+ if (iou >= iouThreshold) {
+ ignoreCandidate = true;
+ break;
+ }
+ // Soft-NMS: decay the score by the Gaussian overlap penalty instead
+ // of dropping the box outright (no-op when scale is 0).
+ candidate.score =
+ candidate.score * suppressWeight(iouThreshold, scale, iou);
+ if (candidate.score <= scoreThreshold) {
+ break;
+ }
+ }
+ // At this point, if `candidate.score` has not dropped below
+ // `scoreThreshold`, then we know that we went through all of the
+ // previous selections and can safely update `suppressBeginIndex` to the
+ // end of the selected array. Then we can re-insert the candidate with
+ // the updated score and suppressBeginIndex back in the candidate list.
+ // If on the other hand, `candidate.score` has dropped below the score
+ // threshold, we will not add it back to the candidates list.
+ candidate.suppressBeginIndex = selectedIndices.length;
+ if (!ignoreCandidate) {
+ // Candidate has passed all the tests, and is not suppressed, so
+ // select the candidate.
+ if (candidate.score === originalScore) {
+ selectedIndices.push(boxIndex);
+ selectedScores.push(candidate.score);
+ }
+ else if (candidate.score > scoreThreshold) {
+ // Candidate's score is suppressed but is still high enough to be
+ // considered, so add back to the candidates list.
+ binaryInsert(candidates, candidate, ascendingComparator);
+ }
+ }
+ }
+ // NonMaxSuppressionV4 feature: padding output to maxOutputSize.
+ var validOutputs = selectedIndices.length;
+ var elemsToPad = maxOutputSize - validOutputs;
+ if (padToMaxOutputSize && elemsToPad > 0) {
+ selectedIndices.push.apply(selectedIndices, __spread(new Array(elemsToPad).fill(0)));
+ selectedScores.push.apply(selectedScores, __spread(new Array(elemsToPad).fill(0.0)));
+ }
+ var result = { selectedIndices: selectedIndices };
+ if (returnScoresTensor) {
+ result['selectedScores'] = selectedScores;
+ }
+ if (returnValidOutputs) {
+ result['validOutputs'] = validOutputs;
+ }
+ return result;
+ }
+ // IOU of boxes i and j in the flat [y1, x1, y2, x2]-per-box array.
+ function intersectionOverUnion(boxes, i, j) {
+ var iCoord = boxes.subarray(i * 4, i * 4 + 4);
+ var jCoord = boxes.subarray(j * 4, j * 4 + 4);
+ // Normalize with min/max so corner order within a box does not matter.
+ var yminI = Math.min(iCoord[0], iCoord[2]);
+ var xminI = Math.min(iCoord[1], iCoord[3]);
+ var ymaxI = Math.max(iCoord[0], iCoord[2]);
+ var xmaxI = Math.max(iCoord[1], iCoord[3]);
+ var yminJ = Math.min(jCoord[0], jCoord[2]);
+ var xminJ = Math.min(jCoord[1], jCoord[3]);
+ var ymaxJ = Math.max(jCoord[0], jCoord[2]);
+ var xmaxJ = Math.max(jCoord[1], jCoord[3]);
+ var areaI = (ymaxI - yminI) * (xmaxI - xminI);
+ var areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ);
+ // Degenerate (zero-area) boxes cannot overlap anything.
+ if (areaI <= 0 || areaJ <= 0) {
+ return 0.0;
+ }
+ var intersectionYmin = Math.max(yminI, yminJ);
+ var intersectionXmin = Math.max(xminI, xminJ);
+ var intersectionYmax = Math.min(ymaxI, ymaxJ);
+ var intersectionXmax = Math.min(xmaxI, xmaxJ);
+ // Clamp at 0 so disjoint boxes contribute no negative "intersection".
+ var intersectionArea = Math.max(intersectionYmax - intersectionYmin, 0.0) *
+ Math.max(intersectionXmax - intersectionXmin, 0.0);
+ return intersectionArea / (areaI + areaJ - intersectionArea);
+ }
+ // A Gaussian penalty function, this method always returns values in [0, 1].
+ // The weight is a function of similarity: the more two boxes overlap, the
+ // smaller the weight is, meaning highly overlapping boxes will be
+ // significantly penalized. A non-overlapping box will not be penalized.
+ function suppressWeight(iouThreshold, scale, iou) {
+ // scale is -0.5/softNmsSigma (or 0 for hard NMS, making the weight 1).
+ var weight = Math.exp(scale * iou * iou);
+ // Past the IOU threshold the box is suppressed entirely.
+ return iou <= iouThreshold ? weight : 0.0;
+ }
+ // Ascending-by-score comparator for NMS candidates.
+ function ascendingComparator(c1, c2) {
+ // For objects with same scores, we make the object with the larger index go
+ // first. In an array that pops from the end, this means that the object with
+ // the smaller index will be popped first. This ensures the same output as
+ // the TensorFlow python version.
+ return (c1.score - c2.score) ||
+ ((c1.score === c2.score) && (c2.boxIndex - c1.boxIndex));
+ }
+
+ /**
+ * Performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union).
+ *
+ * This is the async version of `nonMaxSuppression`
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @return A 1D tensor with the selected box indices.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ // NOTE: the body below is tsc-downleveled async/await (an __awaiter/__generator
+ // state machine); `case 0` runs up to the first `await`, `case 1` resumes after.
+ function nonMaxSuppressionAsync_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
+ if (iouThreshold === void 0) { iouThreshold = 0.5; }
+ if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
+ return __awaiter(this, void 0, void 0, function () {
+ var $boxes, $scores, inputs, boxesAndScores, boxesVals, scoresVals, selectedIndices;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');
+ $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');
+ inputs = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);
+ maxOutputSize = inputs.maxOutputSize;
+ iouThreshold = inputs.iouThreshold;
+ scoreThreshold = inputs.scoreThreshold;
+ // Download both tensors' data in parallel, then run NMS on the CPU.
+ return [4 /*yield*/, Promise.all([$boxes.data(), $scores.data()])];
+ case 1:
+ boxesAndScores = _a.sent();
+ boxesVals = boxesAndScores[0];
+ scoresVals = boxesAndScores[1];
+ selectedIndices = nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold).selectedIndices;
+ // Dispose only tensors created by convertToTensor; tensors the
+ // caller passed in remain owned by the caller.
+ if ($boxes !== boxes) {
+ $boxes.dispose();
+ }
+ if ($scores !== scores) {
+ $scores.dispose();
+ }
+ return [2 /*return*/, tensor1d(selectedIndices, 'int32')];
+ }
+ });
+ });
+ }
+ var nonMaxSuppressionAsync = nonMaxSuppressionAsync_;
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union).
+ *
+ * This op also supports a Soft-NMS mode (c.f.
+ * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
+ * of other overlapping boxes, therefore favoring different regions of the image
+ * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`
+ * parameter to be larger than 0.
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @param softNmsSigma A float representing the sigma parameter for Soft NMS.
+ * When sigma is 0, it falls back to nonMaxSuppression.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - selectedScores: A 1D tensor with the corresponding scores for each
+ * selected box.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function nonMaxSuppressionWithScore_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
+ if (iouThreshold === void 0) { iouThreshold = 0.5; }
+ if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
+ if (softNmsSigma === void 0) { softNmsSigma = 0.0; }
+ var $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression');
+ var $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression');
+ // Sanity check also normalizes/clamps the numeric parameters.
+ var params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
+ maxOutputSize = params.maxOutputSize;
+ iouThreshold = params.iouThreshold;
+ scoreThreshold = params.scoreThreshold;
+ softNmsSigma = params.softNmsSigma;
+ var inputs = { boxes: $boxes, scores: $scores };
+ var attrs = { maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, scoreThreshold: scoreThreshold, softNmsSigma: softNmsSigma };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ // The V5 kernel returns a pair of tensors: [selectedIndices, selectedScores].
+ var result = ENGINE.runKernel(NonMaxSuppressionV5, inputs, attrs);
+ return { selectedIndices: result[0], selectedScores: result[1] };
+ }
+ var nonMaxSuppressionWithScore = op({ nonMaxSuppressionWithScore_: nonMaxSuppressionWithScore_ });
+
+ /**
+ * Asynchronously performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union).
+ *
+ * This op also supports a Soft-NMS mode (c.f.
+ * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
+ * of other overlapping boxes, therefore favoring different regions of the image
+ * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`
+ * parameter to be larger than 0.
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @param softNmsSigma A float representing the sigma parameter for Soft NMS.
+ * When sigma is 0, it falls back to nonMaxSuppression.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - selectedScores: A 1D tensor with the corresponding scores for each
+ * selected box.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ // Downleveled async function: `case 0` prepares inputs, `case 1` resumes after
+ // the awaited data download and runs the CPU Soft-NMS implementation.
+ function nonMaxSuppressionWithScoreAsync_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
+ if (iouThreshold === void 0) { iouThreshold = 0.5; }
+ if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
+ if (softNmsSigma === void 0) { softNmsSigma = 0.0; }
+ return __awaiter(this, void 0, void 0, function () {
+ var $boxes, $scores, params, boxesAndScores, boxesVals, scoresVals, _a, selectedIndices, selectedScores;
+ return __generator(this, function (_b) {
+ switch (_b.label) {
+ case 0:
+ $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');
+ $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');
+ params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
+ maxOutputSize = params.maxOutputSize;
+ iouThreshold = params.iouThreshold;
+ scoreThreshold = params.scoreThreshold;
+ softNmsSigma = params.softNmsSigma;
+ return [4 /*yield*/, Promise.all([$boxes.data(), $scores.data()])];
+ case 1:
+ boxesAndScores = _b.sent();
+ boxesVals = boxesAndScores[0];
+ scoresVals = boxesAndScores[1];
+ _a = nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma), selectedIndices = _a.selectedIndices, selectedScores = _a.selectedScores;
+ // Free converted copies only; caller-owned tensors are untouched.
+ if ($boxes !== boxes) {
+ $boxes.dispose();
+ }
+ if ($scores !== scores) {
+ $scores.dispose();
+ }
+ return [2 /*return*/, {
+ selectedIndices: tensor1d(selectedIndices, 'int32'),
+ selectedScores: tensor1d(selectedScores)
+ }];
+ }
+ });
+ });
+ }
+ var nonMaxSuppressionWithScoreAsync = nonMaxSuppressionWithScoreAsync_;
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union), with an option to pad results.
+ * (This is the synchronous variant; see nonMaxSuppressionPaddedAsync below.)
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @param padToMaxOutputSize Defaults to false. If true, size of output
+ * `selectedIndices` is padded to maxOutputSize.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - validOutputs: A scalar denoting how many elements in `selectedIndices`
+ * are valid. Valid elements occur first, then padding.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function nonMaxSuppressionPadded_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) {
+ if (iouThreshold === void 0) { iouThreshold = 0.5; }
+ if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
+ if (padToMaxOutputSize === void 0) { padToMaxOutputSize = false; }
+ var $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression');
+ var $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression');
+ var params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */);
+ var $maxOutputSize = params.maxOutputSize;
+ var $iouThreshold = params.iouThreshold;
+ var $scoreThreshold = params.scoreThreshold;
+ var inputs = { boxes: $boxes, scores: $scores };
+ var attrs = {
+ maxOutputSize: $maxOutputSize,
+ iouThreshold: $iouThreshold,
+ scoreThreshold: $scoreThreshold,
+ padToMaxOutputSize: padToMaxOutputSize
+ };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ // The V4 kernel returns a pair of tensors: [selectedIndices, validOutputs].
+ var result = ENGINE.runKernel(NonMaxSuppressionV4, inputs, attrs);
+ return { selectedIndices: result[0], validOutputs: result[1] };
+ }
+ var nonMaxSuppressionPadded = op({ nonMaxSuppressionPadded_: nonMaxSuppressionPadded_ });
+
+ /**
+ * Asynchronously performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union), with an option to pad results.
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @param padToMaxOutputSize Defaults to false. If true, size of output
+ * `selectedIndices` is padded to maxOutputSize.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - validOutputs: A scalar denoting how many elements in `selectedIndices`
+ * are valid. Valid elements occur first, then padding.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ // Downleveled async function; resumes in `case 1` after both data downloads.
+ function nonMaxSuppressionPaddedAsync_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) {
+ if (iouThreshold === void 0) { iouThreshold = 0.5; }
+ if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
+ if (padToMaxOutputSize === void 0) { padToMaxOutputSize = false; }
+ return __awaiter(this, void 0, void 0, function () {
+ var $boxes, $scores, params, $maxOutputSize, $iouThreshold, $scoreThreshold, _a, boxesVals, scoresVals, _b, selectedIndices, validOutputs;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
+ case 0:
+ $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');
+ $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');
+ params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */);
+ $maxOutputSize = params.maxOutputSize;
+ $iouThreshold = params.iouThreshold;
+ $scoreThreshold = params.scoreThreshold;
+ return [4 /*yield*/, Promise.all([$boxes.data(), $scores.data()])];
+ case 1:
+ _a = __read.apply(void 0, [_c.sent(), 2]), boxesVals = _a[0], scoresVals = _a[1];
+ _b = nonMaxSuppressionV4Impl(boxesVals, scoresVals, $maxOutputSize, $iouThreshold, $scoreThreshold, padToMaxOutputSize), selectedIndices = _b.selectedIndices, validOutputs = _b.validOutputs;
+ // Free converted copies only; caller-owned tensors are untouched.
+ if ($boxes !== boxes) {
+ $boxes.dispose();
+ }
+ if ($scores !== scores) {
+ $scores.dispose();
+ }
+ return [2 /*return*/, {
+ selectedIndices: tensor1d(selectedIndices, 'int32'),
+ validOutputs: scalar(validOutputs, 'int32')
+ }];
+ }
+ });
+ });
+ }
+ var nonMaxSuppressionPaddedAsync = nonMaxSuppressionPaddedAsync_;
+
+ /**
+ * Bilinear resize a single 3D image or a batch of 3D images to a new shape.
+ *
+ * @param images The images, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param size The new shape `[newHeight, newWidth]` to resize the
+ * images to. Each channel is resized individually.
+ * @param alignCorners Defaults to `false`. If true, rescale
+ * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4
+ * corners of images and resized images. If false, rescale by
+ * `new_height / height`. Treat similarly the width dimension.
+ * @param halfPixelCenters Defaults to `false`. Whether to assume pixel centers
+ * are at 0.5, which would make the floating point coordinates of the top
+ * left pixel 0.5, 0.5.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function resizeBilinear_(images, size, alignCorners, halfPixelCenters) {
+ if (alignCorners === void 0) { alignCorners = false; }
+ if (halfPixelCenters === void 0) { halfPixelCenters = false; }
+ var $images = convertToTensor(images, 'images', 'resizeBilinear');
+ assert($images.rank === 3 || $images.rank === 4, function () { return "Error in resizeBilinear: x must be rank 3 or 4, but got " +
+ ("rank " + $images.rank + "."); });
+ assert(size.length === 2, function () { return "Error in resizeBilinear: new shape must 2D, but got shape " +
+ (size + "."); });
+ assert(halfPixelCenters === false || alignCorners === false, function () { return "Error in resizeBilinear: If halfPixelCenters is true, " +
+ "alignCorners must be false."; });
+ // Promote a rank-3 image to a batch of 1 so the kernel always sees rank 4.
+ var batchImages = $images;
+ var reshapedTo4D = false;
+ if ($images.rank === 3) {
+ reshapedTo4D = true;
+ batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]);
+ }
+ // NOTE(review): `__read(size, 0)` reads zero elements — it appears to be a
+ // leftover of downleveled destructuring with no effect; confirm against the
+ // TypeScript source before removing.
+ __read(size, 0);
+ var inputs = { images: batchImages };
+ var attrs = { alignCorners: alignCorners, halfPixelCenters: halfPixelCenters, size: size };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ var res = ENGINE.runKernel(ResizeBilinear, inputs, attrs);
+ // Undo the batch-of-1 promotion so the caller gets back a rank-3 result.
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ var resizeBilinear = op({ resizeBilinear_: resizeBilinear_ });
+
+ /**
+ * NearestNeighbor resize a batch of 3D images to a new shape.
+ *
+ * @param images The images, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param size The new shape `[newHeight, newWidth]` to resize the
+ * images to. Each channel is resized individually.
+ * @param alignCorners Defaults to False. If true, rescale
+ * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4
+ * corners of images and resized images. If false, rescale by
+ * `new_height / height`. Treat similarly the width dimension.
+ * @param halfPixelCenters Defaults to `false`. Whether to assumes pixels are of
+ * half the actual dimensions, and yields more accurate resizes. This flag
+ * would also make the floating point coordinates of the top left pixel
+ * 0.5, 0.5.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function resizeNearestNeighbor_(images, size, alignCorners, halfPixelCenters) {
+ if (alignCorners === void 0) { alignCorners = false; }
+ if (halfPixelCenters === void 0) { halfPixelCenters = false; }
+ var $images = convertToTensor(images, 'images', 'resizeNearestNeighbor');
+ assert($images.rank === 3 || $images.rank === 4, function () { return "Error in resizeNearestNeighbor: x must be rank 3 or 4, but got " +
+ ("rank " + $images.rank + "."); });
+ assert(size.length === 2, function () { return "Error in resizeNearestNeighbor: new shape must 2D, but got shape " +
+ (size + "."); });
+ assert($images.dtype === 'float32' || $images.dtype === 'int32', function () { return '`images` must have `int32` or `float32` as dtype'; });
+ assert(halfPixelCenters === false || alignCorners === false, function () { return "Error in resizeNearestNeighbor: If halfPixelCenters is true, " +
+ "alignCorners must be false."; });
+ // Promote a rank-3 image to a batch of 1 so the kernel always sees rank 4.
+ var batchImages = $images;
+ var reshapedTo4D = false;
+ if ($images.rank === 3) {
+ reshapedTo4D = true;
+ batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]);
+ }
+ // NOTE(review): `__read(size, 0)` reads zero elements — apparently a no-op
+ // artifact of downleveled destructuring; confirm against the TS source.
+ __read(size, 0);
+ var inputs = { images: batchImages };
+ var attrs = { alignCorners: alignCorners, halfPixelCenters: halfPixelCenters, size: size };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ var res = ENGINE.runKernel(ResizeNearestNeighbor, inputs, attrs);
+ // Undo the batch-of-1 promotion so the caller gets back a rank-3 result.
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ var resizeNearestNeighbor = op({ resizeNearestNeighbor_: resizeNearestNeighbor_ });
+
+ /**
+ * Performs image binarization with corresponding threshold
+ * (depends on the method) value, which creates a binary image from a grayscale.
+ * @param image 3d tensor of shape [imageHeight,imageWidth, depth],
+ * where imageHeight and imageWidth must be positive.The image color
+ * range should be [0, 255].
+ * @param method Optional string from `'binary' | 'otsu'`
+ * which specifies the method for thresholding. Defaults to 'binary'.
+ * @param inverted Optional boolean which specifies
+ * if colours should be inverted. Defaults to false.
+ * @param threshValue Optional number which defines threshold value from 0 to 1.
+ * Defaults to 0.5.
+ * @return A 3d tensor of shape [imageHeight,imageWidth, depth], which
+ * contains binarized image.
+ */
+ function threshold_(image, method, inverted, threshValue) {
+ var _a;
+ if (method === void 0) { method = 'binary'; }
+ if (inverted === void 0) { inverted = false; }
+ if (threshValue === void 0) { threshValue = 0.5; }
+ var $image = convertToTensor(image, 'image', 'threshold');
+ /* 0.2989, 0.5870, 0.1140 represent the luma coefficients in CCIR601.
+ Reference for converting between RGB and grayscale: https://en.wikipedia.org/wiki/Luma_%28video%29 */
+ var RED_INTENCITY_COEF = 0.2989;
+ var GREEN_INTENCITY_COEF = 0.5870;
+ var BLUE_INTENCITY_COEF = 0.1140;
+ var totalPixelsInImage = $image.shape[0] * $image.shape[1];
+ // Scale the [0, 1] threshold parameter into the image's [0, 255] range; this
+ // value is replaced below when method === 'otsu'.
+ var $threshold = mul(tensor1d([threshValue]), 255);
+ var r, g, b, grayscale;
+ assert($image.rank === 3, function () { return 'Error in threshold: image must be rank 3,' +
+ ("but got rank " + $image.rank + "."); });
+ assert($image.shape[2] === 3 || $image.shape[2] === 1, function () { return 'Error in threshold: ' +
+ 'image color channel must be equal to 3 or 1' +
+ ("but got " + $image.shape[2] + "."); });
+ assert($image.dtype === 'int32' || $image.dtype === 'float32', function () { return 'Error in dtype: image dtype must be int32 or float32,' +
+ ("but got dtype " + $image.dtype + "."); });
+ assert(method === 'otsu' || method === 'binary', function () { return "Method must be binary or otsu, but was " + method; });
+ if ($image.shape[2] === 3) {
+ // Weighted RGB-to-grayscale conversion using the CCIR601 luma weights.
+ _a = __read(split$1($image, [1, 1, 1], -1), 3), r = _a[0], g = _a[1], b = _a[2];
+ var $r = mul(r, RED_INTENCITY_COEF);
+ var $g = mul(g, GREEN_INTENCITY_COEF);
+ var $b = mul(b, BLUE_INTENCITY_COEF);
+ grayscale = add(add($r, $g), $b);
+ }
+ else {
+ // NOTE(review): this branch uses the raw `image` argument, not the
+ // converted `$image`; subsequent ops re-convert it, but confirm intent.
+ grayscale = image;
+ }
+ if (method === 'otsu') {
+ // Build a 256-bin histogram of rounded intensities and derive the
+ // threshold from it instead of the user-supplied value.
+ var $histogram = bincount(cast(round(grayscale), 'int32'), tensor([]), 256);
+ $threshold = otsu($histogram, totalPixelsInImage);
+ }
+ var invCondition = inverted ?
+ lessEqual(grayscale, $threshold) : greater(grayscale, $threshold);
+ // Boolean mask * 255 gives the binary (0/255) int32 output image.
+ var result = cast(mul(invCondition, 255), 'int32');
+ return result;
+ }
+ // Computes Otsu's threshold for a grayscale histogram: scans every split
+ // point and keeps the one maximizing the between-class variance
+ // weightFg * weightBg * (meanFg - meanBg)^2. Returns a 1-element tensor with
+ // the winning bin index (-1 if the histogram has fewer than 2 bins).
+ function otsu(histogram, total) {
+ var bestThresh = tensor1d([-1]);
+ var bestInBetVar = tensor1d([0]);
+ var cInBetVar = tensor1d([0]);
+ var classFirst, classSecond, meanFirst, meanSec, weightForeground, weightBack;
+ for (var index = 0; index < histogram.size - 1; index++) {
+ // Split histogram into bins [0, index] and [index+1, end).
+ classFirst = slice(histogram, 0, index + 1);
+ classSecond = slice(histogram, index + 1);
+ weightForeground = div(sum(classFirst), total);
+ weightBack = div(sum(classSecond), total);
+ // Mean intensity of each class, weighting each bin by its index.
+ var meanFirstDivA = sum(mul(classFirst, range(0, classFirst.size)));
+ meanFirst = div(meanFirstDivA, sum(classFirst));
+ var meanSecFill = fill(classSecond.shape, classFirst.size);
+ var meanSecAdd = add(range(0, classSecond.size), meanSecFill);
+ var meanSecMul = mul(classSecond, (meanSecAdd));
+ meanSec = div(sum(meanSecMul), sum(classSecond));
+ // Between-class variance: w_fg * w_bg * (mean_fg - mean_bg)^2.
+ var cInBetVarSubA = sub(meanFirst, meanSec);
+ var cInBetVarSubB = sub(meanFirst, meanSec);
+ var cInBetVarMul = mul(weightForeground, weightBack);
+ cInBetVar = mul(mul(cInBetVarMul, cInBetVarSubA), cInBetVarSubB);
+ // Keep the split with the largest between-class variance so far.
+ var condition = greater(cInBetVar, bestInBetVar);
+ bestInBetVar = where(condition, cInBetVar, bestInBetVar);
+ bestThresh = where(condition, tensor1d([index]), bestThresh);
+ }
+ return bestThresh;
+ }
+ var threshold = op({ threshold_: threshold_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Applies the given transform(s) to the image(s).
+ *
+ * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
+ * @param transforms Projective transform matrix/matrices. A tensor1d of length
+ * 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0
+ * b1, b2, c0, c1], then it maps the output point (x, y) to a transformed
+ * input point (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k),
+ * where k = c0 x + c1 y + 1. The transforms are inverted compared to the
+ * transform mapping input points to output points.
+ * @param interpolation Interpolation mode.
+ * Supported values: 'nearest', 'bilinear'. Default to 'nearest'.
+ * @param fillMode Points outside the boundaries of the input are filled
+ * according to the given mode, one of 'constant', 'reflect', 'wrap',
+ * 'nearest'. Default to 'constant'.
+ * 'reflect': (d c b a | a b c d | d c b a ) The input is extended by
+ * reflecting about the edge of the last pixel.
+ * 'constant': (k k k k | a b c d | k k k k) The input is extended by
+ * filling all values beyond the edge with the same constant value k.
+ * 'wrap': (a b c d | a b c d | a b c d) The input is extended by
+ * wrapping around to the opposite edge.
+ * 'nearest': (a a a a | a b c d | d d d d) The input is extended by
+ * the nearest pixel.
+ * @param fillValue A float represents the value to be filled outside the
+ * boundaries when fillMode is 'constant'.
+ * @param outputShape Output dimension after the transform, [height, width].
+ * If undefined, output is the same size as input image.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function transform_(image, transforms, interpolation, fillMode, fillValue, outputShape) {
+ if (interpolation === void 0) { interpolation = 'nearest'; }
+ if (fillMode === void 0) { fillMode = 'constant'; }
+ if (fillValue === void 0) { fillValue = 0; }
+ var $image = convertToTensor(image, 'image', 'transform', 'float32');
+ var $transforms = convertToTensor(transforms, 'transforms', 'transform', 'float32');
+ assert($image.rank === 4, function () { return 'Error in transform: image must be rank 4,' +
+ ("but got rank " + $image.rank + "."); });
+ // One transform per batch element, or a single transform broadcast to all.
+ assert($transforms.rank === 2 &&
+ ($transforms.shape[0] === $image.shape[0] ||
+ $transforms.shape[0] === 1) &&
+ $transforms.shape[1] === 8, function () { return "Error in transform: Input transform should be batch x 8 or 1 x 8"; });
+ assert(outputShape == null || outputShape.length === 2, function () { return 'Error in transform: outputShape must be [height, width] or null, ' +
+ ("but got " + outputShape + "."); });
+ var inputs = { image: $image, transforms: $transforms };
+ var attrs = { interpolation: interpolation, fillMode: fillMode, fillValue: fillValue, outputShape: outputShape };
+ return ENGINE.runKernel(Transform, inputs, attrs);
+ }
+ var transform = op({ transform_: transform_ });
+
+ /**
+ * Copy a tensor setting everything outside a central band in each innermost
+ * matrix to zero.
+ *
+ * The band part is computed as follows: Assume input has `k` dimensions
+ * `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where
+ * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
+ * The indicator function
+ * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower))`
+ * `&& (num_upper < 0 || (n-m) <= num_upper)`
+ *
+ * ```js
+ * const x = tf.tensor2d([[ 0, 1, 2, 3],
+ * [-1, 0, 1, 2],
+ * [-2, -1, 0, 1],
+ * [-3, -2, -1, 0]]);
+ * let y = tf.linalg.bandPart(x, 1, -1);
+ * y.print(); // [[ 0, 1, 2, 3],
+ * // [-1, 0, 1, 2],
+ * // [ 0, -1, 0, 1],
+ * // [ 0, 0 , -1, 0]]
+ * let z = tf.linalg.bandPart(x, 2, 1);
+ * z.print(); // [[ 0, 1, 0, 0],
+ * // [-1, 0, 1, 0],
+ * // [-2, -1, 0, 1],
+ * // [ 0, -2, -1, 0]]
+ * ```
+ *
+ * @param x Rank `k` tensor
+ * @param numLower Number of subdiagonals to keep.
+ * If negative, keep entire lower triangle.
+ * @param numUpper Number of subdiagonals to keep.
+ * If negative, keep entire upper triangle.
+ * @returns Rank `k` tensor of the same shape as input.
+ * The extracted banded tensor.
+ *
+ * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}
+ */
+ function bandPart_(a, numLower, numUpper) {
+ assert(numLower % 1 === 0, function () { return "bandPart(): numLower must be an integer, got " + numLower + "."; });
+ assert(numUpper % 1 === 0, function () { return "bandPart(): numUpper must be an integer, got " + numUpper + "."; });
+ var $a = convertToTensor(a, 'a', 'bandPart');
+ assert($a.rank >= 2, function () { return "bandPart(): Rank must be at least 2, got " + $a.rank + "."; });
+ var shape = $a.shape;
+ // M x N are the dimensions of each innermost matrix.
+ var _a = __read($a.shape.slice(-2), 2), M = _a[0], N = _a[1];
+ if (!(numLower <= M)) {
+ throw new Error("bandPart(): numLower (" + numLower + ")" +
+ (" must not be greater than the number of rows (" + M + ")."));
+ }
+ if (!(numUpper <= N)) {
+ throw new Error("bandPart(): numUpper (" + numUpper + ")" +
+ (" must not be greater than the number of columns (" + N + ")."));
+ }
+ // Negative bounds mean "keep the entire triangle".
+ if (numLower < 0) {
+ numLower = M;
+ }
+ if (numUpper < 0) {
+ numUpper = N;
+ }
+ // Build the boolean in-band mask from the row-minus-column index grid.
+ var i = reshape(range(0, M, 1, 'int32'), [-1, 1]);
+ var j = range(0, N, 1, 'int32');
+ var ij = sub(i, j);
+ var inBand = logicalAnd(lessEqual(ij, scalar(+numLower, 'int32')), greaterEqual(ij, scalar(-numUpper, 'int32')));
+ var zero = zeros([M, N], $a.dtype);
+ // Apply the mask to every innermost matrix, then restore the input shape.
+ return reshape(stack(unstack(reshape($a, [-1, M, N]))
+ .map(function (mat) { return where(inBand, mat, zero); })), shape);
+ }
+ var bandPart = op({ bandPart_: bandPart_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Gram-Schmidt orthogonalization.
+ *
+ * ```js
+ * const x = tf.tensor2d([[1, 2], [3, 4]]);
+ * let y = tf.linalg.gramSchmidt(x);
+ * y.print();
+ * console.log('Othogonalized:');
+ * y.dot(y.transpose()).print(); // should be nearly the identity matrix.
+ * console.log('First row direction maintained:');
+ * const data = await y.array();
+ * console.log(data[0][1] / data[0][0]); // should be nearly 2.
+ * ```
+ *
+ * @param xs The vectors to be orthogonalized, in one of the two following
+ * formats:
+ * - An Array of `tf.Tensor1D`.
+ * - A `tf.Tensor2D`, i.e., a matrix, in which case the vectors are the rows
+ * of `xs`.
+ * In each case, all the vectors must have the same length and the length
+ * must be greater than or equal to the number of vectors.
+ * @returns The orthogonalized and normalized vectors or matrix.
+ * Orthogonalization means that the vectors or the rows of the matrix
+ * are orthogonal (zero inner products). Normalization means that each
+ * vector or each row of the matrix has an L2 norm that equals `1`.
+ *
+ * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}
+ */
    /**
     * Implementation of Gram-Schmidt orthogonalization (see the doc comment
     * above for the public contract).
     */
    function gramSchmidt_(xs) {
        // True when the caller passed a single 2D tensor whose rows are the
        // vectors; used at the end to re-pack the result as a matrix.
        var inputIsTensor2D;
        if (Array.isArray(xs)) {
            inputIsTensor2D = false;
            assert(xs != null && xs.length > 0, function () { return 'Gram-Schmidt process: input must not be null, undefined, or ' +
                'empty'; });
            // All vectors must have the same length as the first one.
            var dim_1 = xs[0].shape[0];
            var _loop_1 = function (i) {
                assert(xs[i].shape[0] === dim_1, function () { return 'Gram-Schmidt: Non-unique lengths found in the input vectors: ' +
                    ("(" + xs[i].shape[0] + " vs. " + dim_1 + ")"); });
            };
            for (var i = 1; i < xs.length; ++i) {
                _loop_1(i);
            }
        }
        else {
            inputIsTensor2D = true;
            // Split the matrix into its rows so both input formats share the
            // same code path below.
            xs = split$1(xs, xs.shape[0], 0).map(function (x) { return squeeze(x, [0]); });
        }
        assert(xs.length <= xs[0].shape[0], function () { return "Gram-Schmidt: Number of vectors (" + xs.length + ") exceeds " +
            ("number of dimensions (" + xs[0].shape[0] + ")."); });
        var ys = [];
        var xs1d = xs;
        // Classical Gram-Schmidt: subtract from each vector its projection
        // onto every previously produced basis vector, then normalize to unit
        // L2 norm. ENGINE.tidy frees the per-iteration intermediate tensors.
        var _loop_2 = function (i) {
            ys.push(ENGINE.tidy(function () {
                var x = xs1d[i];
                if (i > 0) {
                    for (var j = 0; j < i; ++j) {
                        // proj = <ys[j], x> * ys[j]
                        var proj = mul(sum(mul(ys[j], x)), ys[j]);
                        x = sub(x, proj);
                    }
                }
                return div(x, norm(x, 'euclidean'));
            }));
        };
        for (var i = 0; i < xs.length; ++i) {
            _loop_2(i);
        }
        if (inputIsTensor2D) {
            // Re-pack the orthonormal vectors as the rows of a 2D tensor.
            return stack(ys, 0);
        }
        else {
            return ys;
        }
    }
    var gramSchmidt = op({ gramSchmidt_: gramSchmidt_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Disposes any `tf.Tensor`s found within the provided object.
+ *
+ * @param container an object that may be a `tf.Tensor` or may directly
+ * contain `tf.Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. If
+ * the object is not a `tf.Tensor` or does not contain `Tensors`, nothing
+ * happens. In general it is safe to pass any object here, except that
+ * `Promise`s are not supported.
+ *
+ * @doc {heading: 'Performance', subheading: 'Memory'}
+ */
+ function dispose(container) {
+ var tensors = getTensorsInContainer(container);
+ tensors.forEach(function (tensor) { return tensor.dispose(); });
+ }
+
+ /**
+ * Compute QR decomposition of m-by-n matrix using Householder transformation.
+ *
+ * Implementation based on
+ * [http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf]
+ * (http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf)
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 2], [3, 4]]);
+ * let [q, r] = tf.linalg.qr(a);
+ * console.log('Q');
+ * q.print();
+ * console.log('R');
+ * r.print();
+ * console.log('Orthogonalized');
+ * q.dot(q.transpose()).print() // should be nearly the identity matrix.
+ * console.log('Reconstructed');
+ * q.dot(r).print(); // should be nearly [[1, 2], [3, 4]];
+ * ```
+ *
+ * @param x The `tf.Tensor` to be QR-decomposed. Must have rank >= 2. Suppose
+ * it has the shape `[..., M, N]`.
+ * @param fullMatrices An optional boolean parameter. Defaults to `false`.
+ * If `true`, compute full-sized `Q`. If `false` (the default),
+ * compute only the leading N columns of `Q` and `R`.
+ * @returns An `Array` of two `tf.Tensor`s: `[Q, R]`. `Q` is a unitary matrix,
+ * i.e., its columns all have unit norm and are mutually orthogonal.
+ * If `M >= N`,
+ * If `fullMatrices` is `false` (default),
+ * - `Q` has a shape of `[..., M, N]`,
+ * - `R` has a shape of `[..., N, N]`.
     *     If `fullMatrices` is `true`,
+ * - `Q` has a shape of `[..., M, M]`,
+ * - `R` has a shape of `[..., M, N]`.
+ * If `M < N`,
+ * - `Q` has a shape of `[..., M, M]`,
+ * - `R` has a shape of `[..., M, N]`.
+ * @throws If the rank of `x` is less than 2.
+ *
+ * @doc {heading:'Operations',
+ * subheading:'Linear Algebra',
+ * namespace:'linalg'}
+ */
+ function qr_(x, fullMatrices) {
+ if (fullMatrices === void 0) { fullMatrices = false; }
+ assert(x.rank >= 2, function () { return "qr() requires input tensor to have a rank >= 2, but got rank " + x.rank; });
+ if (x.rank === 2) {
+ return qr2d(x, fullMatrices);
+ }
+ else {
+ // Rank > 2.
+ // TODO(cais): Below we split the input into individual 2D tensors,
+ // perform QR decomposition on them and then stack the results back
+ // together. We should explore whether this can be parallelized.
+ var outerDimsProd = x.shape.slice(0, x.shape.length - 2)
+ .reduce(function (value, prev) { return value * prev; });
+ var x2ds = unstack(reshape(x, [
+ outerDimsProd, x.shape[x.shape.length - 2],
+ x.shape[x.shape.length - 1]
+ ]), 0);
+ var q2ds_1 = [];
+ var r2ds_1 = [];
+ x2ds.forEach(function (x2d) {
+ var _a = __read(qr2d(x2d, fullMatrices), 2), q2d = _a[0], r2d = _a[1];
+ q2ds_1.push(q2d);
+ r2ds_1.push(r2d);
+ });
+ var q = reshape(stack(q2ds_1, 0), x.shape);
+ var r = reshape(stack(r2ds_1, 0), x.shape);
+ return [q, r];
+ }
+ }
    /**
     * QR decomposition of a single 2D matrix using Householder reflections.
     * Returns [Q, R]; when `fullMatrices` is false and m > n, Q is sliced to
     * [m, n] and R to [n, n].
     */
    function qr2d(x, fullMatrices) {
        if (fullMatrices === void 0) { fullMatrices = false; }
        return ENGINE.tidy(function () {
            assert(x.shape.length === 2, function () { return "qr2d() requires a 2D Tensor, but got a " + x.shape.length + "D Tensor."; });
            var m = x.shape[0];
            var n = x.shape[1];
            var q = eye(m); // Orthogonal transform so far.
            var r = clone(x); // Transformed matrix so far.
            var one2D = tensor2d([[1]], [1, 1]);
            var w = clone(one2D);
            // One Householder step per column (or per row if m < n).
            var iters = m >= n ? n : m;
            var _loop_1 = function (j) {
                var _a;
                // This tidy within the for-loop ensures we clean up temporary
                // tensors as soon as they are no longer needed.
                // Keep handles to the previous iteration's q/r/w so they can be
                // disposed after the inner tidy replaces them.
                var rTemp = r;
                var wTemp = w;
                var qTemp = q;
                _a = __read(ENGINE.tidy(function () {
                    // Find H = I - tau * w * w', to put zeros below R(j, j).
                    var rjEnd1 = slice(r, [j, j], [m - j, 1]);
                    var normX = norm(rjEnd1);
                    var rjj = slice(r, [j, j], [1, 1]);
                    // The sign() function returns 0 on 0, which causes division by zero.
                    var s = where(greater(rjj, 0), tensor2d([[-1]]), tensor2d([[1]]));
                    var u1 = sub(rjj, mul(s, normX));
                    var wPre = div(rjEnd1, u1);
                    // Householder vector w, with its first entry fixed to 1.
                    if (wPre.shape[0] === 1) {
                        w = clone(one2D);
                    }
                    else {
                        w = concat([
                            one2D,
                            slice(wPre, [1, 0], [wPre.shape[0] - 1, wPre.shape[1]])
                        ], 0);
                    }
                    var tau = neg(div(matMul$1(s, u1), normX));
                    // -- R := HR, Q := QH.
                    // Only the trailing [m - j, n] block of R (and the trailing
                    // columns of Q) are affected; rows/columns above/left of j
                    // are already reduced and are re-concatenated unchanged.
                    var rjEndAll = slice(r, [j, 0], [m - j, n]);
                    var tauTimesW = mul(tau, w);
                    var wT = transpose(w);
                    if (j === 0) {
                        r = sub(rjEndAll, matMul$1(tauTimesW, matMul$1(wT, rjEndAll)));
                    }
                    else {
                        var rTimesTau = sub(rjEndAll, matMul$1(tauTimesW, matMul$1(wT, rjEndAll)));
                        r = concat([slice(r, [0, 0], [j, n]), rTimesTau], 0);
                    }
                    var tawTimesWT = transpose(tauTimesW);
                    var qAllJEnd = slice(q, [0, j], [m, q.shape[1] - j]);
                    if (j === 0) {
                        q = sub(qAllJEnd, matMul$1(matMul$1(qAllJEnd, w), tawTimesWT));
                    }
                    else {
                        var qTimesTau = sub(qAllJEnd, matMul$1(matMul$1(qAllJEnd, w), tawTimesWT));
                        q = concat([slice(q, [0, 0], [m, j]), qTimesTau], 1);
                    }
                    return [w, r, q];
                }), 3), w = _a[0], r = _a[1], q = _a[2];
                // Free the superseded tensors from the previous iteration.
                dispose([rTemp, wTemp, qTemp]);
            };
            for (var j = 0; j < iters; ++j) {
                _loop_1(j);
            }
            // Reduced ("economy") decomposition: keep only the leading n
            // columns of Q and the top-left n x n block of R.
            if (!fullMatrices && m > n) {
                q = slice(q, [0, 0], [m, n]);
                r = slice(r, [0, 0], [n, n]);
            }
            return [q, r];
        });
    }
    var qr = op({ qr_: qr_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var Reduction;
+ (function (Reduction) {
+ Reduction[Reduction["NONE"] = 0] = "NONE";
+ Reduction[Reduction["MEAN"] = 1] = "MEAN";
+ Reduction[Reduction["SUM"] = 2] = "SUM";
+ Reduction[Reduction["SUM_BY_NONZERO_WEIGHTS"] = 3] = "SUM_BY_NONZERO_WEIGHTS";
+ })(Reduction || (Reduction = {}));
+
+ /**
+ * Computes the weighted loss between two tensors.
+ *
+ * @param losses Tensor of shape `[batch_size, d1, ... dN]`.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `losses`, and must be broadcastable to `losses` (i.e., all
+ * dimensions must be either `1`, or the same as the corresponding
+ * `losses` dimension).
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function computeWeightedLoss_(losses, weights, reduction) {
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $losses = convertToTensor(losses, 'losses', 'computeWeightedLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'computeWeightedLoss');
+ }
+ var weightedLoss = ($weights == null) ? $losses : mul($losses, $weights);
+ if (reduction === Reduction.NONE) {
+ return weightedLoss;
+ }
+ if (reduction === Reduction.SUM) {
+ return sum(weightedLoss);
+ }
+ if (reduction === Reduction.MEAN) {
+ if ($weights == null) {
+ return mean(weightedLoss);
+ }
+ else {
+ var broadcastFactor = $losses.size / $weights.size;
+ var result = div(sum(weightedLoss), sum($weights));
+ return broadcastFactor > 1 ? div(result, scalar(broadcastFactor)) :
+ result;
+ }
+ }
+ if (reduction === Reduction.SUM_BY_NONZERO_WEIGHTS) {
+ if ($weights == null) {
+ return div(sum(weightedLoss), scalar($losses.size));
+ }
+ else {
+ var broadcastedWeights = mul($weights, ones($losses.shape));
+ var numNonZeros = cast(sum(notEqual(broadcastedWeights, scalar(0))), 'float32');
+ return div(sum(weightedLoss), numNonZeros);
+ }
+ }
+ throw Error("Unknown reduction: " + reduction);
+ }
+ var computeWeightedLoss = op({ computeWeightedLoss_: computeWeightedLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the absolute difference loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function absoluteDifference_(labels, predictions, weights, reduction) {
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'absoluteDifference');
+ var $predictions = convertToTensor(predictions, 'predictions', 'absoluteDifference');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'absoluteDifference');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in absoluteDifference: ');
+ var losses = abs(sub($labels, $predictions));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var absoluteDifference = op({ absoluteDifference_: absoluteDifference_ });
+
+ /**
+ * Computes the cosine distance loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param axis The dimension along which the cosine distance is computed.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function cosineDistance_(labels, predictions, axis, weights, reduction) {
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'cosineDistance');
+ var $predictions = convertToTensor(predictions, 'predictions', 'cosineDistance');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'cosineDistance');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in cosineDistance: ');
+ var one = scalar(1);
+ var losses = sub(one, sum(mul($labels, $predictions), axis, true));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var cosineDistance = op({ cosineDistance_: cosineDistance_ });
+
+ /**
+ * Computes the Hinge loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function hingeLoss_(labels, predictions, weights, reduction) {
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'hingeLoss');
+ var $predictions = convertToTensor(predictions, 'predictions', 'hingeLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'hingeLoss');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in hingeLoss: ');
+ var one = scalar(1);
+ // Convert binary labels to (-1, 1)
+ $labels = sub(mul(scalar(2), $labels), one);
+ var losses = relu(sub(one, mul($labels, $predictions)));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var hingeLoss = op({ hingeLoss_: hingeLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the huber loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param delta Point where huber loss changes from quadratic to linear.
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`.
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function huberLoss_(labels, predictions, weights, delta, reduction) {
+ if (delta === void 0) { delta = 1.0; }
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'huberLoss');
+ var $predictions = convertToTensor(predictions, 'predictions', 'huberLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'huberLoss');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in huberLoss: ');
+ var deltaScalar = scalar(delta);
+ var error = abs(sub($predictions, $labels));
+ var quadratic = minimum(error, deltaScalar);
+ var linear = sub(error, quadratic);
+ var losses = add(mul(scalar(0.5), square(quadratic)), mul(deltaScalar, linear));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var huberLoss = op({ huberLoss_: huberLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the log loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param epsilon A small increment to avoid taking log of zero
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function logLoss_(labels, predictions, weights, epsilon, reduction) {
+ if (epsilon === void 0) { epsilon = 1e-7; }
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'logLoss');
+ var $predictions = convertToTensor(predictions, 'predictions', 'logLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'logLoss');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in logLoss: ');
+ var one = scalar(1);
+ var epsilonScalar = scalar(epsilon);
+ var l1 = neg(mul($labels, log(add($predictions, epsilonScalar))));
+ var l2 = mul(sub(one, $labels), log(add(sub(one, $predictions), epsilonScalar)));
+ var losses = sub(l1, l2);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var logLoss = op({ logLoss_: logLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the mean squared error between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function meanSquaredError_(labels, predictions, weights, reduction) {
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'meanSquaredError');
+ var $predictions = convertToTensor(predictions, 'predictions', 'meanSquaredError');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'meanSquaredError');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in meanSquaredError: ');
+ var losses = squaredDifference($labels, $predictions);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var meanSquaredError = op({ meanSquaredError_: meanSquaredError_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function sigmoidCrossEntropyWithLogits_(labels, logits) {
+ var $labels = convertToTensor(labels, 'labels', 'sigmoidCrossEntropyWithLogits');
+ var $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropyWithLogits');
+ assertShapesMatch($labels.shape, $logits.shape, 'Error in sigmoidCrossEntropyWithLogits: ');
+ /**
+ * Implementation Details:
+ *
+ * For brevity, let `x = logits`, `z = labels`. The logistic loss is
+ * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
+ * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
+ * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+ * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
+ * = (1 - z) * x + log(1 + exp(-x))
+ * = x - x * z + log(1 + exp(-x))
+ *
+ * For x < 0, to avoid overflow in exp(-x), we reformulate the above
+ * x - x * z + log(1 + exp(-x))
+ * = log(exp(x)) - x * z + log(1 + exp(-x))
+ * = - x * z + log(1 + exp(x))
+ *
+ * Hence, to ensure stability and avoid overflow, the implementation uses
+ * this equivalent formulation:
+ * max(x, 0) - x * z + log(1 + exp(-abs(x)))
+ */
+ var maxOutput = relu($logits);
+ var outputXTarget = mul($logits, $labels);
+ var sigmoidOutput = log1p(exp(neg(abs($logits))));
+ return add(sub(maxOutput, outputXTarget), sigmoidOutput);
+ }
+ /**
+ * Computes the sigmoid cross entropy loss between two tensors.
+ *
+ * If labelSmoothing is nonzero, smooth the labels towards 1/2:
+ *
+ * newMulticlassLabels = multiclassLabels * (1 - labelSmoothing)
+ * + 0.5 * labelSmoothing
+ *
+ * @param multiClassLabels The ground truth output tensor of shape
+ * [batch_size, num_classes], same dimensions as 'predictions'.
+ * @param logits The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param labelSmoothing If greater than 0, then smooth the labels.
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }
+ */
+ function sigmoidCrossEntropy_(multiClassLabels, logits, weights, labelSmoothing, reduction) {
+ if (labelSmoothing === void 0) { labelSmoothing = 0; }
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $multiClassLabels = convertToTensor(multiClassLabels, 'multiClassLabels', 'sigmoidCrossEntropy');
+ var $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropy');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'sigmoidCrossEntropy');
+ }
+ assertShapesMatch($multiClassLabels.shape, $logits.shape, 'Error in sigmoidCrossEntropy: ');
+ if (labelSmoothing > 0) {
+ var labelSmoothingScalar = scalar(labelSmoothing);
+ var one = scalar(1);
+ var half = scalar(0.5);
+ $multiClassLabels =
+ add(mul($multiClassLabels, sub(one, labelSmoothingScalar)), mul(half, labelSmoothingScalar));
+ }
+ var losses = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var sigmoidCrossEntropy = op({ sigmoidCrossEntropy_: sigmoidCrossEntropy_ });
+
+ /**
+ * Computes softmax cross entropy between logits and labels.
+ *
+ * Measures the probability error in discrete classification tasks in which
+ * the classes are mutually exclusive (each entry is in exactly one class).
+ * For example, each CIFAR-10 image is labeled with one and only one label: an
+ * image can be a dog or a truck, but not both.
+ *
+ * `NOTE`: While the classes are mutually exclusive, their probabilities need
+ * not be. All that is required is that each row of labels is a valid
+ * probability distribution. If they are not, the computation of the gradient
+ * will be incorrect.
+ *
+ * `WARNING`: This op expects unscaled logits, since it performs a softmax on
+ * logits internally for efficiency. Do not call this op with the output of
+ * softmax, as it will produce incorrect results.
+ *
+ * logits and labels must have the same shape, e.g. [batch_size, num_classes]
+ * and the same dtype.
+ * @param labels The labels array.
+ * @param logits The logits array.
+ * @param dim The dimension softmax would be performed on. Defaults to `-1`
+ * which indicates the last dimension.
+ */
    /**
     * (See doc comment above for the public contract.) Uses a custom
     * gradient so both the forward and backward passes are computed from the
     * numerically stable log-softmax.
     */
    function softmaxCrossEntropyWithLogits_(labels, logits, dim) {
        if (dim === void 0) { dim = -1; }
        if (dim === -1) {
            dim = logits.rank - 1;
        }
        // Only the last dimension is supported as the softmax dimension.
        if (dim !== logits.rank - 1) {
            throw Error("Softmax cross entropy along a non-last dimension is not yet " +
                ("supported. Labels / logits was rank " + logits.rank + " ") +
                ("and dim was " + dim));
        }
        // Use a custom gradient for numerical stability.
        var customOp = customGrad(function (labels, logits, save) {
            // Reference:
            //   1. http://cs231n.github.io/linear-classify/#softmax
            //   2. https://blog.feedly.com/tricks-of-the-trade-logsumexp/
            var keepDims = true;
            var lse = logSumExp(logits, [dim], keepDims);
            // log-softmax: logits - logSumExp(logits).
            var logResult = sub(cast(logits, 'float32'), lse);
            // Save the inputs needed by the gradient function below.
            save([labels, logResult]);
            var costVector = neg(mul(logResult, labels));
            var value = sum(costVector, [dim]);
            var gradFunc = function (dy, saved) {
                var _a = __read(saved, 2), labels = _a[0], logResult = _a[1];
                // Re-expand dy so it broadcasts along the reduced softmax dim.
                var dyShape = expandShapeToKeepDim(dy.shape, [dim]);
                // Gradients w.r.t. labels and logits respectively;
                // exp(logResult) is softmax(logits).
                return [
                    mul(reshape(dy, dyShape), sub(cast(labels, 'float32'), exp(logResult))),
                    mul(reshape(dy, dyShape), sub(exp(logResult), cast(labels, 'float32'))),
                ];
            };
            return { value: value, gradFunc: gradFunc };
        });
        return customOp(labels, logits);
    }
+ /**
+ * Computes the softmax cross entropy loss between two tensors.
+ *
+ * If labelSmoothing is nonzero, smooth the labels towards 1/2:
+ *
+ * newOnehotLabels = onehotLabels * (1 - labelSmoothing)
+ * + labelSmoothing / numClasses
+ *
+ * @param onehotLabels One hot encoded labels
+ * [batch_size, num_classes], same dimensions as 'predictions'.
+ * @param logits The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or 1, and must be
+ * broadcastable to `loss` of shape [batch_size]
+ * @param labelSmoothing If greater than 0, then smooth the labels.
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }
+ */
+ function softmaxCrossEntropy_(onehotLabels, logits, weights, labelSmoothing, reduction) {
+ if (labelSmoothing === void 0) { labelSmoothing = 0; }
+ if (reduction === void 0) { reduction = Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $onehotLabels = convertToTensor(onehotLabels, 'onehotLabels', 'softmaxCrossEntropy');
+ var $logits = convertToTensor(logits, 'logits', 'softmaxCrossEntropy');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'softmaxCrossEntropy');
+ }
+ assertShapesMatch($onehotLabels.shape, $logits.shape, 'Error in softmaxCrossEntropy: ');
+ if (labelSmoothing > 0) {
+ var labelSmoothingScalar = scalar(labelSmoothing);
+ var one = scalar(1);
+ var numClasses = scalar($onehotLabels.shape[1]);
+ $onehotLabels =
+ add(mul($onehotLabels, sub(one, labelSmoothingScalar)), div(labelSmoothingScalar, numClasses));
+ }
+ var losses = softmaxCrossEntropyWithLogits_($onehotLabels, $logits);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var softmaxCrossEntropy = op({ softmaxCrossEntropy_: softmaxCrossEntropy_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * The input SparseTensor is represented via the map of inputs {`indices`,
+ * `values`, `denseShape`}. The output SparseTensor has the same `denseShape`
+ * but with indices `outputIndices` and values `outputValues`. This op inserts a
+ * single entry for every row that doesn't have any values. The index is created
+ * as `[row, 0, ..., 0]` and the inserted value is `defaultValue`.
+ *
+ * For example, suppose `spInput` has shape [5, 6] and non-empty values:
+ * [0, 1]: a
+ * [0, 3]: b
+ * [2, 0]: c
+ * [3, 1]: d
+ *
+ * Rows 1 and 4 are empty, so the output will be of shape [5, 6] with values:
+ * [0, 1]: a
+ * [0, 3]: b
+ * [1, 0]: `defaultValue`
+ * [2, 0]: c
+ * [3, 1]: d
+ * [4, 0]: `defaultValue`
+ *
+ * The output SparseTensor will be in row-major order and will have the same
+ * shape as the input.
+ *
+ * This op also returns an indicator vector shaped [dense_shape[0]] such that
+ * emptyRowIndicator[i] = True iff row i was an empty row.
+ *
+ * And a reverse index map vector shaped [indices.shape[0]] that is used during
+ * backpropagation, reverseIndexMap[i] = outi s.t. indices[i, j] ==
+ * outputIndices[outi, j] for all j
+ *
+ * ```js
+ * const result = tf.sparse.sparseFillEmptyRows(
+ * [[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]],
+ * [0, 10, 13, 14, 32, 33], [5, 6], -1);
+ * console.log(result);
+ * result['outputIndices'].print(); // [[0, 0], [1, 0], [1, 3], [1, 4],
+ * // [2, 0], [3, 2], [3, 3], [4, 0]]
+ * result['outputValues'].print(); // [0, 10, 13, 14,-1, 32, 33, -1]
+ * result['emptyRowIndicator'].print(); // [false, false, true, false, true]
+ * result['reverseIndexMap'].print(); // [0, 1, 2, 3, 5, 6]
+ * ```
+ * @param indices: 2-D. the indices of the sparse tensor.
+ * @param values: 1-D. the values of the sparse tensor.
+ * @param denseShape: 1-D. the shape of the sparse tensor.
+ * @param defaultValue: 0-D. default value to insert into location [row, 0, ...,
+ * 0] for rows missing from the input sparse tensor.
+ * @return A map with the following properties:
+ * - outputIndices
+ * - outputValues: 1-D. the values of the filled sparse tensor.
+ * - emptyRowIndicator: 1-D. whether the dense row was missing in the input
+ * sparse tensor.
+ * - reverseIndexMap: 1-D. a map from the input indices to the output
+ * indices.
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
/**
 * Inserts a `defaultValue` entry at `[row, 0, ..., 0]` for every row of the
 * input SparseTensor ({indices, values, denseShape}) that has no values.
 * Returns the filled indices/values plus an empty-row indicator vector and a
 * reverse index map used during backpropagation (see the op docs above).
 */
function sparseFillEmptyRows_(indices, values, denseShape, defaultValue) {
    var indicesTensor = convertToTensor(indices, 'indices', 'sparseFillEmptyRows', 'int32');
    var valuesTensor = convertToTensor(values, 'values', 'sparseFillEmptyRows');
    var denseShapeTensor = convertToTensor(denseShape, 'denseShape', 'sparseFillEmptyRows', 'int32');
    // The default value is cast to the dtype of `values` so the filled output
    // stays homogeneous.
    var defaultValueTensor = convertToTensor(defaultValue, 'defaultValue', 'sparseFillEmptyRows', valuesTensor.dtype);
    // Validate ranks up front so the kernel only ever sees well-formed inputs.
    if (indicesTensor.rank !== 2) {
        throw new Error("Indices should be Tensor2D but received shape\n " + indicesTensor.shape);
    }
    if (valuesTensor.rank !== 1) {
        throw new Error("Values should be Tensor1D but received shape " + valuesTensor.shape);
    }
    if (denseShapeTensor.rank !== 1) {
        throw new Error("Dense shape should be Tensor1D but received shape " + denseShapeTensor.shape);
    }
    if (defaultValueTensor.rank !== 0) {
        throw new Error("Default value should be a scalar but received shape " + defaultValueTensor.shape);
    }
    var kernelInputs = {
        indices: indicesTensor,
        values: valuesTensor,
        denseShape: denseShapeTensor,
        defaultValue: defaultValueTensor
    };
    var outputs = ENGINE.runKernel(SparseFillEmptyRows, kernelInputs);
    return {
        outputIndices: outputs[0],
        outputValues: outputs[1],
        emptyRowIndicator: outputs[2],
        reverseIndexMap: outputs[3]
    };
}
var sparseFillEmptyRows = op({ sparseFillEmptyRows_: sparseFillEmptyRows_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * This operation has the same semantics as reshape on the represented dense
+ * tensor. The `inputIndices` are recomputed based on the requested `newShape`.
+ * If one component of `newShape` is the special value -1, the size of that
+ * dimension is computed so that the total dense size remains constant. At most
+ * one component of `newShape` can be -1. The number of dense elements implied
+ * by `newShape` must be the same as the number of dense elements originally
+ * implied by `inputShape`. Reshaping does not affect the order of values in the
+ * SparseTensor. If the input tensor has rank R_in and N non-empty values, and
+ * `newShape` has length R_out, then `inputIndices` has shape [N, R_in],
+ * `inputShape` has length R_in, `outputIndices` has shape [N, R_out], and
+ * `outputShape` has length R_out.
+ *
+ * ```js
+ * const result = tf.sparse.sparseReshape(
+ * [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],
+ * [2, 3, 6], [9, -1]);
+ * console.log(result);
+ * result['outputIndices'].print(); //[[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]]
+ * result['outputShape'].print(); // [9, 4]
+ * ```
+ * @param inputIndices: 2-D. N x R_in matrix with the indices of non-empty
+ * values in a SparseTensor.
+ * @param inputShape: 1-D. R_in Tensor1D with the input SparseTensor's dense
+ * shape.
+ * @param newShape: 1-D. R_out Tensor1D with the requested new dense shape.
+ * @return A map with the following properties:
+ * - outputIndices: 2-D. N x R_out matrix with the updated indices of
+ * non-empty values in the output SparseTensor.
+ * - outputShape: 1-D. R_out vector with the full dense shape of the output
+ * SparseTensor. This is the same as newShape but with any -1 dimensions
+ * filled in.
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
/**
 * Reshapes a SparseTensor's represented dense tensor: recomputes the sparse
 * indices for `newShape` (one dimension may be -1 and is inferred so the total
 * dense size is preserved). Value order is unchanged; see the op docs above.
 */
function sparseReshape_(inputIndices, inputShape, newShape) {
    var indicesTensor = convertToTensor(inputIndices, 'inputIndices', 'sparseReshape', 'int32');
    var shapeTensor = convertToTensor(inputShape, 'inputShape', 'sparseReshape', 'int32');
    var newShapeTensor = convertToTensor(newShape, 'newShape', 'sparseReshape', 'int32');
    // Rank checks mirror the TensorFlow kernel's input contract.
    if (indicesTensor.rank !== 2) {
        throw new Error("Input indices should be Tensor2D but received shape\n " + indicesTensor.shape);
    }
    if (shapeTensor.rank !== 1) {
        throw new Error("Input shape should be Tensor1D but received shape " + shapeTensor.shape);
    }
    if (newShapeTensor.rank !== 1) {
        throw new Error("New shape should be Tensor1D but received shape " + newShapeTensor.shape);
    }
    var outputs = ENGINE.runKernel(SparseReshape, {
        inputIndices: indicesTensor,
        inputShape: shapeTensor,
        newShape: newShapeTensor
    });
    return { outputIndices: outputs[0], outputShape: outputs[1] };
}
var sparseReshape = op({ sparseReshape_: sparseReshape_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the mean along sparse segments of a tensor.
+ *
+ * ```js
+ * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [6,7,8,9]]);
+ * // Select two rows, one segment.
+ * const result1 = tf.sparse.sparseSegmentMean(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 0], 'int32'));
+ * result1.print(); // [[0, 0, 0, 0]]
+ *
+ * // Select two rows, two segments.
+ * const result2 = tf.sparse.sparseSegmentMean(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 1], 'int32'));
+ * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]]
+ *
+ * // Select all rows, two segments.
+ * const result3 = tf.sparse.sparseSegmentMean(c,
+ * tf.tensor1d([0, 1, 2], 'int32'),
+ * tf.tensor1d([0, 1, 1], 'int32'));
+ * result3.print(); // [[1.0, 2.0, 3.0, 4.0], [2.5, 2.5, 2.5, 2.5]]
+ * ```
+ * @param data: A Tensor of at least one dimension with data that will be
+ * assembled in the output.
+ * @param indices: A 1-D Tensor with indices into data. Has same rank as
+ * segmentIds.
+ * @param segmentIds: A 1-D Tensor with indices into the output Tensor. Values
+ * should be sorted and can be repeated.
+ * @return Has same shape as data, except for dimension 0 which has equal to
+ * the number of segments.
+ *
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
/**
 * Computes the mean along sparse segments of `data`: rows selected by
 * `indices` are averaged into output rows keyed by `segmentIds` (which must
 * be sorted; repeats allowed). See the op docs above for examples.
 */
function sparseSegmentMean_(data, indices, segmentIds) {
    var dataTensor = convertToTensor(data, 'data', 'sparseSegmentMean');
    var indicesTensor = convertToTensor(indices, 'indices', 'sparseSegmentMean', 'int32');
    var segmentIdsTensor = convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentMean', 'int32');
    // Scalars cannot be segmented; indices and segment ids must be vectors.
    if (dataTensor.rank < 1) {
        throw new Error("Data should be at least 1 dimensional but received scalar");
    }
    if (indicesTensor.rank !== 1) {
        throw new Error("Indices should be Tensor1D but received shape\n " + indicesTensor.shape);
    }
    if (segmentIdsTensor.rank !== 1) {
        throw new Error("Segment ids should be Tensor1D but received shape\n " + segmentIdsTensor.shape);
    }
    return ENGINE.runKernel(SparseSegmentMean, {
        data: dataTensor,
        indices: indicesTensor,
        segmentIds: segmentIdsTensor
    });
}
var sparseSegmentMean = op({ sparseSegmentMean_: sparseSegmentMean_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the sum along sparse segments of a tensor.
+ *
+ * ```js
+ * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]);
+ * // Select two rows, one segment.
+ * const result1 = tf.sparse.sparseSegmentSum(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 0], 'int32'));
+ * result1.print(); // [[0, 0, 0, 0]]
+ *
+ * // Select two rows, two segment.
+ * const result2 = tf.sparse.sparseSegmentSum(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 1], 'int32'));
+ * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]]
+ *
+ * // Select all rows, two segments.
+ * const result3 = tf.sparse.sparseSegmentSum(c,
+ * tf.tensor1d([0, 1, 2], 'int32'),
+ * tf.tensor1d([0, 0, 1], 'int32'));
+ * result3.print(); // [[0, 0, 0, 0], [5, 6, 7, 8]]
+ * ```
+ * @param data: A Tensor of at least one dimension with data that will be
+ * assembled in the output.
+ * @param indices: A 1-D Tensor with indices into data. Has same rank as
+ * segmentIds.
+ * @param segmentIds: A 1-D Tensor with indices into the output Tensor. Values
+ * should be sorted and can be repeated.
+ * @return Has same shape as data, except for dimension 0 which has equal to
+ * the number of segments.
+ *
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
/**
 * Computes the sum along sparse segments of `data`: rows selected by
 * `indices` are summed into output rows keyed by `segmentIds` (which must be
 * sorted; repeats allowed). See the op docs above for examples.
 */
function sparseSegmentSum_(data, indices, segmentIds) {
    var dataTensor = convertToTensor(data, 'data', 'sparseSegmentSum');
    var indicesTensor = convertToTensor(indices, 'indices', 'sparseSegmentSum', 'int32');
    var segmentIdsTensor = convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentSum', 'int32');
    // Scalars cannot be segmented; indices and segment ids must be vectors.
    if (dataTensor.rank < 1) {
        throw new Error("Data should be at least 1 dimensional but received scalar");
    }
    if (indicesTensor.rank !== 1) {
        throw new Error("Indices should be Tensor1D but received shape\n " + indicesTensor.shape);
    }
    if (segmentIdsTensor.rank !== 1) {
        throw new Error("Segment ids should be Tensor1D but received shape\n " + segmentIdsTensor.shape);
    }
    return ENGINE.runKernel(SparseSegmentSum, {
        data: dataTensor,
        indices: indicesTensor,
        segmentIds: segmentIdsTensor
    });
}
var sparseSegmentSum = op({ sparseSegmentSum_: sparseSegmentSum_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates ngrams from ragged string data.
+ *
+ * This op accepts a ragged tensor with 1 ragged dimension containing only
+ * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams
+ * of that string, joined along the innermost axis.
+ *
+ * ```js
+ * const result = tf.string.stringNGrams(
+ * ['a', 'b', 'c', 'd'], tf.tensor1d([0, 2, 4], 'int32'),
+ * '|', [1, 2], 'LP', 'RP', -1, false);
+ * result['nGrams'].print(); // ['a', 'b', 'LP|a', 'a|b', 'b|RP',
+ * // 'c', 'd', 'LP|c', 'c|d', 'd|RP']
+ * result['nGramsSplits'].print(); // [0, 5, 10]
+ * ```
+ * @param data: The values tensor of the ragged string tensor to make ngrams out
+ * of. Must be a 1D string tensor.
+ * @param dataSplits: The splits tensor of the ragged string tensor to make
+ * ngrams out of.
+ * @param separator: The string to append between elements of the token. Use ""
+ * for no separator.
+ * @param nGramWidths: The sizes of the ngrams to create.
+ * @param leftPad: The string to use to pad the left side of the ngram sequence.
+ * Only used if pad_width !== 0.
+ * @param rightPad: The string to use to pad the right side of the ngram
+ * sequence. Only used if pad_width !== 0.
+ * @param padWidth: The number of padding elements to add to each side of each
+ * sequence. Note that padding will never be greater than `nGramWidths`-1
+ * regardless of this value. If `padWidth`=-1 , then add max(`nGramWidths)-1
+ * elements.
+ * @param preserveShortSequences: If true, then ensure that at least one ngram
+ * is generated for each input sequence. In particular, if an input sequence
+ * is shorter than min(ngramWidth) + 2*padWidth, then generate a single
+ * ngram containing the entire sequence. If false, then no ngrams are
+ * generated for these short input sequences.
+ * @return A map with the following properties:
+ * - nGrams: The values tensor of the output ngrams ragged tensor.
+ * - nGramsSplits: The splits tensor of the output ngrams ragged tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'String'}
+ */
/**
 * Creates ngrams from ragged string data (values tensor `data` plus row
 * `dataSplits`), joining tokens with `separator` and optionally padding each
 * sequence with `leftPad`/`rightPad`. Returns the output ragged tensor as
 * {nGrams, nGramsSplits}. See the op docs above for the full contract.
 */
function stringNGrams_(data, dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) {
    var dataTensor = convertToTensor(data, 'data', 'stringNGrams', 'string');
    if (dataTensor.dtype !== 'string') {
        throw new Error('Data must be of datatype string');
    }
    // Ragged values must be a flat 1-D vector of tokens.
    if (dataTensor.shape.length !== 1) {
        throw new Error("Data must be a vector, saw: " + dataTensor.shape);
    }
    var splitsTensor = convertToTensor(dataSplits, 'dataSplits', 'stringNGrams');
    if (splitsTensor.dtype !== 'int32') {
        throw new Error('Data splits must be of datatype int32');
    }
    var kernelAttrs = {
        separator: separator,
        nGramWidths: nGramWidths,
        leftPad: leftPad,
        rightPad: rightPad,
        padWidth: padWidth,
        preserveShortSequences: preserveShortSequences
    };
    var outputs = ENGINE.runKernel(StringNGrams, { data: dataTensor, dataSplits: splitsTensor }, kernelAttrs);
    return { nGrams: outputs[0], nGramsSplits: outputs[1] };
}
var stringNGrams = op({ stringNGrams_: stringNGrams_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Split elements of `input` based on `delimiter` into a SparseTensor .
+ *
+ * Let N be the size of source (typically N will be the batch size). Split each
+ * element of `input` based on `delimiter` and return a SparseTensor containing
+ * the splitted tokens. Empty tokens are ignored if `skipEmpty` is set to True.
+ *
+ * `delimiter` can be empty, or a string of split characters. If `delimiter` is
+ * an empty string, each element of `input` is split into individual
+ * character strings. Otherwise every character of `delimiter` is a potential
+ * split point.
+ *
+ * ```js
+ * const result = tf.string.stringSplit(['hello world', 'a b c'], ' ');
+ * result['indices'].print(); // [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
+ * result['values'].print(); // ['hello', 'world', 'a', 'b', 'c']
+ * result['shape'].print(); // [2, 3]
+ * ```
+ * @param input: 1-D. Strings to split.
+ * @param delimiter: 0-D. Delimiter characters, or empty string.
+ * @param skipEmpty: Optional. If true, skip the empty strings from the result.
+ * Defaults to true.
+ * @return A map with the following properties:
+ * - indices: A dense matrix of int32 representing the indices of the sparse
+ * tensor.
+ * - values: A vector of strings corresponding to the splited values.
+ * - shape: a length-2 vector of int32 representing the shape of the sparse
+ * tensor, where the first value is N and the second value is the maximum number
+ * of tokens in a single input entry.
+ *
+ * @doc {heading: 'Operations', subheading: 'String'}
+ */
/**
 * Splits each element of a 1-D string tensor on `delimiter` and returns the
 * result as a SparseTensor {indices, values, shape}. An empty delimiter splits
 * into individual characters; otherwise each delimiter character is a split
 * point. Empty tokens are dropped when `skipEmpty` is true (the default).
 */
function stringSplit_(input, delimiter, skipEmpty) {
    if (skipEmpty === void 0) { skipEmpty = true; }
    var inputTensor = convertToTensor(input, 'input', 'stringSplit', 'string');
    var delimiterTensor = convertToTensor(delimiter, 'delimiter', 'stringSplit', 'string');
    // The kernel expects a batch of strings and a scalar delimiter.
    if (inputTensor.rank !== 1) {
        throw new Error("Input should be Tensor1D but received shape " + inputTensor.shape);
    }
    if (delimiterTensor.rank !== 0) {
        throw new Error("Delimiter should be a scalar but received shape " + delimiterTensor.shape);
    }
    var outputs = ENGINE.runKernel(StringSplit, { input: inputTensor, delimiter: delimiterTensor }, { skipEmpty: skipEmpty });
    return { indices: outputs[0], values: outputs[1], shape: outputs[2] };
}
var stringSplit = op({ stringSplit_: stringSplit_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Converts each string in the input Tensor to its hash mod by a number of
+ * buckets.
+ *
+ * The hash function is deterministic on the content of the string within the
+ * process and will never change. However, it is not suitable for cryptography.
+ * This function may be used when CPU time is scarce and inputs are trusted or
+ * unimportant. There is a risk of adversaries constructing inputs that all hash
+ * to the same bucket.
+ *
+ * ```js
+ * const result = tf.string.stringToHashBucketFast(
+ * ['Hello', 'TensorFlow', '2.x'], 3);
+ * result.print(); // [0, 2, 2]
+ * ```
+ * @param input: The strings to assign a hash bucket.
+ * @param numBuckets: The number of buckets.
+ * @return A Tensor of the same shape as the input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'String'}
+ */
/**
 * Hashes each string in `input` into one of `numBuckets` buckets. The hash is
 * deterministic but not cryptographic — adversarial inputs can collide.
 * Returns an int tensor with the same shape as the input.
 */
function stringToHashBucketFast_(input, numBuckets) {
    var inputTensor = convertToTensor(input, 'input', 'stringToHashBucketFast', 'string');
    var kernelAttrs = { numBuckets: numBuckets };
    // A non-positive bucket count would make the modulo meaningless.
    if (numBuckets <= 0) {
        throw new Error("Number of buckets must be at least 1");
    }
    return ENGINE.runKernel(StringToHashBucketFast, { input: inputTensor }, kernelAttrs);
}
var stringToHashBucketFast = op({ stringToHashBucketFast_: stringToHashBucketFast_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Grouped op namespaces exposed on the public API surface
// (e.g. tf.spectral.fft, tf.image.resizeBilinear). Each object simply
// collects already-defined ops under a namespace key.
var spectral = {
    fft: fft,
    ifft: ifft,
    rfft: rfft,
    irfft: irfft
};
var signal = {
    hammingWindow: hammingWindow,
    hannWindow: hannWindow,
    frame: frame,
    stft: stft,
};
var image = {
    flipLeftRight: flipLeftRight,
    grayscaleToRGB: grayscaleToRGB,
    resizeNearestNeighbor: resizeNearestNeighbor,
    resizeBilinear: resizeBilinear,
    rotateWithOffset: rotateWithOffset,
    cropAndResize: cropAndResize,
    nonMaxSuppression: nonMaxSuppression,
    nonMaxSuppressionAsync: nonMaxSuppressionAsync,
    nonMaxSuppressionWithScore: nonMaxSuppressionWithScore,
    nonMaxSuppressionWithScoreAsync: nonMaxSuppressionWithScoreAsync,
    nonMaxSuppressionPadded: nonMaxSuppressionPadded,
    nonMaxSuppressionPaddedAsync: nonMaxSuppressionPaddedAsync,
    threshold: threshold,
    transform: transform
};
var linalg = {
    bandPart: bandPart,
    gramSchmidt: gramSchmidt,
    qr: qr
};
var losses = {
    absoluteDifference: absoluteDifference,
    computeWeightedLoss: computeWeightedLoss,
    cosineDistance: cosineDistance,
    hingeLoss: hingeLoss,
    huberLoss: huberLoss,
    logLoss: logLoss,
    meanSquaredError: meanSquaredError,
    sigmoidCrossEntropy: sigmoidCrossEntropy,
    softmaxCrossEntropy: softmaxCrossEntropy
};
var sparse = {
    sparseFillEmptyRows: sparseFillEmptyRows,
    sparseReshape: sparseReshape,
    sparseSegmentMean: sparseSegmentMean,
    sparseSegmentSum: sparseSegmentSum
};
// `string` shadows the global String-ish name by design for the tf.string
// namespace, hence the lint suppression.
// tslint:disable-next-line:variable-name
var string = {
    stringNGrams: stringNGrams,
    stringSplit: stringSplit,
    stringToHashBucketFast: stringToHashBucketFast
};
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
// Flat registry of the tfjs-core public ops, consumed by the converter's
// graph executors to resolve GraphDef node types to kernel implementations.
// `__proto__: null` makes the object prototype-free so op-name lookups can
// never accidentally resolve to Object.prototype members. Entries suffixed
// `$1` (conv2d$1, matMul$1, ...) are the bundler's renamed bindings for ops
// whose plain names collide elsewhere in this bundle.
var tfOps = {
    __proto__: null,
    abs: abs,
    acos: acos,
    acosh: acosh,
    add: add,
    addN: addN,
    all: all,
    any: any,
    argMax: argMax,
    argMin: argMin,
    asin: asin,
    asinh: asinh,
    atan: atan,
    atan2: atan2,
    atanh: atanh,
    avgPool: avgPool,
    avgPool3d: avgPool3d,
    basicLSTMCell: basicLSTMCell,
    batchToSpaceND: batchToSpaceND,
    batchNorm: batchNorm,
    batchNorm2d: batchNorm2d,
    batchNorm3d: batchNorm3d,
    batchNorm4d: batchNorm4d,
    bincount: bincount,
    broadcastArgs: broadcastArgs,
    broadcastTo: broadcastTo,
    buffer: buffer,
    cast: cast,
    ceil: ceil,
    clipByValue: clipByValue,
    clone: clone,
    complex: complex,
    concat: concat,
    concat1d: concat1d,
    concat2d: concat2d,
    concat3d: concat3d,
    concat4d: concat4d,
    conv1d: conv1d,
    conv2d: conv2d$1,
    conv2dTranspose: conv2dTranspose,
    conv3d: conv3d,
    conv3dTranspose: conv3dTranspose,
    cos: cos,
    cosh: cosh,
    cumsum: cumsum,
    denseBincount: denseBincount,
    depthToSpace: depthToSpace,
    depthwiseConv2d: depthwiseConv2d$1,
    diag: diag,
    dilation2d: dilation2d,
    div: div,
    divNoNan: divNoNan,
    dot: dot,
    einsum: einsum,
    elu: elu,
    equal: equal,
    erf: erf,
    exp: exp,
    expandDims: expandDims,
    expm1: expm1,
    eye: eye,
    fill: fill,
    floor: floor,
    floorDiv: floorDiv,
    gather: gather,
    greater: greater,
    greaterEqual: greaterEqual,
    imag: imag,
    isFinite: isFinite$1,
    isInf: isInf,
    isNaN: isNaN$1,
    leakyRelu: leakyRelu,
    less: less,
    lessEqual: lessEqual,
    linspace: linspace,
    localResponseNormalization: localResponseNormalization,
    log: log,
    log1p: log1p,
    logSigmoid: logSigmoid,
    logSoftmax: logSoftmax,
    logSumExp: logSumExp,
    logicalAnd: logicalAnd,
    logicalNot: logicalNot,
    logicalOr: logicalOr,
    logicalXor: logicalXor,
    matMul: matMul$1,
    max: max,
    maxPool: maxPool,
    maxPool3d: maxPool3d,
    maxPoolWithArgmax: maxPoolWithArgmax,
    maximum: maximum,
    mean: mean,
    meshgrid: meshgrid,
    min: min,
    minimum: minimum,
    mirrorPad: mirrorPad,
    mod: mod,
    moments: moments,
    mul: mul,
    multiRNNCell: multiRNNCell,
    multinomial: multinomial,
    neg: neg,
    notEqual: notEqual,
    oneHot: oneHot,
    ones: ones,
    onesLike: onesLike,
    outerProduct: outerProduct,
    pad: pad,
    pad1d: pad1d,
    pad2d: pad2d,
    pad3d: pad3d,
    pad4d: pad4d,
    pool: pool,
    pow: pow,
    prelu: prelu,
    print: print,
    prod: prod,
    rand: rand,
    randomGamma: randomGamma,
    randomNormal: randomNormal,
    randomUniform: randomUniform,
    range: range,
    real: real,
    reciprocal: reciprocal,
    relu: relu,
    relu6: relu6,
    reshape: reshape,
    reverse: reverse,
    reverse1d: reverse1d,
    reverse2d: reverse2d,
    reverse3d: reverse3d,
    reverse4d: reverse4d,
    round: round,
    rsqrt: rsqrt,
    scalar: scalar,
    selu: selu,
    separableConv2d: separableConv2d,
    setdiff1dAsync: setdiff1dAsync,
    sigmoid: sigmoid,
    sign: sign,
    sin: sin,
    sinh: sinh,
    slice: slice,
    slice1d: slice1d,
    slice2d: slice2d,
    slice3d: slice3d,
    slice4d: slice4d,
    softmax: softmax,
    softplus: softplus,
    spaceToBatchND: spaceToBatchND,
    fft: fft,
    ifft: ifft,
    irfft: irfft,
    rfft: rfft,
    split: split$1,
    sqrt: sqrt,
    square: square,
    squaredDifference: squaredDifference,
    squeeze: squeeze,
    stack: stack,
    step: step,
    stridedSlice: stridedSlice,
    sub: sub,
    sum: sum,
    tan: tan,
    tanh: tanh,
    tensor: tensor,
    tensor1d: tensor1d,
    tensor2d: tensor2d,
    tensor3d: tensor3d,
    tensor4d: tensor4d,
    tensor5d: tensor5d,
    tensor6d: tensor6d,
    tile: tile,
    topk: topk,
    truncatedNormal: truncatedNormal,
    unique: unique,
    unsortedSegmentSum: unsortedSegmentSum,
    unstack: unstack,
    variable: variable,
    where: where,
    whereAsync: whereAsync,
    zeros: zeros,
    zerosLike: zerosLike,
    op: op,
    OP_SCOPE_SUFFIX: OP_SCOPE_SUFFIX,
    booleanMaskAsync: booleanMaskAsync,
    transpose: transpose,
    norm: norm,
    movingAverage: movingAverage,
    scatterND: scatterND,
    sparseToDense: sparseToDense,
    gatherND: gatherND,
    dropout: dropout,
    enclosingPowerOfTwo: enclosingPowerOfTwo,
    cosineWindow: cosineWindow,
    inTopKAsync: inTopKAsync,
    // Namespaced sub-registries (tf.image.*, tf.linalg.*, ...).
    image: image,
    linalg: linalg,
    losses: losses,
    spectral: spectral,
    fused: fused_ops,
    signal: signal,
    sparse: sparse,
    string: string
};
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Executes one arithmetic graph node: maps a GraphDef op name (several
 * TensorFlow names may share one tfjs kernel, e.g. Add/AddV2/BiasAdd) onto
 * the corresponding tfjs-core op. Returns the result wrapped in a
 * single-element array, as the executor framework expects.
 */
var executeOp$j = function (node, tensorMap, context) {
    // Resolve a named node parameter from the tensor map / execution context.
    var param = function (name) {
        return getParamValue(name, node, tensorMap, context);
    };
    // All binary arithmetic nodes take operands 'a' and 'b' in that order.
    var applyBinary = function (kernelFn) {
        return [kernelFn(param('a'), param('b'))];
    };
    switch (node.op) {
        case 'BiasAdd':
        case 'AddV2':
        case 'Add':
            return applyBinary(add);
        case 'AddN':
            return [addN(param('tensors'))];
        case 'FloorMod':
        case 'Mod':
            return applyBinary(mod);
        case 'Mul':
            return applyBinary(mul);
        case 'RealDiv':
        case 'Div':
            return applyBinary(div);
        case 'DivNoNan':
            return applyBinary(divNoNan);
        case 'FloorDiv':
            return applyBinary(floorDiv);
        case 'Sub':
            return applyBinary(sub);
        case 'Minimum':
            return applyBinary(minimum);
        case 'Maximum':
            return applyBinary(maximum);
        case 'Pow':
            return applyBinary(pow);
        case 'SquaredDifference':
            return applyBinary(squaredDifference);
        default:
            throw TypeError("Node type " + node.op + " is not implemented");
    }
};
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Executes a basic (mostly unary / element-wise math) op node and returns the
 * result as a single-element tensor array.
 *
 * @param node graph node to execute; `node.op` selects the kernel.
 * @param tensorMap map of already-resolved tensors, consulted by
 *     getParamValue / getTensor to look up the node's inputs.
 * @param context execution context forwarded to the lookup helpers.
 * @returns one-element array holding the op's output tensor.
 * @throws TypeError when `node.op` has no registered handler.
 */
var executeOp$i = (function () {
    // Dispatch table keyed by op name. Every handler resolves its kernel
    // (abs, acos, ...) lazily at invocation time, so declaration order
    // within the bundle does not matter.
    var handlers = {
        Abs: function (n, t, c) { return [abs(getParamValue('x', n, t, c))]; },
        // ComplexAbs shares the abs kernel (fall-through in the original switch).
        ComplexAbs: function (n, t, c) { return [abs(getParamValue('x', n, t, c))]; },
        Acos: function (n, t, c) { return [acos(getParamValue('x', n, t, c))]; },
        Acosh: function (n, t, c) { return [acosh(getParamValue('x', n, t, c))]; },
        Asin: function (n, t, c) { return [asin(getParamValue('x', n, t, c))]; },
        Asinh: function (n, t, c) { return [asinh(getParamValue('x', n, t, c))]; },
        Atan: function (n, t, c) { return [atan(getParamValue('x', n, t, c))]; },
        Atan2: function (n, t, c) { return [atan2(getParamValue('x', n, t, c), getParamValue('y', n, t, c))]; },
        Atanh: function (n, t, c) { return [atanh(getParamValue('x', n, t, c))]; },
        Ceil: function (n, t, c) { return [ceil(getParamValue('x', n, t, c))]; },
        Complex: function (n, t, c) { return [complex(getParamValue('real', n, t, c), getParamValue('imag', n, t, c))]; },
        Cos: function (n, t, c) { return [cos(getParamValue('x', n, t, c))]; },
        Cosh: function (n, t, c) { return [cosh(getParamValue('x', n, t, c))]; },
        Elu: function (n, t, c) { return [elu(getParamValue('x', n, t, c))]; },
        Erf: function (n, t, c) { return [erf(getParamValue('x', n, t, c))]; },
        Exp: function (n, t, c) { return [exp(getParamValue('x', n, t, c))]; },
        Expm1: function (n, t, c) { return [expm1(getParamValue('x', n, t, c))]; },
        Floor: function (n, t, c) { return [floor(getParamValue('x', n, t, c))]; },
        Log: function (n, t, c) { return [log(getParamValue('x', n, t, c))]; },
        Log1p: function (n, t, c) { return [log1p(getParamValue('x', n, t, c))]; },
        Imag: function (n, t, c) { return [imag(getParamValue('x', n, t, c))]; },
        Neg: function (n, t, c) { return [neg(getParamValue('x', n, t, c))]; },
        Reciprocal: function (n, t, c) { return [reciprocal(getParamValue('x', n, t, c))]; },
        Real: function (n, t, c) { return [real(getParamValue('x', n, t, c))]; },
        Relu: function (n, t, c) { return [relu(getParamValue('x', n, t, c))]; },
        Round: function (n, t, c) { return [round(getParamValue('x', n, t, c))]; },
        Selu: function (n, t, c) { return [selu(getParamValue('x', n, t, c))]; },
        Sigmoid: function (n, t, c) { return [sigmoid(getParamValue('x', n, t, c))]; },
        Sin: function (n, t, c) { return [sin(getParamValue('x', n, t, c))]; },
        Sign: function (n, t, c) { return [sign(getParamValue('x', n, t, c))]; },
        Sinh: function (n, t, c) { return [sinh(getParamValue('x', n, t, c))]; },
        Softplus: function (n, t, c) { return [softplus(getParamValue('x', n, t, c))]; },
        Sqrt: function (n, t, c) { return [sqrt(getParamValue('x', n, t, c))]; },
        Square: function (n, t, c) { return [square(getParamValue('x', n, t, c))]; },
        Tanh: function (n, t, c) { return [tanh(getParamValue('x', n, t, c))]; },
        Tan: function (n, t, c) { return [tan(getParamValue('x', n, t, c))]; },
        ClipByValue: function (n, t, c) { return [clipByValue(getParamValue('x', n, t, c), getParamValue('clipValueMin', n, t, c), getParamValue('clipValueMax', n, t, c))]; },
        Relu6: function (n, t, c) { return [relu6(getParamValue('x', n, t, c))]; },
        // Rsqrt and IsNan read their input tensor directly by input name
        // rather than through a declared param, mirroring the original.
        Rsqrt: function (n, t, c) { return [rsqrt(getTensor(n.inputNames[0], t, c))]; },
        Prod: function (n, t, c) { return [prod(getParamValue('x', n, t, c), getParamValue('axes', n, t, c))]; },
        LeakyRelu: function (n, t, c) { return [leakyRelu(getParamValue('x', n, t, c), getParamValue('alpha', n, t, c))]; },
        Prelu: function (n, t, c) { return [prelu(getParamValue('x', n, t, c), getParamValue('alpha', n, t, c))]; },
        IsNan: function (n, t, c) { return [isNaN$1(getTensor(n.inputNames[0], t, c))]; }
    };
    return function (node, tensorMap, context) {
        // hasOwnProperty guard: op names that collide with Object.prototype
        // keys must still reject, exactly like the original switch default.
        if (!Object.prototype.hasOwnProperty.call(handlers, node.op)) {
            throw TypeError("Node type " + node.op + " is not implemented");
        }
        return handlers[node.op](node, tensorMap, context);
    };
})();
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Used by TensorList and TensorArray to verify that two element shapes are
 * compatible. A negative dimension acts as a wildcard, and a bare number
 * (meaning "unknown rank") is compatible with anything.
 * @param shapeA first shape, or a number for unknown rank
 * @param shapeB second shape, or a number for unknown rank
 * @param errorMessagePrefix prepended to the assertion failure message
 */
function assertShapesMatchAllowUndefinedSize(shapeA, shapeB, errorMessagePrefix) {
    if (errorMessagePrefix === void 0) { errorMessagePrefix = ''; }
    // A numeric "shape" denotes unknown rank, which matches any shape.
    if (typeof shapeA === 'number' || typeof shapeB === 'number') {
        return;
    }
    // Single lazy message closure shared by the rank and per-dim assertions.
    var mismatchMessage = function () {
        return errorMessagePrefix + (" Shapes " + shapeA + " and " + shapeB + " must match");
    };
    tfc.util.assert(shapeA.length === shapeB.length, mismatchMessage);
    shapeA.forEach(function (dimA, i) {
        var dimB = shapeB[i];
        // A negative dimension on either side is treated as "any size".
        tfc.util.assert(dimA < 0 || dimB < 0 || dimA === dimB, mismatchMessage);
    });
}
/**
 * Returns true iff the shape is fully defined: it has a known rank (is an
 * array, not a number) and every dimension is concrete (non-negative).
 * @param elementShape shape array, or a number meaning unknown rank
 */
function fullDefinedShape(elementShape) {
    // Unknown rank (a bare number) is by definition not fully defined.
    if (typeof elementShape === 'number') {
        return false;
    }
    return elementShape.every(function (dim) { return dim >= 0; });
}
/**
 * Generate the fully-defined output element shape by merging the list's
 * declared elementShape, the requested elementShape, and — when that merge
 * still contains unknowns — the shapes of the stored tensors.
 * @param listElementShape shape declared on the list (number = unknown rank)
 * @param tensors stored tensors whose shapes can fill in unknown dims
 * @param elementShape shape requested by the caller
 * @returns a fully defined shape array
 * @throws Error when the shape cannot be fully resolved
 */
function inferElementShape(listElementShape, tensors, elementShape) {
    var partialShape = mergeElementShape(listElementShape, elementShape);
    if (!fullDefinedShape(partialShape)) {
        // No tensors to learn the missing dimensions from: unresolvable.
        if (tensors.length === 0) {
            throw new Error("Tried to calculate elements of an empty list" +
                (" with non-fully-defined elementShape: " + partialShape));
        }
        // Fold each stored tensor's shape in to pin down unknown dims.
        tensors.forEach(function (tensor) {
            partialShape = mergeElementShape(tensor.shape, partialShape);
        });
    }
    if (!fullDefinedShape(partialShape)) {
        throw new Error("Non-fully-defined elementShape: " + partialShape);
    }
    return partialShape;
}
/**
 * Merge two element shapes, treating a bare number as "unknown rank" and a
 * negative dimension as "unknown size". Known dimensions must agree.
 * @param elementShapeA shape array or number (unknown rank)
 * @param elementShapeB shape array or number (unknown rank)
 * @returns the merged shape (one operand is returned as-is when the other
 *     has unknown rank)
 * @throws Error on rank mismatch or conflicting known dimensions
 */
function mergeElementShape(elementShapeA, elementShapeB) {
    // Unknown rank on either side: defer entirely to the other operand.
    if (typeof elementShapeA === 'number') {
        return elementShapeB;
    }
    if (typeof elementShapeB === 'number') {
        return elementShapeA;
    }
    if (elementShapeA.length !== elementShapeB.length) {
        throw new Error("Incompatible ranks during merge: " + elementShapeA + " vs. " + elementShapeB);
    }
    return elementShapeA.map(function (dimA, i) {
        var dimB = elementShapeB[i];
        // Two concrete dimensions must be identical to merge.
        if (dimA >= 0 && dimB >= 0 && dimA !== dimB) {
            throw new Error("Incompatible shape during merge: " + elementShapeA + " vs. " + elementShapeB);
        }
        return dimA >= 0 ? dimA : dimB;
    });
}
+
/**
 * The TensorArray object keeps an array of Tensors. It
 * allows reading from the array and writing to the array.
 *
 * Each slot tracks { tensor, read, written, cleared } flags; slots are
 * write-once and (optionally) cleared after their first read.
 */
var TensorArray = /** @class */ (function () {
    /**
     * @param name identifier used in error messages (and, presumably, as the
     *     key in the executor's tensor-array map — confirm against callers).
     * @param dtype dtype every written tensor must match.
     * @param maxSize write-index bound enforced when dynamicSize is false.
     * @param elementShape expected per-element shape; if null/empty it is
     *     adopted from the first tensor written.
     * @param identicalElementShapes stored but not read by any method of
     *     this class.
     * @param dynamicSize when true, writes past maxSize are allowed.
     * @param clearAfterRead when true, each slot may only be read once.
     */
    function TensorArray(name, dtype, maxSize, elementShape, identicalElementShapes, dynamicSize, clearAfterRead) {
        this.name = name;
        this.dtype = dtype;
        this.maxSize = maxSize;
        this.elementShape = elementShape;
        this.identicalElementShapes = identicalElementShapes;
        this.dynamicSize = dynamicSize;
        this.clearAfterRead = clearAfterRead;
        this.tensors = [];
        this.closed_ = false;
        // Dummy scalar whose engine-assigned id serves as this array's unique
        // handle; kept so tidy scopes cannot dispose it.
        this.idTensor = tfc.scalar(0);
        tfc.keep(this.idTensor);
    }
    // Unique id of this array, borrowed from the backing idTensor.
    Object.defineProperty(TensorArray.prototype, "id", {
        get: function () {
            return this.idTensor.id;
        },
        enumerable: true,
        configurable: true
    });
    // Whether clearAndClose() has been called; closed arrays reject reads/writes.
    Object.defineProperty(TensorArray.prototype, "closed", {
        get: function () {
            return this.closed_;
        },
        enumerable: true,
        configurable: true
    });
    /**
     * Dispose the tensors and idTensor and mark the TensoryArray as closed.
     * @param keepIds optional set of tensor ids to leave undisposed (e.g.
     *     tensors that are also model outputs).
     */
    TensorArray.prototype.clearAndClose = function (keepIds) {
        this.tensors.forEach(function (tensor) {
            if (keepIds == null || !keepIds.has(tensor.tensor.id)) {
                tensor.tensor.dispose();
            }
        });
        this.tensors = [];
        this.closed_ = true;
        this.idTensor.dispose();
    };
    // Number of slots currently in the array (written or not).
    TensorArray.prototype.size = function () {
        return this.tensors.length;
    };
    /**
     * Read the value at location index in the TensorArray.
     * @param index Number the index to read from.
     * @throws Error if closed, out of range, or the slot was already cleared.
     */
    TensorArray.prototype.read = function (index) {
        if (this.closed_) {
            throw new Error("TensorArray " + this.name + " has already been closed.");
        }
        if (index < 0 || index >= this.size()) {
            throw new Error("Tried to read from index " + index + ", but array size is: " + this.size());
        }
        var tensorWithState = this.tensors[index];
        if (tensorWithState.cleared) {
            throw new Error("TensorArray " + this.name + ": Could not read index " + index + " twice because it was cleared after a previous read " +
                "(perhaps try setting clear_after_read = false?).");
        }
        // In clear-after-read mode the slot becomes unreadable from now on;
        // note the tensor itself is NOT disposed here.
        if (this.clearAfterRead) {
            tensorWithState.cleared = true;
        }
        tensorWithState.read = true;
        return tensorWithState.tensor;
    };
    /**
     * Helper method to read multiple tensors from the specified indices.
     */
    TensorArray.prototype.readMany = function (indices) {
        var _this = this;
        return indices.map(function (index) { return _this.read(index); });
    };
    /**
     * Write value into the index of the TensorArray.
     * @param index number the index to write to.
     * @param tensor
     * @throws Error if closed, out of bounds (non-dynamic), dtype/shape
     *     mismatch, or the slot was already read or written.
     */
    TensorArray.prototype.write = function (index, tensor) {
        if (this.closed_) {
            throw new Error("TensorArray " + this.name + " has already been closed.");
        }
        if (index < 0 || !this.dynamicSize && index >= this.maxSize) {
            throw new Error("Tried to write to index " + index + ", but array is not resizeable and size is: " + this.maxSize);
        }
        var t = this.tensors[index] || {};
        if (tensor.dtype !== this.dtype) {
            throw new Error("TensorArray " + this.name + ": Could not write to TensorArray index " + index + ",\n because the value dtype is " + tensor.dtype + ", but TensorArray dtype is " + this.dtype + ".");
        }
        // Adopt the shape from the first write to an unknown-shape tensor array.
        if (this.size() === 0 &&
            (this.elementShape == null || this.elementShape.length === 0)) {
            this.elementShape = tensor.shape;
        }
        assertShapesMatchAllowUndefinedSize(this.elementShape, tensor.shape, "TensorArray " + this.name + ": Could not write to TensorArray index " + index + ".");
        if (t.read) {
            throw new Error("TensorArray " + this.name + ": Could not write to TensorArray index " + index + ", because it has already been read.");
        }
        if (t.written) {
            throw new Error("TensorArray " + this.name + ": Could not write to TensorArray index " + index + ", because it has already been written.");
        }
        t.tensor = tensor;
        // Pin the tensor so enclosing tidy scopes do not reclaim it.
        tfc.keep(tensor);
        t.written = true;
        this.tensors[index] = t;
    };
    /**
     * Helper method to write multiple tensors to the specified indices.
     */
    TensorArray.prototype.writeMany = function (indices, tensors) {
        var _this = this;
        if (indices.length !== tensors.length) {
            throw new Error("TensorArray " + this.name + ": could not write multiple tensors," +
                ("because the index size: " + indices.length + " is not the same as tensors size: " + tensors.length + "."));
        }
        indices.forEach(function (i, index) { return _this.write(i, tensors[index]); });
    };
    /**
     * Return selected values in the TensorArray as a packed Tensor. All of
     * selected values must have been written and their shapes must all match.
     * @param [indices] number[] Optional. Taking values in [0, max_value). If the
     * TensorArray is not dynamic, max_value=size(). If not specified returns
     * all tensors in the original order.
     * @param [dtype]
     */
    TensorArray.prototype.gather = function (indices, dtype) {
        if (!!dtype && dtype !== this.dtype) {
            throw new Error("TensorArray dtype is " + this.dtype + " but gather requested dtype " + dtype);
        }
        if (!indices) {
            indices = [];
            for (var i = 0; i < this.size(); i++) {
                indices.push(i);
            }
        }
        else {
            // Indices beyond the current size are silently dropped.
            indices = indices.slice(0, this.size());
        }
        if (indices.length === 0) {
            return tfc.tensor([], [0].concat(this.elementShape));
        }
        // Read all the PersistentTensors into a vector to keep track of
        // their memory. NOTE(review): this goes through read(), so gathered
        // slots are marked read and may be cleared when clearAfterRead is set.
        var tensors = this.readMany(indices);
        assertShapesMatchAllowUndefinedSize(this.elementShape, tensors[0].shape, 'TensorArray shape mismatch: ');
        return tfc.stack(tensors, 0);
    };
    /**
     * Return the values in the TensorArray as a concatenated Tensor.
     */
    TensorArray.prototype.concat = function (dtype) {
        if (!!dtype && dtype !== this.dtype) {
            throw new Error("TensorArray dtype is " + this.dtype + " but concat requested dtype " + dtype);
        }
        if (this.size() === 0) {
            return tfc.tensor([], [0].concat(this.elementShape));
        }
        var indices = [];
        for (var i = 0; i < this.size(); i++) {
            indices.push(i);
        }
        // Collect all the tensors from the tensors array.
        var tensors = this.readMany(indices);
        assertShapesMatchAllowUndefinedSize(this.elementShape, tensors[0].shape, "TensorArray shape mismatch: tensor array shape (" + this.elementShape + ") vs first tensor shape (" + tensors[0].shape + ")");
        return tfc.concat(tensors, 0);
    };
    /**
     * Scatter the values of a Tensor in specific indices of a TensorArray.
     * @param indices nummber[] values in [0, max_value). If the
     *  TensorArray is not dynamic, max_value=size().
     * @param tensor Tensor input tensor.
     */
    TensorArray.prototype.scatter = function (indices, tensor) {
        if (tensor.dtype !== this.dtype) {
            throw new Error("TensorArray dtype is " + this.dtype + " but tensor has dtype " + tensor.dtype);
        }
        if (indices.length !== tensor.shape[0]) {
            throw new Error("Expected len(indices) == tensor.shape[0], but saw: " + indices.length + " vs. " + tensor.shape[0]);
        }
        var maxIndex = Math.max.apply(Math, __spread(indices));
        if (!this.dynamicSize && maxIndex >= this.maxSize) {
            throw new Error("Max index must be < array size (" + maxIndex + " vs. " + this.maxSize + ")");
        }
        // One write per row of the input tensor (unstacked along axis 0).
        this.writeMany(indices, tfc.unstack(tensor, 0));
    };
    /**
     * Split the values of a Tensor into the TensorArray.
     * @param length number[] with the lengths to use when splitting value along
     *    its first dimension.
     * @param tensor Tensor, the tensor to split.
     */
    TensorArray.prototype.split = function (length, tensor) {
        var _this = this;
        if (tensor.dtype !== this.dtype) {
            throw new Error("TensorArray dtype is " + this.dtype + " but tensor has dtype " + tensor.dtype);
        }
        var totalLength = 0;
        // Prefix sums: cumulativeLengths[i-1] is the row offset of piece i.
        var cumulativeLengths = length.map(function (len) {
            totalLength += len;
            return totalLength;
        });
        if (totalLength !== tensor.shape[0]) {
            throw new Error("Expected sum of lengths to be equal to\n tensor.shape[0], but sum of lengths is\n " + totalLength + ", and tensor's shape is: " + tensor.shape);
        }
        if (!this.dynamicSize && length.length !== this.maxSize) {
            throw new Error("TensorArray's size is not equal to the size of lengths (" + this.maxSize + " vs. " + length.length + "), " +
                'and the TensorArray is not marked as dynamically resizeable');
        }
        var elementPerRow = totalLength === 0 ? 0 : tensor.size / totalLength;
        var tensors = [];
        // The pieces are returned out of tidy (via the callback's return
        // value), so they survive; intermediates are reclaimed.
        tfc.tidy(function () {
            tensor = tfc.reshape(tensor, [1, totalLength, elementPerRow]);
            for (var i = 0; i < length.length; ++i) {
                var previousLength = (i === 0) ? 0 : cumulativeLengths[i - 1];
                var indices_1 = [0, previousLength, 0];
                var sizes = [1, length[i], elementPerRow];
                tensors[i] = tfc.reshape(tfc.slice(tensor, indices_1, sizes), _this.elementShape);
            }
            return tensors;
        });
        var indices = [];
        for (var i = 0; i < length.length; i++) {
            indices[i] = i;
        }
        this.writeMany(indices, tensors);
    };
    return TensorArray;
}());
+
/**
 * TensorList stores a container of `tf.Tensor` objects, which are accessible
 * via tensors field.
 *
 * In order to get a copy of the underlying list, use the copy method:
 * ```
 * TensorList b = a.copy();
 * b.tensors().pushBack(t); // This does not modify a.tensors().
 * ```
 *
 * Note that this is not a deep copy: the memory locations of the underlying
 * tensors will still point to the same locations of the corresponding tensors
 * in the original.
 */
var TensorList = /** @class */ (function () {
    /**
     *
     * @param tensors list of tensors
     * @param elementShape shape of each tensor, this can be a single number (any
     * shape is allowed) or partial shape (dim = -1).
     * @param elementDtype data type of each tensor
     * @param maxNumElements The maximum allowed size of `tensors`. Defaults to -1
     * meaning that the size of `tensors` is unbounded.
     */
    function TensorList(tensors, elementShape, elementDtype, maxNumElements) {
        if (maxNumElements === void 0) { maxNumElements = -1; }
        this.tensors = tensors;
        this.elementShape = elementShape;
        this.elementDtype = elementDtype;
        if (tensors != null) {
            // Validate dtype/shape of every provided tensor and pin it so
            // enclosing tidy scopes cannot reclaim list contents.
            tensors.forEach(function (tensor) {
                if (elementDtype !== tensor.dtype) {
                    throw new Error("Invalid data types; op elements " + elementDtype + ", but list elements " + tensor.dtype);
                }
                assertShapesMatchAllowUndefinedSize(elementShape, tensor.shape, 'TensorList shape mismatch: ');
                tfc.keep(tensor);
            });
        }
        // Dummy scalar whose engine-assigned id serves as this list's unique
        // handle; kept so tidy scopes cannot dispose it.
        this.idTensor = tfc.scalar(0);
        this.maxNumElements = maxNumElements;
        tfc.keep(this.idTensor);
    }
    // Unique id of this list, borrowed from the backing idTensor.
    Object.defineProperty(TensorList.prototype, "id", {
        get: function () {
            return this.idTensor.id;
        },
        enumerable: true,
        configurable: true
    });
    /**
     * Get a new TensorList containing a copy of the underlying tensor container.
     * NOTE(review): the copy does not forward maxNumElements, so it is
     * unbounded (-1) regardless of the original's cap — confirm intended.
     */
    TensorList.prototype.copy = function () {
        return new TensorList(__spread(this.tensors), this.elementShape, this.elementDtype);
    };
    /**
     * Dispose the tensors and idTensor and clear the tensor list.
     * @param keepIds optional set of tensor ids to leave undisposed.
     */
    TensorList.prototype.clearAndClose = function (keepIds) {
        this.tensors.forEach(function (tensor) {
            if (keepIds == null || !keepIds.has(tensor.id)) {
                tensor.dispose();
            }
        });
        this.tensors.length = 0;
        this.idTensor.dispose();
    };
    /**
     * The size of the tensors in the tensor list.
     */
    TensorList.prototype.size = function () {
        return this.tensors.length;
    };
    /**
     * Return a tensor that stacks a list of rank-R tf.Tensors into one rank-(R+1)
     * tf.Tensor.
     * @param elementShape shape of each tensor
     * @param elementDtype data type of each tensor
     * @param numElements the number of elements to stack
     */
    TensorList.prototype.stack = function (elementShape, elementDtype, numElements) {
        var _this = this;
        if (numElements === void 0) { numElements = -1; }
        if (elementDtype !== this.elementDtype) {
            throw new Error("Invalid data types; op elements " + elementDtype + ", but list elements " + this.elementDtype);
        }
        if (numElements !== -1 && this.tensors.length !== numElements) {
            throw new Error("Operation expected a list with " + numElements + " elements but got a list with " + this.tensors.length + " elements.");
        }
        assertShapesMatchAllowUndefinedSize(elementShape, this.elementShape, 'TensorList shape mismatch: ');
        var outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape);
        return tfc.tidy(function () {
            var reshapedTensors = _this.tensors.map(function (tensor) { return tfc.reshape(tensor, outputElementShape); });
            return tfc.stack(reshapedTensors, 0);
        });
    };
    /**
     * Pop a tensor from the end of the list.
     * @param elementShape shape of the tensor
     * @param elementDtype data type of the tensor
     */
    TensorList.prototype.popBack = function (elementShape, elementDtype) {
        if (elementDtype !== this.elementDtype) {
            throw new Error("Invalid data types; op elements " + elementDtype + ", but list elements " + this.elementDtype);
        }
        if (this.size() === 0) {
            throw new Error('Trying to pop from an empty list.');
        }
        var outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape);
        var tensor = this.tensors.pop();
        assertShapesMatchAllowUndefinedSize(tensor.shape, elementShape, 'TensorList shape mismatch: ');
        return tfc.reshape(tensor, outputElementShape);
    };
    /**
     * Push a tensor to the end of the list.
     * @param tensor Tensor to be pushed.
     */
    TensorList.prototype.pushBack = function (tensor) {
        if (tensor.dtype !== this.elementDtype) {
            throw new Error("Invalid data types; op elements " + tensor.dtype + ", but list elements " + this.elementDtype);
        }
        assertShapesMatchAllowUndefinedSize(tensor.shape, this.elementShape, 'TensorList shape mismatch: ');
        // maxNumElements of -1 (unbounded) never equals size(), so this only
        // trips when a finite cap has been reached.
        if (this.maxNumElements === this.size()) {
            throw new Error("Trying to push element into a full list.");
        }
        tfc.keep(tensor);
        this.tensors.push(tensor);
    };
    /**
     * Update the size of the list.
     * @param size the new size of the list.
     * NOTE(review): shrinking truncates via tensors.length = size without
     * disposing the dropped tensors (they were kept) — potential leak to
     * confirm against upstream behavior.
     */
    TensorList.prototype.resize = function (size) {
        if (size < 0) {
            throw new Error("TensorListResize expects size to be non-negative. Got: " + size);
        }
        if (this.maxNumElements !== -1 && size > this.maxNumElements) {
            throw new Error("TensorListResize input size " + size + " is greater maxNumElement " + this.maxNumElements + ".");
        }
        this.tensors.length = size;
    };
    /**
     * Retrieve the element at the provided index
     * @param elementShape shape of the tensor
     * @param elementDtype dtype of the tensor
     * @param elementIndex index of the tensor
     */
    TensorList.prototype.getItem = function (elementIndex, elementShape, elementDtype) {
        if (elementDtype !== this.elementDtype) {
            throw new Error("Invalid data types; op elements " + elementDtype + ", but list elements " + this.elementDtype);
        }
        // NOTE(review): bound check uses `>` not `>=`; elementIndex === length
        // slips through and is only caught by the null check below, with a
        // less precise message — looks like an off-by-one to confirm.
        if (elementIndex < 0 || elementIndex > this.tensors.length) {
            throw new Error("Trying to access element " + elementIndex + " in a list with " + this.tensors.length + " elements.");
        }
        if (this.tensors[elementIndex] == null) {
            throw new Error("element at index " + elementIndex + " is null.");
        }
        assertShapesMatchAllowUndefinedSize(this.tensors[elementIndex].shape, elementShape, 'TensorList shape mismatch: ');
        var outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape);
        return tfc.reshape(this.tensors[elementIndex], outputElementShape);
    };
    /**
     * Set the tensor at the index
     * @param elementIndex index of the tensor
     * @param tensor the tensor to be inserted into the list
     */
    TensorList.prototype.setItem = function (elementIndex, tensor) {
        if (tensor.dtype !== this.elementDtype) {
            throw new Error("Invalid data types; op elements " + tensor.dtype + ", but list elements " + this.elementDtype);
        }
        if (elementIndex < 0 ||
            this.maxNumElements !== -1 && elementIndex >= this.maxNumElements) {
            throw new Error("Trying to set element " + elementIndex + " in a list with max " + this.maxNumElements + " elements.");
        }
        assertShapesMatchAllowUndefinedSize(this.elementShape, tensor.shape, 'TensorList shape mismatch: ');
        // NOTE(review): an existing tensor at this slot is overwritten without
        // being disposed — confirm the caller owns its lifetime.
        tfc.keep(tensor);
        this.tensors[elementIndex] = tensor;
    };
    /**
     * Return selected values in the TensorList as a stacked Tensor. All of
     * selected values must have been written and their shapes must all match.
     * @param indices indices of tensors to gather
     * @param elementDtype output tensor dtype
     * @param elementShape output tensor element shape
     */
    TensorList.prototype.gather = function (indices, elementDtype, elementShape) {
        var _this = this;
        if (elementDtype !== this.elementDtype) {
            throw new Error("Invalid data types; op elements " + elementDtype + ", but list elements " + this.elementDtype);
        }
        assertShapesMatchAllowUndefinedSize(this.elementShape, elementShape, 'TensorList shape mismatch: ');
        // When indices is greater than the size of the list, indices beyond the
        // size of the list are ignored.
        indices = indices.slice(0, this.size());
        var outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape);
        if (indices.length === 0) {
            return tfc.tensor([], [0].concat(outputElementShape));
        }
        return tfc.tidy(function () {
            var tensors = indices.map(function (i) { return tfc.reshape(_this.tensors[i], outputElementShape); });
            return tfc.stack(tensors, 0);
        });
    };
    /**
     * Return the values in the TensorList as a concatenated Tensor.
     * @param elementDtype output tensor dtype
     * @param elementShape output tensor element shape
     */
    TensorList.prototype.concat = function (elementDtype, elementShape) {
        var _this = this;
        if (!!elementDtype && elementDtype !== this.elementDtype) {
            throw new Error("TensorList dtype is " + this.elementDtype + " but concat requested dtype " + elementDtype);
        }
        assertShapesMatchAllowUndefinedSize(this.elementShape, elementShape, 'TensorList shape mismatch: ');
        var outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape);
        if (this.size() === 0) {
            return tfc.tensor([], [0].concat(outputElementShape));
        }
        return tfc.tidy(function () {
            var tensors = _this.tensors.map(function (t) { return tfc.reshape(t, outputElementShape); });
            return tfc.concat(tensors, 0);
        });
    };
    return TensorList;
}());
/**
 * Creates a TensorList which, when stacked, has the value of tensor.
 * @param tensor source tensor; axis 0 becomes the list dimension.
 * @param elementShape expected shape of each list element.
 * @param elementDtype required dtype of the source tensor.
 * @returns a TensorList holding the unstacked rows of `tensor`.
 * @throws Error when the tensor is a scalar or its dtype mismatches.
 */
function fromTensor(tensor, elementShape, elementDtype) {
    // A scalar has no axis 0 to unstack along.
    if (tensor.shape.length < 1) {
        throw new Error("Tensor must be at least a vector, but saw shape: " + tensor.shape);
    }
    if (tensor.dtype !== elementDtype) {
        throw new Error("Invalid data types; op elements " + tensor.dtype + ", but list elements " + elementDtype);
    }
    // Element shape is the tensor's shape minus the leading (list) dimension.
    assertShapesMatchAllowUndefinedSize(tensor.shape.slice(1), elementShape, 'TensorList shape mismatch: ');
    return new TensorList(tfc.unstack(tensor), elementShape, tensor.dtype);
}
/**
 * Return a TensorList of the given size with empty elements.
 * @param elementShape the shape of the future elements of the list
 * @param elementDtype the desired type of elements in the list
 * @param numElements the number of elements to reserve; becomes the list's
 *     maxNumElements cap (-1 means unbounded)
 * @returns an empty TensorList capped at numElements entries.
 */
function reserve(elementShape, elementDtype, numElements) {
    return new TensorList([], elementShape, elementDtype, numElements);
}
/**
 * Put tensors at specific indices of a stacked tensor into a TensorList.
 * @param tensor input tensor; row i goes to list slot indices[i].
 * @param indices list of indices on how to scatter the tensor.
 * @param elementShape the shape of the future elements of the list.
 * @param numElements the number of elements to scatter (list cap; null or
 *     -1 means unbounded).
 * @returns the populated TensorList.
 * @throws Error on index-count mismatch or an index beyond numElements.
 */
function scatter(tensor, indices, elementShape, numElements) {
    // One index is required per row of the stacked input.
    if (indices.length !== tensor.shape[0]) {
        throw new Error("Expected len(indices) == tensor.shape[0], but saw: " + indices.length + " vs. " + tensor.shape[0]);
    }
    var maxIndex = Math.max.apply(Math, indices);
    if (numElements != null && numElements !== -1 && maxIndex >= numElements) {
        throw new Error("Max index must be < array size (" + maxIndex + " vs. " + numElements + ")");
    }
    var list = new TensorList([], elementShape, tensor.dtype, numElements);
    var rows = tfc.unstack(tensor, 0);
    indices.forEach(function (listIndex, rowIndex) {
        list.setItem(listIndex, rows[rowIndex]);
    });
    return list;
}
/**
 * Split the values of a Tensor into a TensorList.
 * @param tensor the tensor to split.
 * @param length the lengths to use when splitting value along
 *    its first dimension.
 * @param elementShape the shape of the future elements of the list
 * @returns a TensorList with one tensor per entry of `length`.
 * @throws Error when the lengths do not sum to tensor.shape[0].
 */
function split(tensor, length, elementShape) {
    var totalLength = 0;
    // Prefix sums of `length`; cumulativeLengths[i-1] is the row offset
    // where piece i starts.
    var cumulativeLengths = length.map(function (len) {
        totalLength += len;
        return totalLength;
    });
    if (totalLength !== tensor.shape[0]) {
        throw new Error("Expected sum of lengths to be equal to\n tensor.shape[0], but sum of lengths is\n " + totalLength + ", and tensor's shape is: " + tensor.shape);
    }
    var shapeWithoutFirstDim = tensor.shape.slice(1);
    var outputElementShape = mergeElementShape(shapeWithoutFirstDim, elementShape);
    var elementPerRow = totalLength === 0 ? 0 : tensor.size / totalLength;
    var tensors = tfc.tidy(function () {
        var tensors = [];
        // Flatten to [1, rows, elementsPerRow] so each piece is one slice.
        tensor = tfc.reshape(tensor, [1, totalLength, elementPerRow]);
        for (var i = 0; i < length.length; ++i) {
            var previousLength = (i === 0) ? 0 : cumulativeLengths[i - 1];
            var indices = [0, previousLength, 0];
            var sizes = [1, length[i], elementPerRow];
            tensors[i] = tfc.reshape(tfc.slice(tensor, indices, sizes), outputElementShape);
        }
        // `tensor` was reassigned to the reshaped copy above, so this does
        // not dispose the caller's original tensor.
        tensor.dispose();
        return tensors;
    });
    // NOTE(review): `tensor.dtype` is read after dispose() — tfjs appears to
    // keep dtype metadata on disposed tensors, but confirm this is supported.
    var list = new TensorList([], elementShape, tensor.dtype, length.length);
    for (var i = 0; i < tensors.length; i++) {
        list.setItem(i, tensors[i]);
    }
    return list;
}
+
+ var _this$2 = undefined;
+ var executeOp$h = function (node, tensorMap, context) { return __awaiter(_this$2, void 0, void 0, function () {
+ var _a, thenFunc, elseFunc, cond, args, condValue, bodyFunc, condFunc, args, condResult, argIds_1, condValue, result, _loop_1, pred, pred, data, inputName, data, frameId, data, data, data, size, dtype, elementShape, dynamicSize, clearAfterRead, identicalElementShapes, name, tensorArray, id, index, writeTensor, writeTensorArray, readId, readIndex, readTensorArray, gatherId, gatherIndices, gatherDtype, gatherTensorArray, scatterId, scatterIndices, scatterTensor, scatterTensorArray, concatId, concatTensorArray, concatDtype, splitId, splitTensor, lengths, splitTensorArray, sizeId, sizeTensorArray, closeId, closeTensorArray, idTensor, index, writeTensor, tensorList, idTensor, readIndex, elementShape, elementDType, tensorList, scatterIndices, scatterTensor, elementShape, numElements, tensorList, elementShape, elementDtype, numElementsParam, numElements, tensorList, gatherId, gatherIndices, elementShape, elementDtype, tensorList, idTensor, elementShape, elementDtype, numElements, tensorList, tensor, elementShape, elementDtype, tensorList, concatId, tensorList, concatDtype, elementShape, idTensor, writeTensor, tensorList, idTensor, elementShape, elementDType, tensorList, splitTensor, elementShape, lengths, tensorList;
+ return __generator(this, function (_b) {
+ switch (_b.label) {
+ case 0:
+ _a = node.op;
+ switch (_a) {
+ case 'If': return [3 /*break*/, 1];
+ case 'StatelessIf': return [3 /*break*/, 1];
+ case 'While': return [3 /*break*/, 3];
+ case 'StatelessWhile': return [3 /*break*/, 3];
+ case 'LoopCond': return [3 /*break*/, 9];
+ case 'Switch': return [3 /*break*/, 10];
+ case 'Merge': return [3 /*break*/, 12];
+ case 'Enter': return [3 /*break*/, 13];
+ case 'Exit': return [3 /*break*/, 14];
+ case 'NextIteration': return [3 /*break*/, 15];
+ case 'TensorArrayV3': return [3 /*break*/, 16];
+ case 'TensorArrayWriteV3': return [3 /*break*/, 17];
+ case 'TensorArrayReadV3': return [3 /*break*/, 18];
+ case 'TensorArrayGatherV3': return [3 /*break*/, 19];
+ case 'TensorArrayScatterV3': return [3 /*break*/, 20];
+ case 'TensorArrayConcatV3': return [3 /*break*/, 21];
+ case 'TensorArraySplitV3': return [3 /*break*/, 22];
+ case 'TensorArraySizeV3': return [3 /*break*/, 23];
+ case 'TensorArrayCloseV3': return [3 /*break*/, 24];
+ case 'TensorListSetItem': return [3 /*break*/, 25];
+ case 'TensorListGetItem': return [3 /*break*/, 26];
+ case 'TensorListScatterV2': return [3 /*break*/, 27];
+ case 'TensorListScatter': return [3 /*break*/, 27];
+ case 'TensorListReserve': return [3 /*break*/, 28];
+ case 'EmptyTensorList': return [3 /*break*/, 28];
+ case 'TensorListGather': return [3 /*break*/, 29];
+ case 'TensorListStack': return [3 /*break*/, 30];
+ case 'TensorListFromTensor': return [3 /*break*/, 31];
+ case 'TensorListConcat': return [3 /*break*/, 32];
+ case 'TensorListPushBack': return [3 /*break*/, 33];
+ case 'TensorListPopBack': return [3 /*break*/, 34];
+ case 'TensorListSplit': return [3 /*break*/, 35];
+ }
+ return [3 /*break*/, 36];
+ case 1:
+ thenFunc = getParamValue('thenBranch', node, tensorMap, context);
+ elseFunc = getParamValue('elseBranch', node, tensorMap, context);
+ cond = getParamValue('cond', node, tensorMap, context);
+ args = getParamValue('args', node, tensorMap, context);
+ return [4 /*yield*/, cond.data()];
+ case 2:
+ condValue = _b.sent();
+ if (condValue[0]) {
+ return [2 /*return*/, context.functionMap[thenFunc].executeFunctionAsync(args, context.tensorArrayMap, context.tensorListMap)];
+ }
+ else {
+ return [2 /*return*/, context.functionMap[elseFunc].executeFunctionAsync(args, context.tensorArrayMap, context.tensorListMap)];
+ }
+ case 3:
+ bodyFunc = getParamValue('body', node, tensorMap, context);
+ condFunc = getParamValue('cond', node, tensorMap, context);
+ args = getParamValue('args', node, tensorMap, context);
+ return [4 /*yield*/, context.functionMap[condFunc].executeFunctionAsync(args, context.tensorArrayMap, context.tensorListMap)];
+ case 4:
+ condResult = (_b.sent());
+ argIds_1 = args.map(function (tensor) { return tensor.id; });
+ return [4 /*yield*/, condResult[0].data()];
+ case 5:
+ condValue = _b.sent();
+ // Dispose the intermediate tensors for condition function
+ condResult.forEach(function (tensor) {
+ if (!tensor.kept && argIds_1.indexOf(tensor.id) === -1) {
+ tensor.dispose();
+ }
+ });
+ result = args;
+ _loop_1 = function () {
+ var origResult, resultIds, condResult_1;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ origResult = result;
+ return [4 /*yield*/, context.functionMap[bodyFunc].executeFunctionAsync(result, context.tensorArrayMap, context.tensorListMap)];
+ case 1:
+ // Execution the body of the loop
+ result = _a.sent();
+ resultIds = result.map(function (tensor) { return tensor.id; });
+ // Dispose the intermediate tensor for body function that is not global
+ // kept, not input/output of the body function
+ origResult.forEach(function (tensor) {
+ if (!tensor.kept && argIds_1.indexOf(tensor.id) === -1 &&
+ resultIds.indexOf(tensor.id) === -1) {
+ tensor.dispose();
+ }
+ });
+ return [4 /*yield*/, context.functionMap[condFunc].executeFunctionAsync(result, context.tensorArrayMap, context.tensorListMap)];
+ case 2:
+ condResult_1 = (_a.sent());
+ return [4 /*yield*/, condResult_1[0].data()];
+ case 3:
+ condValue = _a.sent();
+ // Dispose the intermediate tensors for condition function
+ condResult_1.forEach(function (tensor) {
+ if (!tensor.kept && argIds_1.indexOf(tensor.id) === -1 &&
+ resultIds.indexOf(tensor.id) === -1) {
+ tensor.dispose();
+ }
+ });
+ return [2 /*return*/];
+ }
+ });
+ };
+ _b.label = 6;
+ case 6:
+ if (!condValue[0]) return [3 /*break*/, 8];
+ return [5 /*yield**/, _loop_1()];
+ case 7:
+ _b.sent();
+ return [3 /*break*/, 6];
+ case 8: return [2 /*return*/, result];
+ case 9:
+ {
+ pred = getParamValue('pred', node, tensorMap, context);
+ return [2 /*return*/, [cloneTensor(pred)]];
+ }
+ case 10:
+ pred = getParamValue('pred', node, tensorMap, context);
+ data = getParamValue('data', node, tensorMap, context);
+ if (!data.kept) {
+ data = cloneTensor(data);
+ }
+ return [4 /*yield*/, pred.data()];
+ case 11:
+ // Outputs nodes :0 => false, :1 => true
+ return [2 /*return*/, (_b.sent())[0] ? [undefined, data] : [data, undefined]];
+ case 12:
+ {
+ inputName = node.inputNames.find(function (name) { return getTensor(name, tensorMap, context) !== undefined; });
+ if (inputName) {
+ data = getTensor(inputName, tensorMap, context);
+ return [2 /*return*/, [cloneTensor(data)]];
+ }
+ return [2 /*return*/, undefined];
+ }
+ case 13:
+ {
+ frameId = getParamValue('frameName', node, tensorMap, context);
+ data = getParamValue('tensor', node, tensorMap, context);
+ context.enterFrame(frameId);
+ return [2 /*return*/, [cloneTensor(data)]];
+ }
+ case 14:
+ {
+ data = getParamValue('tensor', node, tensorMap, context);
+ context.exitFrame();
+ return [2 /*return*/, [cloneTensor(data)]];
+ }
+ case 15:
+ {
+ data = getParamValue('tensor', node, tensorMap, context);
+ context.nextIteration();
+ return [2 /*return*/, [cloneTensor(data)]];
+ }
+ case 16:
+ {
+ size = getParamValue('size', node, tensorMap, context);
+ dtype = getParamValue('dtype', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ dynamicSize = getParamValue('dynamicSize', node, tensorMap, context);
+ clearAfterRead = getParamValue('clearAfterRead', node, tensorMap, context);
+ identicalElementShapes = getParamValue('identicalElementShapes', node, tensorMap, context);
+ name = getParamValue('name', node, tensorMap, context);
+ tensorArray = new TensorArray(name, dtype, size, elementShape, identicalElementShapes, dynamicSize, clearAfterRead);
+ context.addTensorArray(tensorArray);
+ return [2 /*return*/, [tensorArray.idTensor, tfc.scalar(1.0)]];
+ }
+ case 17:
+ {
+ id = getParamValue('tensorArrayId', node, tensorMap, context);
+ index = getParamValue('index', node, tensorMap, context);
+ writeTensor = getParamValue('tensor', node, tensorMap, context);
+ writeTensorArray = context.getTensorArray(id.id);
+ writeTensorArray.write(index, writeTensor);
+ return [2 /*return*/, [writeTensorArray.idTensor]];
+ }
+ case 18:
+ {
+ readId = getParamValue('tensorArrayId', node, tensorMap, context);
+ readIndex = getParamValue('index', node, tensorMap, context);
+ readTensorArray = context.getTensorArray(readId.id);
+ return [2 /*return*/, [readTensorArray.read(readIndex)]];
+ }
+ case 19:
+ {
+ gatherId = getParamValue('tensorArrayId', node, tensorMap, context);
+ gatherIndices = getParamValue('indices', node, tensorMap, context);
+ gatherDtype = getParamValue('dtype', node, tensorMap, context);
+ gatherTensorArray = context.getTensorArray(gatherId.id);
+ return [2 /*return*/, [gatherTensorArray.gather(gatherIndices, gatherDtype)]];
+ }
+ case 20:
+ {
+ scatterId = getParamValue('tensorArrayId', node, tensorMap, context);
+ scatterIndices = getParamValue('indices', node, tensorMap, context);
+ scatterTensor = getParamValue('tensor', node, tensorMap, context);
+ scatterTensorArray = context.getTensorArray(scatterId.id);
+ scatterTensorArray.scatter(scatterIndices, scatterTensor);
+ return [2 /*return*/, [scatterTensorArray.idTensor]];
+ }
+ case 21:
+ {
+ concatId = getParamValue('tensorArrayId', node, tensorMap, context);
+ concatTensorArray = context.getTensorArray(concatId.id);
+ concatDtype = getParamValue('dtype', node, tensorMap, context);
+ return [2 /*return*/, [concatTensorArray.concat(concatDtype)]];
+ }
+ case 22:
+ {
+ splitId = getParamValue('tensorArrayId', node, tensorMap, context);
+ splitTensor = getParamValue('tensor', node, tensorMap, context);
+ lengths = getParamValue('lengths', node, tensorMap, context);
+ splitTensorArray = context.getTensorArray(splitId.id);
+ splitTensorArray.split(lengths, splitTensor);
+ return [2 /*return*/, [splitTensorArray.idTensor]];
+ }
+ case 23:
+ {
+ sizeId = getParamValue('tensorArrayId', node, tensorMap, context);
+ sizeTensorArray = context.getTensorArray(sizeId.id);
+ return [2 /*return*/, [tfc.scalar(sizeTensorArray.size(), 'int32')]];
+ }
+ case 24:
+ {
+ closeId = getParamValue('tensorArrayId', node, tensorMap, context);
+ closeTensorArray = context.getTensorArray(closeId.id);
+ closeTensorArray.clearAndClose();
+ return [2 /*return*/, [closeTensorArray.idTensor]];
+ }
+ case 25:
+ {
+ idTensor = getParamValue('tensorListId', node, tensorMap, context);
+ index = getParamValue('index', node, tensorMap, context);
+ writeTensor = getParamValue('tensor', node, tensorMap, context);
+ tensorList = context.getTensorList(idTensor.id);
+ tensorList.setItem(index, writeTensor);
+ return [2 /*return*/, [tensorList.idTensor]];
+ }
+ case 26:
+ {
+ idTensor = getParamValue('tensorListId', node, tensorMap, context);
+ readIndex = getParamValue('index', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ elementDType = getParamValue('elementDType', node, tensorMap, context);
+ tensorList = context.getTensorList(idTensor.id);
+ return [2 /*return*/, [tensorList.getItem(readIndex, elementShape, elementDType)]];
+ }
+ case 27:
+ {
+ scatterIndices = getParamValue('indices', node, tensorMap, context);
+ scatterTensor = getParamValue('tensor', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ numElements = getParamValue('numElements', node, tensorMap, context);
+ tensorList = scatter(scatterTensor, scatterIndices, elementShape, numElements);
+ context.addTensorList(tensorList);
+ return [2 /*return*/, [tensorList.idTensor]];
+ }
+ case 28:
+ {
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ elementDtype = getParamValue('elementDType', node, tensorMap, context);
+ numElementsParam = void 0;
+ if (node.op === 'TensorListReserve') {
+ numElementsParam = 'numElements';
+ }
+ else {
+ numElementsParam = 'maxNumElements';
+ }
+ numElements = getParamValue(numElementsParam, node, tensorMap, context);
+ tensorList = reserve(elementShape, elementDtype, numElements);
+ context.addTensorList(tensorList);
+ return [2 /*return*/, [tensorList.idTensor]];
+ }
+ case 29:
+ {
+ gatherId = getParamValue('tensorListId', node, tensorMap, context);
+ gatherIndices = getParamValue('indices', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ elementDtype = getParamValue('elementDType', node, tensorMap, context);
+ tensorList = context.getTensorList(gatherId.id);
+ return [2 /*return*/, [tensorList.gather(gatherIndices, elementDtype, elementShape)]];
+ }
+ case 30:
+ {
+ idTensor = getParamValue('tensorListId', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ elementDtype = getParamValue('elementDType', node, tensorMap, context);
+ numElements = getParamValue('numElements', node, tensorMap, context);
+ tensorList = context.getTensorList(idTensor.id);
+ return [2 /*return*/, [tensorList.stack(elementShape, elementDtype, numElements)]];
+ }
+ case 31:
+ {
+ tensor = getParamValue('tensor', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ elementDtype = getParamValue('elementDType', node, tensorMap, context);
+ tensorList = fromTensor(tensor, elementShape, elementDtype);
+ context.addTensorList(tensorList);
+ return [2 /*return*/, [tensorList.idTensor]];
+ }
+ case 32:
+ {
+ concatId = getParamValue('tensorListId', node, tensorMap, context);
+ tensorList = context.getTensorList(concatId.id);
+ concatDtype = getParamValue('dtype', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ return [2 /*return*/, [tensorList.concat(concatDtype, elementShape)]];
+ }
+ case 33:
+ {
+ idTensor = getParamValue('tensorListId', node, tensorMap, context);
+ writeTensor = getParamValue('tensor', node, tensorMap, context);
+ tensorList = context.getTensorList(idTensor.id);
+ tensorList.pushBack(writeTensor);
+ return [2 /*return*/, [tensorList.idTensor]];
+ }
+ case 34:
+ {
+ idTensor = getParamValue('tensorListId', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ elementDType = getParamValue('elementDType', node, tensorMap, context);
+ tensorList = context.getTensorList(idTensor.id);
+ return [2 /*return*/, [tensorList.popBack(elementShape, elementDType)]];
+ }
+ case 35:
+ {
+ splitTensor = getParamValue('tensor', node, tensorMap, context);
+ elementShape = getParamValue('elementShape', node, tensorMap, context);
+ lengths = getParamValue('lengths', node, tensorMap, context);
+ tensorList = split(splitTensor, lengths, elementShape);
+ context.addTensorList(tensorList);
+ return [2 /*return*/, [tensorList.idTensor]];
+ }
+ case 36: throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ });
+ }); };
+
/**
 * Collects the parameters shared by the fused Conv2D and fused
 * DepthwiseConv2d executors: stride/pad/dataFormat/dilations plus the
 * optional bias and prelu-weight argument tensors.
 *
 * @param node graph node carrying the fused-op attributes and inputs.
 * @param tensorMap map from node name to its output tensors.
 * @param context execution context used to resolve dynamic params.
 * @returns object with stride, pad, dataFormat, dilations, biasArg,
 *     preluArg, activationFunc and leakyreluAlpha.
 * @throws Error when the number of extra args does not match the fused
 *     ops, or when FusedBatchNorm fusion is requested (unsupported).
 */
function fusedConvAndDepthWiseParams(node, tensorMap, context) {
    // 'fusedOps' is a two-element list [extraOp, activation], e.g.
    // ['biasadd', 'prelu']; __read is the transpiled destructuring helper.
    var _a = __read(getParamValue('fusedOps', node, tensorMap, context), 2), extraOp = _a[0], activationFunc = _a[1];
    var isBiasAdd = extraOp === 'biasadd';
    var noBiasAdd = !isBiasAdd;
    var isPrelu = activationFunc === 'prelu';
    var isBatchNorm = extraOp === 'fusedbatchnorm';
    var numArgs = getParamValue('numArgs', node, tensorMap, context);
    if (isBiasAdd) {
        if (isPrelu && numArgs !== 2) {
            throw new Error('FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu ' +
                'must have two extra arguments: bias and alpha.');
        }
        // NOTE(review): the `isBiasAdd` term below is redundant (we are
        // already inside `if (isBiasAdd)`); kept byte-identical here.
        if (!isPrelu && isBiasAdd && numArgs !== 1) {
            throw new Error('FusedConv2d and DepthwiseConv2d with BiasAdd must have ' +
                'one extra argument: bias.');
        }
    }
    if (isBatchNorm) {
        throw new Error('FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported');
    }
    var stride = getParamValue('strides', node, tensorMap, context);
    var pad = getPadding(node, tensorMap, context);
    var dataFormat = getParamValue('dataFormat', node, tensorMap, context)
        .toUpperCase();
    var dilations = getParamValue('dilations', node, tensorMap, context);
    // 'args' holds the extra input tensors: [bias, preluWeights] when a
    // bias-add is fused. Without bias-add the single extra arg is the
    // prelu weights, so shift it over and clear the bias slot.
    var _b = __read(getParamValue('args', node, tensorMap, context), 2), biasArg = _b[0], preluArg = _b[1];
    if (noBiasAdd) {
        preluArg = biasArg;
        biasArg = undefined;
    }
    var leakyreluAlpha = getParamValue('leakyreluAlpha', node, tensorMap, context);
    return {
        stride: stride,
        pad: pad,
        dataFormat: dataFormat,
        dilations: dilations,
        biasArg: biasArg,
        preluArg: preluArg,
        activationFunc: activationFunc,
        leakyreluAlpha: leakyreluAlpha
    };
}
/**
 * Executes a convolution / pooling family op (Conv1D/2D/3D, fused conv,
 * depthwise conv, transposed conv, avg/max pooling, dilation) for a
 * graph node.
 *
 * @param node graph node to execute.
 * @param tensorMap map from node name to its output tensors.
 * @param context execution context used to resolve dynamic params.
 * @returns array of output tensors produced by the node.
 * @throws TypeError for unimplemented op names.
 */
var executeOp$g = function (node, tensorMap, context) {
    switch (node.op) {
        case 'Conv1D': {
            var stride = getParamValue('stride', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var dataFormat = getParamValue('dataFormat', node, tensorMap, context)
                .toUpperCase();
            var dilation = getParamValue('dilation', node, tensorMap, context);
            return [conv1d(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), stride, pad, dataFormat, dilation)];
        }
        case 'Conv2D': {
            // strides/dilations attributes are 4-element lists; indices 1
            // and 2 carry the spatial components (cf. the Dilation2D case).
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getPadding(node, tensorMap, context);
            var dataFormat = getParamValue('dataFormat', node, tensorMap, context)
                .toUpperCase();
            var dilations = getParamValue('dilations', node, tensorMap, context);
            return [conv2d$1(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [stride[1], stride[2]], pad, dataFormat, [dilations[1], dilations[2]])];
        }
        case '_FusedConv2D': {
            var _a = fusedConvAndDepthWiseParams(node, tensorMap, context), stride = _a.stride, pad = _a.pad, dataFormat = _a.dataFormat, dilations = _a.dilations, biasArg = _a.biasArg, preluArg = _a.preluArg, activationFunc = _a.activationFunc, leakyreluAlpha = _a.leakyreluAlpha;
            // `conv2d` here is the fused variant taking a config object
            // (bias/activation), distinct from the plain `conv2d$1` above.
            return [conv2d({
                    x: getParamValue('x', node, tensorMap, context),
                    filter: getParamValue('filter', node, tensorMap, context),
                    strides: [stride[1], stride[2]],
                    pad: pad,
                    dataFormat: dataFormat,
                    dilations: [dilations[1], dilations[2]],
                    bias: biasArg,
                    activation: activationFunc,
                    preluActivationWeights: preluArg,
                    leakyreluAlpha: leakyreluAlpha
                })];
        }
        case 'FusedDepthwiseConv2dNative': {
            var _b = fusedConvAndDepthWiseParams(node, tensorMap, context), stride = _b.stride, pad = _b.pad, dataFormat = _b.dataFormat, dilations = _b.dilations, biasArg = _b.biasArg, preluArg = _b.preluArg, activationFunc = _b.activationFunc, leakyreluAlpha = _b.leakyreluAlpha;
            // Fused depthwise variant (config-object form), distinct from
            // plain `depthwiseConv2d$1` below.
            return [depthwiseConv2d({
                    x: getParamValue('x', node, tensorMap, context),
                    filter: getParamValue('filter', node, tensorMap, context),
                    strides: [stride[1], stride[2]],
                    pad: pad,
                    dataFormat: dataFormat,
                    dilations: [dilations[1], dilations[2]],
                    bias: biasArg,
                    activation: activationFunc,
                    preluActivationWeights: preluArg,
                    leakyreluAlpha: leakyreluAlpha
                })];
        }
        case 'Conv2DBackpropInput':
        case 'Conv2dTranspose': {
            var shape = getParamValue('outputShape', node, tensorMap, context);
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getPadding(node, tensorMap, context);
            return [conv2dTranspose(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), shape, [stride[1], stride[2]], pad)];
        }
        case 'DepthwiseConv2dNative':
        case 'DepthwiseConv2d': {
            // Note: this op reads its input from the 'input' param, not 'x'.
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getPadding(node, tensorMap, context);
            var dilations = getParamValue('dilations', node, tensorMap, context);
            var dataFormat = getParamValue('dataFormat', node, tensorMap, context)
                .toUpperCase();
            return [depthwiseConv2d$1(getParamValue('input', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [stride[1], stride[2]], pad, dataFormat, [dilations[1], dilations[2]])];
        }
        case 'Conv3D': {
            // 5-D attributes: indices 1..3 carry the three spatial dims.
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var dataFormat = getParamValue('dataFormat', node, tensorMap, context)
                .toUpperCase();
            var dilations = getParamValue('dilations', node, tensorMap, context);
            return [conv3d(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [stride[1], stride[2], stride[3]], pad, dataFormat, [dilations[1], dilations[2], dilations[3]])];
        }
        case 'AvgPool': {
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var kernelSize = getParamValue('kernelSize', node, tensorMap, context);
            return [avgPool(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad)];
        }
        case 'MaxPool': {
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var kernelSize = getParamValue('kernelSize', node, tensorMap, context);
            return [maxPool(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad)];
        }
        case 'MaxPoolWithArgmax': {
            // Produces two outputs: the pooled values and the argmax indexes.
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var kernelSize = getParamValue('kernelSize', node, tensorMap, context);
            var includeBatchInIndex = getParamValue('includeBatchInIndex', node, tensorMap, context);
            var _c = maxPoolWithArgmax(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad, includeBatchInIndex), result = _c.result, indexes = _c.indexes;
            return [result, indexes];
        }
        case 'AvgPool3D': {
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var kernelSize = getParamValue('kernelSize', node, tensorMap, context);
            return [avgPool3d(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2], kernelSize[3]], [stride[1], stride[2], stride[3]], pad)];
        }
        case 'MaxPool3D': {
            var stride = getParamValue('strides', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var kernelSize = getParamValue('kernelSize', node, tensorMap, context);
            return [maxPool3d(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2], kernelSize[3]], [stride[1], stride[2], stride[3]], pad)];
        }
        case 'Dilation2D': {
            var strides = getParamValue('strides', node, tensorMap, context);
            var pad = getParamValue('pad', node, tensorMap, context);
            var dilations = getParamValue('dilations', node, tensorMap, context);
            // strides: [1, stride_height, stride_width, 1].
            var strideHeight = strides[1];
            var strideWidth = strides[2];
            // dilations: [1, dilation_height, dilation_width, 1].
            var dilationHeight = dilations[1];
            var dilationWidth = dilations[2];
            return [dilation2d(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [strideHeight, strideWidth], pad, [dilationHeight, dilationWidth], 'NHWC' /* dataFormat */)];
        }
        default:
            throw TypeError("Node type " + node.op + " is not implemented");
    }
};
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Executes a tensor-creation op (Fill, LinSpace, Multinomial, OneHot,
 * Ones/Zeros and their *Like variants, RandomUniform, Range,
 * TruncatedNormal) for a graph node.
 *
 * @param node graph node to execute.
 * @param tensorMap map from node name to its output tensors.
 * @param context execution context used to resolve dynamic params.
 * @returns array with the single created tensor.
 * @throws TypeError for unimplemented op names.
 */
var executeOp$f = function (node, tensorMap, context) {
    // Shorthand for resolving one of this node's parameters.
    var param = function (name) {
        return getParamValue(name, node, tensorMap, context);
    };
    switch (node.op) {
        case 'Fill': {
            var shape = param('shape');
            var dtype = param('dtype');
            var value = param('value');
            return [fill(shape, value, dtype)];
        }
        case 'LinSpace': {
            var start = param('start');
            var stop = param('stop');
            var num = param('num');
            return [linspace(start, stop, num)];
        }
        case 'Multinomial': {
            var logits = param('logits');
            var numSamples = param('numSamples');
            var seed = param('seed');
            return [multinomial(logits, numSamples, seed)];
        }
        case 'OneHot': {
            var indices = param('indices');
            var depth = param('depth');
            var onValue = param('onValue');
            var offValue = param('offValue');
            return [oneHot(indices, depth, onValue, offValue)];
        }
        case 'Ones':
            return [ones(param('shape'), param('dtype'))];
        case 'OnesLike':
            return [onesLike(param('x'))];
        case 'RandomUniform': {
            var shape = param('shape');
            var minval = param('minval');
            var maxval = param('maxval');
            var dtype = param('dtype');
            return [randomUniform(shape, minval, maxval, dtype)];
        }
        case 'Range': {
            var start = param('start');
            var stop = param('stop');
            var step = param('step');
            return [range(start, stop, step, param('dtype'))];
        }
        case 'TruncatedNormal': {
            var shape = param('shape');
            var mean = param('mean');
            var stdDev = param('stdDev');
            var seed = param('seed');
            return [truncatedNormal(shape, mean, stdDev, param('dtype'), seed)];
        }
        case 'Zeros':
            return [zeros(param('shape'), param('dtype'))];
        case 'ZerosLike':
            return [zerosLike(param('x'))];
        default:
            throw TypeError("Node type " + node.op + " is not implemented");
    }
};
+
var _this$1 = undefined;
/**
 * Gathers the parameter tensors shared by the NonMaxSuppression variants.
 *
 * @param node graph node carrying the NMS inputs.
 * @param tensorMap map from node name to its output tensors.
 * @param context execution context used to resolve dynamic params.
 * @returns object with boxes, scores, maxOutputSize, iouThreshold,
 *     scoreThreshold and softNmsSigma.
 */
function nmsParams(node, tensorMap, context) {
    var lookup = function (name) {
        return getParamValue(name, node, tensorMap, context);
    };
    return {
        boxes: lookup('boxes'),
        scores: lookup('scores'),
        maxOutputSize: lookup('maxOutputSize'),
        iouThreshold: lookup('iouThreshold'),
        scoreThreshold: lookup('scoreThreshold'),
        softNmsSigma: lookup('softNmsSigma')
    };
}
/**
 * Executes the async "dynamic" ops whose outputs depend on tensor values:
 * NonMaxSuppression V2-V5, Where and ListDiff. This is transpiled
 * async/await: case 0 dispatches the op name onto a generator label, and
 * [4, ...] entries are `await` points.
 *
 * @param node graph node to execute.
 * @param tensorMap map from node name to its output tensors.
 * @param context execution context used to resolve dynamic params.
 * @returns promise resolving to the node's output tensors.
 * @throws TypeError for unimplemented op names (label 10).
 */
var executeOp$e = function (node, tensorMap, context) { return __awaiter(_this$1, void 0, void 0, function () {
    var _a, _b, boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, result, _c, boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize, result, _d, boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, condition, result;
    return __generator(this, function (_e) {
        switch (_e.label) {
            case 0:
                _a = node.op;
                // Map each op onto the label that implements it below.
                switch (_a) {
                    case 'NonMaxSuppressionV5': return [3 /*break*/, 1];
                    case 'NonMaxSuppressionV4': return [3 /*break*/, 3];
                    case 'NonMaxSuppressionV3': return [3 /*break*/, 5];
                    case 'NonMaxSuppressionV2': return [3 /*break*/, 5];
                    case 'Where': return [3 /*break*/, 7];
                    case 'ListDiff': return [3 /*break*/, 9];
                }
                return [3 /*break*/, 10];
            case 1:
                // NonMaxSuppressionV5 (soft-NMS): indices plus scores.
                _b = nmsParams(node, tensorMap, context), boxes = _b.boxes, scores = _b.scores, maxOutputSize = _b.maxOutputSize, iouThreshold = _b.iouThreshold, scoreThreshold = _b.scoreThreshold, softNmsSigma = _b.softNmsSigma;
                return [4 /*yield*/, image.nonMaxSuppressionWithScoreAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma)];
            case 2:
                result = _e.sent();
                return [2 /*return*/, [result.selectedIndices, result.selectedScores]];
            case 3:
                // NonMaxSuppressionV4: padded variant, returns indices plus
                // the count of valid outputs.
                _c = nmsParams(node, tensorMap, context), boxes = _c.boxes, scores = _c.scores, maxOutputSize = _c.maxOutputSize, iouThreshold = _c.iouThreshold, scoreThreshold = _c.scoreThreshold;
                padToMaxOutputSize = getParamValue('padToMaxOutputSize', node, tensorMap, context);
                return [4 /*yield*/, image.nonMaxSuppressionPaddedAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize)];
            case 4:
                result = _e.sent();
                return [2 /*return*/, [result.selectedIndices, result.validOutputs]];
            case 5:
                // NonMaxSuppressionV3/V2: plain NMS, indices only.
                _d = nmsParams(node, tensorMap, context), boxes = _d.boxes, scores = _d.scores, maxOutputSize = _d.maxOutputSize, iouThreshold = _d.iouThreshold, scoreThreshold = _d.scoreThreshold;
                return [4 /*yield*/, image.nonMaxSuppressionAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold)];
            case 6: return [2 /*return*/, [_e.sent()]];
            case 7:
                // Where: cast condition to bool, then find true coordinates;
                // the temporary cast tensor is disposed afterwards.
                condition = cast(getParamValue('condition', node, tensorMap, context), 'bool');
                return [4 /*yield*/, whereAsync(condition)];
            case 8:
                result = [_e.sent()];
                condition.dispose();
                return [2 /*return*/, result];
            case 9:
                // ListDiff: elements of x not present in y.
                {
                    return [2 /*return*/, setdiff1dAsync(getParamValue('x', node, tensorMap, context), getParamValue('y', node, tensorMap, context))];
                }
            case 10: throw TypeError("Node type " + node.op + " is not implemented");
        }
    });
}); };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Executes an evaluation op (TopKV2, Unique, UniqueV2) for a graph node.
 *
 * @param node graph node to execute.
 * @param tensorMap map from node name to its output tensors.
 * @param context execution context used to resolve dynamic params.
 * @returns two-element array: [values, indices].
 * @throws TypeError for unimplemented op names.
 */
var executeOp$d = function (node, tensorMap, context) {
    var lookup = function (name) {
        return getParamValue(name, node, tensorMap, context);
    };
    switch (node.op) {
        case 'TopKV2': {
            var input = lookup('x');
            var k = lookup('k');
            var sorted = lookup('sorted');
            var topKResult = topk(input, k, sorted);
            return [topKResult.values, topKResult.indices];
        }
        case 'Unique': {
            var uniqueResult = unique(lookup('x'));
            return [uniqueResult.values, uniqueResult.indices];
        }
        case 'UniqueV2': {
            var input = lookup('x');
            var axis = lookup('axis');
            var uniqueV2Result = unique(input, axis);
            return [uniqueV2Result.values, uniqueV2Result.indices];
        }
        default:
            throw TypeError("Node type " + node.op + " is not implemented");
    }
};
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Executes the graph-level ops (Const, Placeholder, Identity, Shape,
 * Size, Rank, NoOp, Print, ...) for a single node.
 *
 * Fix: the tf.print() warning string was concatenated without a space
 * ("operation,usually"); the message now reads correctly. All other
 * behavior is unchanged.
 *
 * @param node graph node to execute.
 * @param tensorMap map from node name to its output tensors.
 * @param context execution context (frames, tensor arrays/lists).
 * @returns array of output tensors for the node.
 * @throws TypeError for unimplemented op names.
 */
var executeOp$c = function (node, tensorMap, context) {
    switch (node.op) {
        case 'Const': {
            // Constants were materialized at model load; just look them up.
            return tensorMap[node.name];
        }
        case 'PlaceholderWithDefault': {
            var def = getParamValue('default', node, tensorMap, context);
            return [getTensor(node.name, tensorMap, context) || def];
        }
        case 'Placeholder':
            return [getTensor(node.name, tensorMap, context)];
        case 'Identity':
        case 'StopGradient':
        case 'FakeQuantWithMinMaxVars': { // This op is currently ignored.
            var data_1 = getParamValue('x', node, tensorMap, context);
            return [cloneTensor(data_1)];
        }
        case 'IdentityN':
            return getParamValue('x', node, tensorMap, context)
                .map(function (t) { return cloneTensor(t); });
        case 'Snapshot': {
            var snapshot = getParamValue('x', node, tensorMap, context);
            return [cloneTensor(snapshot)];
        }
        case 'Shape':
            return [tensor1d(getParamValue('x', node, tensorMap, context).shape, 'int32')];
        case 'ShapeN':
            return getParamValue('x', node, tensorMap, context)
                .map(function (t) { return tensor1d(t.shape); });
        case 'Size':
            return [scalar(getParamValue('x', node, tensorMap, context).size, 'int32')];
        case 'Rank':
            return [scalar(getParamValue('x', node, tensorMap, context).rank, 'int32')];
        case 'NoOp':
            return [scalar(1)];
        case 'Print': {
            var input = getParamValue('x', node, tensorMap, context);
            var data = getParamValue('data', node, tensorMap, context);
            var message = getParamValue('message', node, tensorMap, context);
            var summarize = getParamValue('summarize', node, tensorMap, context);
            console.warn('The graph has a tf.print() operation, ' +
                'usually used for debugging, which slows down performance.');
            console.log(message);
            // Print up to `summarize` values of each data tensor.
            for (var i = 0; i < data.length; i++) {
                console.log(Array.prototype.slice.call(data[i].dataSync())
                    .slice(0, summarize));
            }
            return [input];
        }
        default:
            throw TypeError("Node type " + node.op + " is not implemented");
    }
};
+
    /**
     * Hashtable contains a set of tensors, which can be accessed by key.
     */
    var HashTable = /** @class */ (function () {
        /**
         * Constructor of HashTable. Creates a hash table.
         *
         * @param keyDType `dtype` of the table keys.
         * @param valueDType `dtype` of the table values.
         */
        function HashTable(keyDType, valueDType) {
            this.keyDType = keyDType;
            this.valueDType = valueDType;
            // The handle is a dummy rank-0 tensor whose only role is to carry a
            // unique tensor id; the resource manager uses that id to locate this
            // table (see the `id` getter below).
            this.handle = tfc.scalar(0);
            // tslint:disable-next-line: no-any
            this.tensorMap = new Map();
            // Pin the handle so surrounding tfc.tidy() scopes cannot dispose it.
            tfc.keep(this.handle);
        }
        Object.defineProperty(HashTable.prototype, "id", {
            // Id of the backing handle tensor; used as the lookup key by the
            // resource manager.
            get: function () {
                return this.handle.id;
            },
            enumerable: true,
            configurable: true
        });
        /**
         * Dispose the tensors and handle and clear the hashtable.
         */
        HashTable.prototype.clearAndClose = function () {
            this.tensorMap.forEach(function (value) { return value.dispose(); });
            this.tensorMap.clear();
            this.handle.dispose();
        };
        /**
         * The number of items in the hash table.
         */
        HashTable.prototype.size = function () {
            return this.tensorMap.size;
        };
        /**
         * The number of items in the hash table as a rank-0 tensor.
         */
        HashTable.prototype.tensorSize = function () {
            return scalar(this.size(), 'int32');
        };
        /**
         * Replaces the contents of the table with the specified keys and values.
         * @param keys Keys to store in the hashtable.
         * @param values Values to store in the hashtable.
         *
         * NOTE: this is downlevelled async code — the __generator labels below are
         * resume points: label 0 runs up to the single await (keys.data()),
         * label 1 resumes with the downloaded keys and rebuilds the map.
         */
        HashTable.prototype.import = function (keys, values) {
            return __awaiter(this, void 0, void 0, function () {
                var $keys;
                var _this = this;
                return __generator(this, function (_a) {
                    switch (_a.label) {
                        case 0:
                            this.checkKeyAndValueTensor(keys, values);
                            // Download the key tensor's data; the remaining work is
                            // synchronous inside the tidy below.
                            return [4 /*yield*/, keys.data()];
                        case 1:
                            $keys = _a.sent();
                            // Clear the hashTable before inserting new values.
                            this.tensorMap.forEach(function (value) { return value.dispose(); });
                            this.tensorMap.clear();
                            return [2 /*return*/, tfc.tidy(function () {
                                    var $values = tfc.unstack(values);
                                    var keysLength = $keys.length;
                                    var valuesLength = $values.length;
                                    tfc.util.assert(keysLength === valuesLength, function () { return "The number of elements doesn't match, keys has " +
                                        (keysLength + " elements, the values has " + valuesLength + " ") +
                                        "elements."; });
                                    for (var i = 0; i < keysLength; i++) {
                                        var key = $keys[i];
                                        var value = $values[i];
                                        // Stored value tensors must outlive this tidy scope;
                                        // they are released in clearAndClose()/the next import().
                                        tfc.keep(value);
                                        _this.tensorMap.set(key, value);
                                    }
                                    return _this.handle;
                                })];
                    }
                });
            });
        };
        /**
         * Looks up keys in a hash table, outputs the corresponding values.
         *
         * Performs batch lookups, for every element in the key tensor, `find`
         * stacks the corresponding value into the return tensor.
         *
         * If an element is not present in the table, the given `defaultValue` is
         * used.
         *
         * @param keys Keys to look up. Must have the same type as the keys of the
         *     table.
         * @param defaultValue The scalar `defaultValue` is the value output for keys
         *     not present in the table. It must also be of the same type as the
         *     table values.
         */
        HashTable.prototype.find = function (keys, defaultValue) {
            return __awaiter(this, void 0, void 0, function () {
                var $keys;
                var _this = this;
                return __generator(this, function (_a) {
                    switch (_a.label) {
                        case 0:
                            this.checkKeyAndValueTensor(keys, defaultValue);
                            // Single await point: download the keys to look up.
                            return [4 /*yield*/, keys.data()];
                        case 1:
                            $keys = _a.sent();
                            return [2 /*return*/, tfc.tidy(function () {
                                    var result = [];
                                    for (var i = 0; i < $keys.length; i++) {
                                        var key = $keys[i];
                                        var value = _this.findWithDefault(key, defaultValue);
                                        result.push(value);
                                    }
                                    // One stacked tensor with a leading batch dimension.
                                    return tfc.stack(result);
                                })];
                    }
                });
            });
        };
        // Returns the stored value for `key`, or `defaultValue` when absent.
        // tslint:disable-next-line: no-any
        HashTable.prototype.findWithDefault = function (key, defaultValue) {
            var result = this.tensorMap.get(key);
            return result != null ? result : defaultValue;
        };
        // Throws unless the key/value dtypes match the dtypes this table was
        // created with.
        HashTable.prototype.checkKeyAndValueTensor = function (key, value) {
            if (key.dtype !== this.keyDType) {
                throw new Error("Expect key dtype " + this.keyDType + ", but got " +
                    ("" + key.dtype));
            }
            if (value.dtype !== this.valueDType) {
                throw new Error("Expect value dtype " + this.valueDType + ", but got " +
                    ("" + value.dtype));
            }
        };
        return HashTable;
    }());
+
    var _this = undefined;
    /**
     * Executor for hashtable-category ops. This is downlevelled async code:
     * the numeric __generator labels below are resume points, not op codes.
     *   label 1    -> HashTable / HashTableV2 (create and register a table)
     *   label 2/3  -> LookupTableImport(V2)   (await table.import)
     *   label 4/5  -> LookupTableFind(V2)     (await table.find)
     *   label 6    -> LookupTableSize(V2)
     * @param resourceManager holds the model-global HashTable instances.
     */
    var executeOp$b = function (node, tensorMap, context, resourceManager) { return __awaiter(_this, void 0, void 0, function () {
        var _a, keyDType, valueDType, hashTable, handle, keys, values, hashTable, handle, keys, defaultValue, hashTable, handle, hashTable;
        return __generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    _a = node.op;
                    switch (_a) {
                        case 'HashTable': return [3 /*break*/, 1];
                        case 'HashTableV2': return [3 /*break*/, 1];
                        case 'LookupTableImport': return [3 /*break*/, 2];
                        case 'LookupTableImportV2': return [3 /*break*/, 2];
                        case 'LookupTableFind': return [3 /*break*/, 4];
                        case 'LookupTableFindV2': return [3 /*break*/, 4];
                        case 'LookupTableSize': return [3 /*break*/, 6];
                        case 'LookupTableSizeV2': return [3 /*break*/, 6];
                    }
                    return [3 /*break*/, 7];
                case 1:
                    {
                        keyDType = getParamValue('keyDType', node, tensorMap, context);
                        valueDType = getParamValue('valueDType', node, tensorMap, context);
                        hashTable = new HashTable(keyDType, valueDType);
                        // Register under the node name; later lookups resolve the
                        // handle back to this instance by tensor id.
                        resourceManager.addHashTable(node.name, hashTable);
                        return [2 /*return*/, [hashTable.handle]];
                    }
                case 2:
                    handle = getParamValue('tableHandle', node, tensorMap, context, resourceManager);
                    keys = getParamValue('keys', node, tensorMap, context);
                    values = getParamValue('values', node, tensorMap, context);
                    hashTable = resourceManager.getHashTableById(handle.id);
                    // Await point: import downloads the key data internally.
                    return [4 /*yield*/, hashTable.import(keys, values)];
                case 3: return [2 /*return*/, [_b.sent()]];
                case 4:
                    handle = getParamValue('tableHandle', node, tensorMap, context, resourceManager);
                    keys = getParamValue('keys', node, tensorMap, context);
                    defaultValue = getParamValue('defaultValue', node, tensorMap, context);
                    hashTable = resourceManager.getHashTableById(handle.id);
                    // Await point: find downloads the key data internally.
                    return [4 /*yield*/, hashTable.find(keys, defaultValue)];
                case 5: return [2 /*return*/, [_b.sent()]];
                case 6:
                    {
                        handle = getParamValue('tableHandle', node, tensorMap, context, resourceManager);
                        hashTable = resourceManager.getHashTableById(handle.id);
                        return [2 /*return*/, [hashTable.tensorSize()]];
                    }
                case 7: throw TypeError("Node type " + node.op + " is not implemented");
            }
        });
    }); };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$a = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'ResizeBilinear': {
+ var images = getParamValue('images', node, tensorMap, context);
+ var size = getParamValue('size', node, tensorMap, context);
+ var alignCorners = getParamValue('alignCorners', node, tensorMap, context);
+ var halfPixelCenters = getParamValue('halfPixelCenters', node, tensorMap, context);
+ return [image.resizeBilinear(images, [size[0], size[1]], alignCorners, halfPixelCenters)];
+ }
+ case 'ResizeNearestNeighbor': {
+ var images = getParamValue('images', node, tensorMap, context);
+ var size = getParamValue('size', node, tensorMap, context);
+ var alignCorners = getParamValue('alignCorners', node, tensorMap, context);
+ var halfPixelCenters = getParamValue('halfPixelCenters', node, tensorMap, context);
+ return [image.resizeNearestNeighbor(images, [size[0], size[1]], alignCorners, halfPixelCenters)];
+ }
+ case 'CropAndResize': {
+ var image$1 = getParamValue('image', node, tensorMap, context);
+ var boxes = getParamValue('boxes', node, tensorMap, context);
+ var boxInd = getParamValue('boxInd', node, tensorMap, context);
+ var cropSize = getParamValue('cropSize', node, tensorMap, context);
+ var method = getParamValue('method', node, tensorMap, context);
+ var extrapolationValue = getParamValue('extrapolationValue', node, tensorMap, context);
+ return [image.cropAndResize(image$1, boxes, boxInd, cropSize, method, extrapolationValue)];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$9 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'Equal': {
+ return [equal(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'NotEqual': {
+ return [notEqual(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'Greater': {
+ return [greater(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'GreaterEqual': {
+ return [greaterEqual(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'Less': {
+ return [less(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'LessEqual': {
+ return [lessEqual(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'LogicalAnd': {
+ return [logicalAnd(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'LogicalNot': {
+ return [logicalNot(getParamValue('a', node, tensorMap, context))];
+ }
+ case 'LogicalOr': {
+ return [logicalOr(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ case 'Select':
+ case 'SelectV2': {
+ return [where(getParamValue('condition', node, tensorMap, context), getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ var executeOp$8 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'BatchMatMul':
+ case 'BatchMatMulV2':
+ case 'MatMul':
+ return [matMul$1(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context), getParamValue('transposeA', node, tensorMap, context), getParamValue('transposeB', node, tensorMap, context))];
+ case 'Einsum':
+ return [einsum.apply(tfOps, __spread([getParamValue('equation', node, tensorMap, context)], getParamValue('tensors', node, tensorMap, context)))];
+ case 'Transpose':
+ return [transpose(getParamValue('x', node, tensorMap, context), getParamValue('perm', node, tensorMap, context))];
+ case '_FusedMatMul':
+ var _a = __read(getParamValue('fusedOps', node, tensorMap, context), 2), extraOp = _a[0], activationFunc = _a[1];
+ var isBiasAdd = extraOp === 'biasadd';
+ var isPrelu = activationFunc === 'prelu';
+ var numArgs = getParamValue('numArgs', node, tensorMap, context);
+ var leakyreluAlpha = getParamValue('leakyreluAlpha', node, tensorMap, context);
+ if (isBiasAdd) {
+ if (isPrelu && numArgs !== 2) {
+ throw new Error('Fused MatMul with BiasAdd and Prelu must have two ' +
+ 'extra arguments: bias and alpha.');
+ }
+ if (!isPrelu && numArgs !== 1) {
+ throw new Error('Fused MatMul with BiasAdd must have one extra argument: bias.');
+ }
+ }
+ var _b = __read(getParamValue('args', node, tensorMap, context), 2), biasArg = _b[0], preluArg = _b[1];
+ return [matMul({
+ a: getParamValue('a', node, tensorMap, context),
+ b: getParamValue('b', node, tensorMap, context),
+ transposeA: getParamValue('transposeA', node, tensorMap, context),
+ transposeB: getParamValue('transposeB', node, tensorMap, context),
+ bias: biasArg,
+ activation: activationFunc,
+ preluActivationWeights: preluArg,
+ leakyreluAlpha: leakyreluAlpha
+ })];
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$7 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'FusedBatchNorm':
+ case 'FusedBatchNormV2': {
+ return [batchNorm(getParamValue('x', node, tensorMap, context), getParamValue('mean', node, tensorMap, context), getParamValue('variance', node, tensorMap, context), getParamValue('offset', node, tensorMap, context), getParamValue('scale', node, tensorMap, context), getParamValue('epsilon', node, tensorMap, context))];
+ }
+ case 'FusedBatchNormV3': {
+ return [batchNorm(getParamValue('x', node, tensorMap, context), getParamValue('mean', node, tensorMap, context), getParamValue('variance', node, tensorMap, context), getParamValue('offset', node, tensorMap, context), getParamValue('scale', node, tensorMap, context), getParamValue('epsilon', node, tensorMap, context))];
+ }
+ case 'LRN': {
+ return [localResponseNormalization(getParamValue('x', node, tensorMap, context), getParamValue('radius', node, tensorMap, context), getParamValue('bias', node, tensorMap, context), getParamValue('alpha', node, tensorMap, context), getParamValue('beta', node, tensorMap, context))];
+ }
+ case 'Softmax': {
+ return [softmax(getParamValue('x', node, tensorMap, context))];
+ }
+ case 'LogSoftmax': {
+ return [logSoftmax(getParamValue('x', node, tensorMap, context))];
+ }
+ case 'SparseToDense': {
+ return [sparseToDense(getParamValue('sparseIndices', node, tensorMap, context), getParamValue('outputShape', node, tensorMap, context), getParamValue('sparseValues', node, tensorMap, context), getParamValue('defaultValue', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$6 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'Max': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var keepDims = getParamValue('keepDims', node, tensorMap, context);
+ return [max(getParamValue('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Mean': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var keepDims = getParamValue('keepDims', node, tensorMap, context);
+ return [mean(getParamValue('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Min': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var keepDims = getParamValue('keepDims', node, tensorMap, context);
+ return [min(getParamValue('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Sum': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var keepDims = getParamValue('keepDims', node, tensorMap, context);
+ return [sum(getParamValue('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'All': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var keepDims = getParamValue('keepDims', node, tensorMap, context);
+ return [all(getParamValue('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Any': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var keepDims = getParamValue('keepDims', node, tensorMap, context);
+ return [any(getParamValue('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'ArgMax': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ return [argMax(getParamValue('x', node, tensorMap, context), axis)];
+ }
+ case 'ArgMin': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ return [argMin(getParamValue('x', node, tensorMap, context), axis)];
+ }
+ case 'Prod': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var keepDims = getParamValue('keepDims', node, tensorMap, context);
+ return [prod(getParamValue('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Cumsum': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var exclusive = getParamValue('exclusive', node, tensorMap, context);
+ var reverse = getParamValue('reverse', node, tensorMap, context);
+ return [cumsum(getParamValue('x', node, tensorMap, context), axis, exclusive, reverse)];
+ }
+ case 'Bincount':
+ var x = getParamValue('x', node, tensorMap, context);
+ var weights = getParamValue('weights', node, tensorMap, context);
+ var size = getParamValue('size', node, tensorMap, context);
+ return [bincount(x, weights, size)];
+ case 'DenseBincount': {
+ var x_1 = getParamValue('x', node, tensorMap, context);
+ var weights_1 = getParamValue('weights', node, tensorMap, context);
+ var size_1 = getParamValue('size', node, tensorMap, context);
+ var binaryOutput = getParamValue('binaryOutput', node, tensorMap, context);
+ return [denseBincount(x_1, weights_1, size_1, binaryOutput)];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$5 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'ConcatV2':
+ case 'Concat': {
+ var n = getParamValue('n', node, tensorMap, context);
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var inputs = getParamValue('tensors', node, tensorMap, context);
+ inputs = inputs.slice(0, n);
+ return [concat(inputs, axis)];
+ }
+ case 'Gather': {
+ var input = getParamValue('x', node, tensorMap, context);
+ var indices = getParamValue('indices', node, tensorMap, context);
+ return [gather(input, cast(indices, 'int32'), 0)];
+ }
+ case 'GatherV2': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var batchDims = getParamValue('batchDims', node, tensorMap, context);
+ var input = getParamValue('x', node, tensorMap, context);
+ var indices = getParamValue('indices', node, tensorMap, context);
+ return [gather(input, cast(indices, 'int32'), axis, batchDims)];
+ }
+ case 'Reverse': {
+ var dims = getParamValue('dims', node, tensorMap, context);
+ var axis = [];
+ for (var i = 0; i < dims.length; i++) {
+ if (dims[i]) {
+ axis.push(i);
+ }
+ }
+ var input = getParamValue('x', node, tensorMap, context);
+ return [reverse(input, axis)];
+ }
+ case 'ReverseV2': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var input = getParamValue('x', node, tensorMap, context);
+ return [reverse(input, axis)];
+ }
+ case 'Slice': {
+ // tslint:disable-next-line:no-any
+ var begin = getParamValue('begin', node, tensorMap, context);
+ // tslint:disable-next-line:no-any
+ var size = getParamValue('size', node, tensorMap, context);
+ return [slice(getParamValue('x', node, tensorMap, context), begin, size)];
+ }
+ case 'StridedSlice': {
+ var begin = getParamValue('begin', node, tensorMap, context);
+ var end = getParamValue('end', node, tensorMap, context);
+ var strides = getParamValue('strides', node, tensorMap, context);
+ var beginMask = getParamValue('beginMask', node, tensorMap, context);
+ var endMask = getParamValue('endMask', node, tensorMap, context);
+ var ellipsisMask = getParamValue('ellipsisMask', node, tensorMap, context);
+ var newAxisMask = getParamValue('newAxisMask', node, tensorMap, context);
+ var shrinkAxisMask = getParamValue('shrinkAxisMask', node, tensorMap, context);
+ var tensor = getParamValue('x', node, tensorMap, context);
+ return [stridedSlice(tensor, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask)];
+ }
+ case 'Pack': {
+ return tfc.tidy(function () {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var tensors = getParamValue('tensors', node, tensorMap, context);
+ // Reshape the tensors to the first tensor's shape if they don't
+ // match.
+ var shape = tensors[0].shape;
+ var squeezedShape = squeeze(tensors[0]).shape;
+ var mapped = tensors.map(function (tensor) {
+ var sameShape = tfc.util.arraysEqual(tensor.shape, shape);
+ if (!sameShape &&
+ !tfc.util.arraysEqual(squeeze(tensor).shape, squeezedShape)) {
+ throw new Error('the input tensors shape does not match');
+ }
+ return sameShape ? tensor : reshape(tensor, shape);
+ });
+ return [stack(mapped, axis)];
+ });
+ }
+ case 'Unpack': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var tensor = getParamValue('tensor', node, tensorMap, context);
+ return unstack(tensor, axis);
+ }
+ case 'Tile': {
+ var reps = getParamValue('reps', node, tensorMap, context);
+ return [tile(getParamValue('x', node, tensorMap, context), reps)];
+ }
+ case 'Split':
+ case 'SplitV': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ var numOrSizeSplits = getParamValue('numOrSizeSplits', node, tensorMap, context);
+ var tensor = getParamValue('x', node, tensorMap, context);
+ return split$1(tensor, numOrSizeSplits, axis);
+ }
+ case 'ScatterNd': {
+ var indices = getParamValue('indices', node, tensorMap, context);
+ var values = getParamValue('values', node, tensorMap, context);
+ var shape = getParamValue('shape', node, tensorMap, context);
+ return [scatterND(indices, values, shape)];
+ }
+ case 'GatherNd': {
+ var x = getParamValue('x', node, tensorMap, context);
+ var indices = getParamValue('indices', node, tensorMap, context);
+ return [gatherND(x, indices)];
+ }
+ case 'SparseToDense': {
+ var indices = getParamValue('sparseIndices', node, tensorMap, context);
+ var shape = getParamValue('outputShape', node, tensorMap, context);
+ var sparseValues = getParamValue('sparseValues', node, tensorMap, context);
+ var defaultValue = getParamValue('defaultValue', node, tensorMap, context);
+ return [sparseToDense(indices, sparseValues, shape, sparseValues.dtype === defaultValue.dtype ?
+ defaultValue :
+ cast(defaultValue, sparseValues.dtype))];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$4 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'SparseFillEmptyRows': {
+ var _a = sparse.sparseFillEmptyRows(getParamValue('indices', node, tensorMap, context), getParamValue('values', node, tensorMap, context), getParamValue('denseShape', node, tensorMap, context), getParamValue('defaultValue', node, tensorMap, context)), outputIndices = _a.outputIndices, outputValues = _a.outputValues, emptyRowIndicator = _a.emptyRowIndicator, reverseIndexMap = _a.reverseIndexMap;
+ return [
+ outputIndices, outputValues, emptyRowIndicator, reverseIndexMap
+ ];
+ }
+ case 'SparseReshape': {
+ var _b = sparse.sparseReshape(getParamValue('inputIndices', node, tensorMap, context), getParamValue('inputShape', node, tensorMap, context), getParamValue('newShape', node, tensorMap, context)), outputIndices = _b.outputIndices, outputShape = _b.outputShape;
+ return [outputIndices, outputShape];
+ }
+ case 'SparseSegmentMean': {
+ var outputData = sparse.sparseSegmentMean(getParamValue('data', node, tensorMap, context), getParamValue('indices', node, tensorMap, context), getParamValue('segmentIds', node, tensorMap, context));
+ return [outputData];
+ }
+ case 'SparseSegmentSum': {
+ var outputData = sparse.sparseSegmentSum(getParamValue('data', node, tensorMap, context), getParamValue('indices', node, tensorMap, context), getParamValue('segmentIds', node, tensorMap, context));
+ return [outputData];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$3 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'FFT': {
+ return [fft(getParamValue('x', node, tensorMap, context))];
+ }
+ case 'IFFT': {
+ return [ifft(getParamValue('x', node, tensorMap, context))];
+ }
+ case 'RFFT': {
+ return [rfft(getParamValue('x', node, tensorMap, context))];
+ }
+ case 'IRFFT': {
+ return [irfft(getParamValue('x', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$2 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'StringNGrams': {
+ var _a = string.stringNGrams(getParamValue('data', node, tensorMap, context), getParamValue('dataSplits', node, tensorMap, context), getParamValue('separator', node, tensorMap, context), getParamValue('nGramWidths', node, tensorMap, context), getParamValue('leftPad', node, tensorMap, context), getParamValue('rightPad', node, tensorMap, context), getParamValue('padWidth', node, tensorMap, context), getParamValue('preserveShortSequences', node, tensorMap, context)), nGrams = _a.nGrams, nGramsSplits = _a.nGramsSplits;
+ return [nGrams, nGramsSplits];
+ }
+ case 'StringSplit': {
+ var _b = string.stringSplit(getParamValue('input', node, tensorMap, context), getParamValue('delimiter', node, tensorMap, context), getParamValue('skipEmpty', node, tensorMap, context)), indices = _b.indices, values = _b.values, shape = _b.shape;
+ return [indices, values, shape];
+ }
+ case 'StringToHashBucketFast': {
+ var output = string.stringToHashBucketFast(getParamValue('input', node, tensorMap, context), getParamValue('numBuckets', node, tensorMap, context));
+ return [output];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var executeOp$1 = function (node, tensorMap, context) {
+ switch (node.op) {
+ case 'Cast': {
+ return [cast(getParamValue('x', node, tensorMap, context), getParamValue('dtype', node, tensorMap, context))];
+ }
+ case 'ExpandDims': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ return [expandDims(getParamValue('x', node, tensorMap, context), axis)];
+ }
+ case 'Squeeze': {
+ var axis = getParamValue('axis', node, tensorMap, context);
+ return [squeeze(getParamValue('x', node, tensorMap, context), axis)];
+ }
+ case 'Reshape': {
+ return [reshape(getParamValue('x', node, tensorMap, context), getParamValue('shape', node, tensorMap, context))];
+ }
+ case 'MirrorPad': {
+ return [mirrorPad(getParamValue('x', node, tensorMap, context), getParamValue('padding', node, tensorMap, context), getParamValue('mode', node, tensorMap, context))];
+ }
+ case 'PadV2':
+ case 'Pad': {
+ return [pad(getParamValue('x', node, tensorMap, context), getParamValue('padding', node, tensorMap, context), getParamValue('constantValue', node, tensorMap, context))];
+ }
+ case 'SpaceToBatchND': {
+ var blockShape = getParamValue('blockShape', node, tensorMap, context);
+ var paddings = getParamValue('paddings', node, tensorMap, context);
+ return [spaceToBatchND(getParamValue('x', node, tensorMap, context), blockShape, paddings)];
+ }
+ case 'BatchToSpaceND': {
+ var blockShape = getParamValue('blockShape', node, tensorMap, context);
+ var crops = getParamValue('crops', node, tensorMap, context);
+ return [batchToSpaceND(getParamValue('x', node, tensorMap, context), blockShape, crops)];
+ }
+ case 'DepthToSpace': {
+ var blockSize = getParamValue('blockSize', node, tensorMap, context);
+ var dataFormat = getParamValue('dataFormat', node, tensorMap, context).toUpperCase();
+ return [depthToSpace(getParamValue('x', node, tensorMap, context), blockSize, dataFormat)];
+ }
+ case 'BroadcastTo': {
+ return [broadcastTo(getParamValue('x', node, tensorMap, context), getParamValue('shape', node, tensorMap, context))];
+ }
+ case 'BroadcastArgs': {
+ return [broadcastArgs(getParamValue('s0', node, tensorMap, context), getParamValue('s1', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError("Node type " + node.op + " is not implemented");
+ }
+ };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Executes the op defined by the node object.
+ * @param node
+ * @param tensorMap contains tensors for executed nodes and weights
+ * @param context contains tensors and information for running the current node.
+ * @param resourceManager Optional. Contains global resources of the model.
+ */
+ function executeOp(node, tensorMap, context, resourceManager) {
+ var value = (function (node, tensorMap, context) {
+ switch (node.category) {
+ case 'arithmetic':
+ return tfc__namespace.tidy(function () { return executeOp$j(node, tensorMap, context); });
+ case 'basic_math':
+ return tfc__namespace.tidy(function () { return executeOp$i(node, tensorMap, context); });
+ case 'control':
+ return executeOp$h(node, tensorMap, context);
+ case 'convolution':
+ return tfc__namespace.tidy(function () { return executeOp$g(node, tensorMap, context); });
+ case 'creation':
+ return tfc__namespace.tidy(function () { return executeOp$f(node, tensorMap, context); });
+ case 'dynamic':
+ return executeOp$e(node, tensorMap, context);
+ case 'evaluation':
+ return tfc__namespace.tidy(function () { return executeOp$d(node, tensorMap, context); });
+ case 'image':
+ return tfc__namespace.tidy(function () { return executeOp$a(node, tensorMap, context); });
+ case 'graph':
+ return tfc__namespace.tidy(function () { return executeOp$c(node, tensorMap, context); });
+ case 'logical':
+ return tfc__namespace.tidy(function () { return executeOp$9(node, tensorMap, context); });
+ case 'matrices':
+ return tfc__namespace.tidy(function () { return executeOp$8(node, tensorMap, context); });
+ case 'normalization':
+ return tfc__namespace.tidy(function () { return executeOp$7(node, tensorMap, context); });
+ case 'reduction':
+ return tfc__namespace.tidy(function () { return executeOp$6(node, tensorMap, context); });
+ case 'slice_join':
+ return tfc__namespace.tidy(function () { return executeOp$5(node, tensorMap, context); });
+ case 'sparse':
+ return tfc__namespace.tidy(function () { return executeOp$4(node, tensorMap, context); });
+ case 'spectral':
+ return tfc__namespace.tidy(function () { return executeOp$3(node, tensorMap, context); });
+ case 'string':
+ return tfc__namespace.tidy(function () { return executeOp$2(node, tensorMap, context); });
+ case 'transformation':
+ return tfc__namespace.tidy(function () { return executeOp$1(node, tensorMap, context); });
+ case 'hash_table':
+ return executeOp$b(node, tensorMap, context, resourceManager);
+ case 'custom':
+ var opMapper = getRegisteredOp(node.op);
+ if (opMapper && opMapper.customExecutor) {
+ return opMapper.customExecutor(new NodeValueImpl(node, tensorMap, context));
+ }
+ else {
+ throw TypeError("Custom op " + node.op + " is not registered.");
+ }
+ default:
+ throw TypeError("Unknown op '" + node.op + "'. File an issue at " +
+ "https://github.com/tensorflow/tfjs/issues so we can add it" +
+ ", or register a custom execution with tf.registerOp()");
+ }
+ })(node, tensorMap, context);
+ if (tfc__namespace.util.isPromise(value)) {
+ return value.then(function (data) { return [].concat(data); });
+ }
+ return [].concat(value);
+ }
+
+ /**
+ * ExecutionContext captures the runtime environment of the node. It keeps
+ * track of the current frame and iteration for the control flow ops.
+ *
+ * For example, typical Dynamic RNN model may contain loops, for which
+ * TensorFlow will generate graphs with Enter/Exit nodes to control the
+ * current execution frame, and NextIteration Nodes for iteration id increment.
+ * For model with branch logic, TensorFLow will generate Switch/Merge ops.
+ */
+ var ExecutionContext = /** @class */ (function () {
+ function ExecutionContext(weightMap, tensorArrayMap, tensorListMap, functionMap) {
+ if (weightMap === void 0) { weightMap = {}; }
+ if (tensorArrayMap === void 0) { tensorArrayMap = {}; }
+ if (tensorListMap === void 0) { tensorListMap = {}; }
+ if (functionMap === void 0) { functionMap = {}; }
+ this.weightMap = weightMap;
+ this.tensorArrayMap = tensorArrayMap;
+ this.tensorListMap = tensorListMap;
+ this.functionMap = functionMap;
+ this.rootContext = { id: 0, frameName: '', iterationId: 0 };
+ this.contexts = [this.rootContext];
+ this.lastId = 0;
+ this.generateCurrentContextIds();
+ }
+ ExecutionContext.prototype.newFrame = function (id, frameName) {
+ return { id: id, frameName: frameName, iterationId: 0 };
+ };
+ Object.defineProperty(ExecutionContext.prototype, "currentContext", {
+ get: function () {
+ return this.contexts;
+ },
+ /**
+ * Set the current context
+ * @param contexts: ExecutionContextInfo[] the current path of execution
+ * frames
+ */
+ set: function (contexts) {
+ if (this.contexts !== contexts) {
+ this.contexts = contexts;
+ this.generateCurrentContextIds();
+ }
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(ExecutionContext.prototype, "currentContextId", {
+ /**
+ * Returns the current context in string format.
+ */
+ get: function () {
+ return this._currentContextIds[0];
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(ExecutionContext.prototype, "currentContextIds", {
+ /**
+ * Returns the current context and all parent contexts in string format.
+ * This allow access to the nodes in the current and parent frames.
+ */
+ get: function () {
+ return this._currentContextIds;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ ExecutionContext.prototype.generateCurrentContextIds = function () {
+ var names = [];
+ for (var i = 0; i < this.contexts.length - 1; i++) {
+ var contexts = this.contexts.slice(0, this.contexts.length - i);
+ names.push(this.contextIdforContexts(contexts));
+ }
+ names.push('');
+ this._currentContextIds = names;
+ };
+ ExecutionContext.prototype.contextIdforContexts = function (contexts) {
+ return contexts ?
+ contexts
+ .map(function (context) { return (context.id === 0 && context.iterationId === 0) ?
+ '' :
+ context.frameName + "-" + context.iterationId; })
+ .join('/') :
+ '';
+ };
+ /**
+ * Enter a new frame, a new context is pushed on the current context list.
+ * @param frameId new frame id
+ */
+ ExecutionContext.prototype.enterFrame = function (frameId) {
+ if (this.contexts) {
+ this.lastId++;
+ this.contexts = this.contexts.slice();
+ this.contexts.push(this.newFrame(this.lastId, frameId));
+ this._currentContextIds.unshift(this.contextIdforContexts(this.contexts));
+ }
+ };
+ /**
+ * Exit the current frame, the last context is removed from the current
+ * context list.
+ */
+ ExecutionContext.prototype.exitFrame = function () {
+ if (this.contexts && this.contexts.length > 1) {
+ this.contexts = this.contexts.slice();
+ this.contexts.splice(-1);
+ this.currentContextIds.shift();
+ }
+ else {
+ throw new Error('Cannot exit frame, the context is empty');
+ }
+ };
+ /**
+ * Enter the next iteration of a loop, the iteration id of last context is
+ * increased.
+ */
+ ExecutionContext.prototype.nextIteration = function () {
+ if (this.contexts && this.contexts.length > 0) {
+ this.contexts = this.contexts.slice();
+ this.lastId++;
+ var context = Object.assign({}, this.contexts[this.contexts.length - 1]);
+ context.iterationId += 1;
+ context.id = this.lastId;
+ this.contexts.splice(-1, 1, context);
+ this._currentContextIds.splice(0, 1, this.contextIdforContexts(this.contexts));
+ }
+ else {
+ throw new Error('Cannot increase frame iteration, the context is empty');
+ }
+ };
+ ExecutionContext.prototype.getWeight = function (name) {
+ return this.weightMap[name];
+ };
+ ExecutionContext.prototype.addTensorArray = function (tensorArray) {
+ this.tensorArrayMap[tensorArray.id] = tensorArray;
+ };
+ ExecutionContext.prototype.getTensorArray = function (id) {
+ return this.tensorArrayMap[id];
+ };
+ ExecutionContext.prototype.addTensorList = function (tensorList) {
+ this.tensorListMap[tensorList.id] = tensorList;
+ };
+ ExecutionContext.prototype.getTensorList = function (id) {
+ return this.tensorListMap[id];
+ };
+ ExecutionContext.prototype.dispose = function (keepIds) {
+ for (var key in this.tensorArrayMap) {
+ this.tensorArrayMap[key].clearAndClose(keepIds);
+ }
+ for (var key in this.tensorListMap) {
+ this.tensorListMap[key].clearAndClose(keepIds);
+ }
+ };
+ return ExecutionContext;
+ }());
+
+ /**
+ * Given graph inputs and desired outputs, find the minimal set of nodes
+ * to execute in order to compute the outputs. In addition return other useful
+ * info such:
+ * - Missing inputs needed to compute the output.
+ * - Whether the subgraph contains dynamic ops (control flow, dynamic shape).
+ * - Alternative inputs in order to avoid async (dynamic op) execution.
+ */
+ function getExecutionSubgraph(inputs, outputs, weightMap, initNodes) {
+ var usedNodes = new Set();
+ var missingInputs = [];
+ var dynamicNode = null;
+ var syncInputs = null;
+ // Start with the outputs, going backwards and find all the nodes that are
+ // needed to compute those outputs.
+ var seen = new Set();
+ var inputNodeNames = Object.keys(inputs).map(function (name) { return parseNodeName(name)[0]; });
+ var initNodeNames = [];
+ if (initNodes != null) {
+ initNodeNames = initNodes.map(function (node) { return parseNodeName(node.name)[0]; });
+ }
+ var frontier = __spread(outputs);
+ while (frontier.length > 0) {
+ var node = frontier.pop();
+ if (isControlFlow(node) || isDynamicShape(node) || isHashTable(node)) {
+ if (dynamicNode == null) {
+ dynamicNode = node;
+ syncInputs = dynamicNode.children.map(function (child) { return child.name; })
+ .filter(function (name) { return usedNodes.has(name); });
+ }
+ }
+ usedNodes.add(node.name);
+ // Weights are dead end since we already have their values.
+ if (weightMap[node.name] != null) {
+ continue;
+ }
+ // This node is a dead end since it's one of the user-provided inputs.
+ if (inputNodeNames.indexOf(node.name) !== -1) {
+ continue;
+ }
+ // This node is a dead end since it doesn't have any inputs.
+ if (initNodeNames.indexOf(node.name) !== -1) {
+ continue;
+ }
+ if (node.inputs.length === 0) {
+ missingInputs.push(node.name);
+ continue;
+ }
+ node.inputs.forEach(function (input) {
+ // Don't add to the frontier if it is already there.
+ if (seen.has(input.name)) {
+ return;
+ }
+ seen.add(input.name);
+ frontier.push(input);
+ });
+ }
+ return { inputs: inputs, outputs: outputs, usedNodes: usedNodes, missingInputs: missingInputs, dynamicNode: dynamicNode, syncInputs: syncInputs };
+ }
+ /**
+ * Given the execution info, return a list of nodes in topological order that
+ * need to be executed to compute the output.
+ */
+ function getNodesInTopologicalOrder(graph, weightMap, executionInfo) {
+ var usedNodes = executionInfo.usedNodes, inputs = executionInfo.inputs;
+ var frontier = [];
+ var inputNodes = Object.keys(inputs)
+ .map(function (name) { return parseNodeName(name)[0]; })
+ .map(function (name) { return graph.nodes[name]; });
+ var initNodes = graph.initNodes;
+ inputNodes.forEach(function (input) {
+ if (usedNodes.has(input.name)) {
+ frontier.push(input);
+ }
+ });
+ graph.weights.forEach(function (weight) {
+ if (usedNodes.has(weight.name)) {
+ frontier.push(weight);
+ }
+ });
+ if (initNodes != null) {
+ initNodes.forEach(function (node) {
+ if (usedNodes.has(node.name)) {
+ frontier.push(node);
+ }
+ });
+ }
+ var seen = new Set();
+ var orderedNodes = [];
+ while (frontier.length > 0) {
+ var node = frontier.pop();
+ seen.add(node.name);
+ if (!weightMap[node.name]) {
+ orderedNodes.push(node);
+ }
+ node.children.forEach(function (child) {
+ if (!seen.has(child.name) && usedNodes.has(child.name) &&
+ child.inputs.every(function (input) { return seen.has(input.name); })) {
+ frontier.push(child);
+ }
+ });
+ }
+ return orderedNodes;
+ }
+ var CONTROL_FLOW_OPS = [
+ 'Switch', 'Merge', 'Enter', 'Exit', 'NextIteration', 'StatelessIf',
+ 'StatelessWhile', 'if', 'While'
+ ];
+ var DYNAMIC_SHAPE_OPS = [
+ 'NonMaxSuppressionV2', 'NonMaxSuppressionV3', 'NonMaxSuppressionV5', 'Where'
+ ];
+ var HASH_TABLE_OPS = [
+ 'HashTable', 'HashTableV2', 'LookupTableImport', 'LookupTableImportV2',
+ 'LookupTableFind', 'LookupTableFindV2', 'LookupTableSize', 'LookupTableSizeV2'
+ ];
+ function isControlFlow(node) {
+ return CONTROL_FLOW_OPS.indexOf(node.op) >= 0;
+ }
+ function isDynamicShape(node) {
+ return DYNAMIC_SHAPE_OPS.indexOf(node.op) >= 0;
+ }
+ function isHashTable(node) {
+ return HASH_TABLE_OPS.indexOf(node.op) >= 0;
+ }
+
+ var GraphExecutor = /** @class */ (function () {
+ /**
+ *
+ * @param graph Graph the model or function graph to be executed.
+ * @param parent When building function exector you need to set the parent
+ * executor. Since the weights and function executor maps are set at parant
+ * level, that function executor can access the function maps and weight maps
+ * through the parent.
+ */
+ function GraphExecutor(graph, parent) {
+ var _this = this;
+ this.graph = graph;
+ this.parent = parent;
+ this.compiledMap = new Map();
+ this._weightMap = {};
+ this.SEPERATOR = ',';
+ this._functions = {};
+ this._functionExecutorMap = {};
+ this.intermediateTensors = {};
+ this.keepTensorForDebug = false;
+ this._outputs = graph.outputs;
+ this._inputs = graph.inputs;
+ this._initNodes = graph.initNodes;
+ this._signature = graph.signature;
+ this._functions = graph.functions;
+ // create sub-graph executors
+ if (graph.functions != null) {
+ Object.keys(graph.functions).forEach(function (name) {
+ _this._functionExecutorMap[name] =
+ new GraphExecutor(graph.functions[name], _this);
+ });
+ }
+ }
    // Ids of all weight tensors. Owned by the root executor; function
    // (child) executors delegate to their parent.
    Object.defineProperty(GraphExecutor.prototype, "weightIds", {
        get: function () {
            return this.parent ? this.parent.weightIds : this._weightIds;
        },
        enumerable: true,
        configurable: true
    });
    // Function executors are shared model-wide through the root executor.
    Object.defineProperty(GraphExecutor.prototype, "functionExecutorMap", {
        get: function () {
            return this.parent ? this.parent.functionExecutorMap :
                this._functionExecutorMap;
        },
        enumerable: true,
        configurable: true
    });
    // The weight map also delegates to the parent. Setting it caches the
    // flattened list of weight tensor ids used later for keep/dispose
    // decisions.
    Object.defineProperty(GraphExecutor.prototype, "weightMap", {
        get: function () {
            return this.parent ? this.parent.weightMap : this._weightMap;
        },
        set: function (weightMap) {
            var weightIds = Object.keys(weightMap).map(function (key) { return weightMap[key].map(function (tensor) { return tensor.id; }); });
            this._weightIds = [].concat.apply([], __spread(weightIds));
            this._weightMap = weightMap;
        },
        enumerable: true,
        configurable: true
    });
    Object.defineProperty(GraphExecutor.prototype, "resourceManager", {
        /**
         * Set `ResourceManager` shared by executors of a model.
         * @param resourceManager: `ResourceManager` of the `GraphModel`.
         */
        set: function (resourceManager) {
            this._resourceManager = resourceManager;
        },
        enumerable: true,
        configurable: true
    });
    // Public description of the graph inputs: node name plus optional
    // shape/dtype taken from the node's attrParams when present.
    Object.defineProperty(GraphExecutor.prototype, "inputs", {
        get: function () {
            return this._inputs.map(function (node) {
                return {
                    name: node.name,
                    shape: node.attrParams['shape'] ?
                        node.attrParams['shape'].value :
                        undefined,
                    dtype: node.attrParams['dtype'] ?
                        node.attrParams['dtype'].value :
                        undefined
                };
            });
        },
        enumerable: true,
        configurable: true
    });
    // Public description of the graph outputs (same format as `inputs`).
    Object.defineProperty(GraphExecutor.prototype, "outputs", {
        get: function () {
            return this._outputs.map(function (node) {
                return {
                    name: node.name,
                    shape: node.attrParams['shape'] ?
                        node.attrParams['shape'].value :
                        undefined,
                    dtype: node.attrParams['dtype'] ?
                        node.attrParams['dtype'].value :
                        undefined
                };
            });
        },
        enumerable: true,
        configurable: true
    });
    // Input node names, preferring the signature key when one exists.
    Object.defineProperty(GraphExecutor.prototype, "inputNodes", {
        get: function () {
            return this._inputs.map(function (node) { return node.signatureKey || node.name; });
        },
        enumerable: true,
        configurable: true
    });
    // Output node names; a node with a defaultOutput gets a ":index" suffix.
    Object.defineProperty(GraphExecutor.prototype, "outputNodes", {
        get: function () {
            return this._outputs.map(function (node) {
                var name = node.signatureKey || node.name;
                return node.defaultOutput ? (name + ":" + node.defaultOutput) : name;
            });
        },
        enumerable: true,
        configurable: true
    });
    // Map of function name -> function signature for this model's functions.
    Object.defineProperty(GraphExecutor.prototype, "functions", {
        get: function () {
            var _this = this;
            return Object.keys(this._functions).reduce(function (map, key) {
                map[key] = _this._functions[key].signature;
                return map;
            }, {});
        },
        enumerable: true,
        configurable: true
    });
+ GraphExecutor.prototype.getCompilationKey = function (inputs, outputs) {
+ var sortedInputs = inputs.map(function (node) { return node.name; }).sort();
+ var sortedOutputs = outputs.map(function (node) { return node.name; }).sort();
+ return sortedInputs.join(this.SEPERATOR) + '--' +
+ sortedOutputs.join(this.SEPERATOR);
+ };
+ /**
+ * Compiles the inference graph and returns the minimal set of nodes that are
+ * required for execution, in the correct execution order.
+ */
+ GraphExecutor.prototype.compile = function (inputs, outputs) {
+ var executionInfo = getExecutionSubgraph(inputs, outputs, this.weightMap, this._initNodes);
+ var missingInputs = executionInfo.missingInputs, dynamicNode = executionInfo.dynamicNode, syncInputs = executionInfo.syncInputs;
+ if (dynamicNode != null) {
+ throw new Error("This execution contains the node '" + dynamicNode.name + "', which has " +
+ ("the dynamic op '" + dynamicNode.op + "'. Please use ") +
+ "model.executeAsync() instead. Alternatively, to avoid the " +
+ ("dynamic ops, specify the inputs [" + syncInputs + "]"));
+ }
+ if (missingInputs.length > 0) {
+ var outNames = outputs.map(function (n) { return n.name; });
+ var inNames = Object.keys(inputs);
+ throw new Error("Cannot compute the outputs [" + outNames + "] from the provided inputs " +
+ ("[" + inNames + "]. Missing the following inputs: [" + missingInputs + "]"));
+ }
+ return getNodesInTopologicalOrder(this.graph, this.weightMap, executionInfo);
+ };
    /**
     * Executes the inference for given input tensors.
     * @param inputs Tensor map for the model inputs, keyed by the input node
     * names.
     * @param outputs Optional. output node name from the Tensorflow model, if
     * no outputs are specified, the default outputs of the model would be used.
     * You can inspect intermediate nodes of the model by adding them to the
     * outputs array.
     */
    GraphExecutor.prototype.execute = function (inputs, outputs) {
        var _this = this;
        // Resolve input/output aliases and validate them against the graph.
        // (mapInputs/checkInputs/... are defined elsewhere on this class.)
        inputs = this.mapInputs(inputs);
        var names = Object.keys(inputs).sort();
        this.checkInputs(inputs);
        this.checkInputShapeAndType(inputs);
        outputs = this.mapOutputs(outputs);
        this.checkOutputs(outputs);
        var inputNodes = names.map(function (name) { return _this.graph.nodes[parseNodeName(name)[0]]; });
        var outputNodeNames = outputs.map(function (name) { return parseNodeName(name)[0]; });
        var outputNodes = outputNodeNames.map(function (name) { return _this.graph.nodes[name]; });
        this.resetIntermediateTensors();
        // If no outputs are specified, then use the default outputs of the model.
        if (outputNodes.length === 0) {
            outputNodes = this._outputs;
        }
        var compilationKey = this.getCompilationKey(inputNodes, outputNodes);
        // Do nothing if the compiled graph cache contains the input.
        var orderedNodes = this.compiledMap.get(compilationKey);
        if (orderedNodes == null) {
            orderedNodes = this.compile(inputs, outputNodes);
            this.compiledMap.set(compilationKey, orderedNodes);
        }
        var tensorArrayMap = {};
        var tensorListMap = {};
        // Run the whole graph inside tidy() so intermediates are reclaimed;
        // the returned output tensors survive the tidy scope.
        return tfc.tidy(function () {
            var context = new ExecutionContext(_this.weightMap, tensorArrayMap, tensorListMap, _this.functionExecutorMap);
            // Start the tensor map from the weights, then overlay the provided
            // input tensors (parseNodeName yields [nodeName, outputIndex]).
            var tensorsMap = Object.assign({}, _this.weightMap);
            Object.keys(inputs).forEach(function (name) {
                var _a = __read(parseNodeName(name), 2), nodeName = _a[0], index = _a[1];
                var tensors = [];
                tensors[index] = inputs[name];
                tensorsMap[nodeName] = tensors;
            });
            // Inputs and weights must never be disposed as intermediates.
            var tensorsToKeep = _this.getFrozenTensorIds(tensorsMap);
            var intermediateTensorConsumerCount = {};
            for (var i = 0; i < orderedNodes.length; i++) {
                var node = orderedNodes[i];
                if (!tensorsMap[node.name]) {
                    var tensors = executeOp(node, tensorsMap, context, _this._resourceManager);
                    // Synchronous execution cannot handle async ops.
                    if (tfc.util.isPromise(tensors)) {
                        throw new Error("The execution of the op '" + node.op + "' returned a promise. " +
                            "Please use model.executeAsync() instead.");
                    }
                    tensorsMap[node.name] = tensors;
                    _this.checkTensorForDisposal(node.name, node, tensorsMap, context, tensorsToKeep, outputNodeNames, intermediateTensorConsumerCount);
                }
            }
            // dispose the context for the root executor
            if (_this.parent == null) {
                context.dispose(tensorsToKeep);
            }
            return outputs.map(function (name) { return getTensor(name, tensorsMap, context); });
        });
    };
+ GraphExecutor.prototype.getFrozenTensorIds = function (tensorMap) {
+ var ids = [].concat.apply([], Object.keys(tensorMap)
+ .map(function (key) { return tensorMap[key]; })
+ .map(function (tensors) { return tensors.map(function (tensor) { return tensor.id; }); }));
+ return new Set(ids);
+ };
    /**
     * Reference-counting disposal of intermediate tensors: each tensor a node
     * produces is expected to be consumed once per child; when the last
     * consumer has run, the tensor is disposed (or, in debug mode, stashed in
     * `intermediateTensors` for inspection).
     */
    GraphExecutor.prototype.checkTensorForDisposal = function (nodeName, node, tensorMap, context, tensorsToKeep, outputNames, intermediateTensorConsumerCount) {
        var _this = this;
        // Skip output nodes and any control flow nodes, since its dependency is
        // tricky to track correctly.
        if (node.category === 'control' || outputNames.indexOf(nodeName) !== -1) {
            return;
        }
        // Register this node's outputs: each will be consumed once per child.
        tensorMap[nodeName].forEach(function (tensor) {
            if (tensor != null) {
                intermediateTensorConsumerCount[tensor.id] =
                    (intermediateTensorConsumerCount[tensor.id] || 0) +
                        node.children.length;
            }
        });
        // This node just consumed its inputs; decrement their counts and
        // dispose any input tensor this was the last consumer of.
        node.inputs.forEach(function (input) {
            // Skip any control flow nodes, since its dependency is tricky to track
            // correctly.
            if (input.category !== 'control') {
                var tensors = getTensorsForCurrentContenxt(input.name, tensorMap, context);
                if (tensors != null) {
                    tensors.forEach(function (tensor) {
                        if (tensor && !tensor.kept && !tensorsToKeep.has(tensor.id)) {
                            var count = intermediateTensorConsumerCount[tensor.id];
                            if (count === 1) {
                                // Last consumer: dispose, or keep for debugging.
                                if (!_this.keepTensorForDebug) {
                                    tensor.dispose();
                                }
                                else {
                                    var _a = __read(getNodeNameAndIndex(node.name, context), 2), nodeName_1 = _a[0], index = _a[1];
                                    if (_this.intermediateTensors[nodeName_1]) {
                                        _this.intermediateTensors[nodeName_1][index] = tensor;
                                    }
                                    else {
                                        _this.intermediateTensors[nodeName_1] = [];
                                        _this.intermediateTensors[nodeName_1][index] = tensor;
                                    }
                                }
                                delete intermediateTensorConsumerCount[tensor.id];
                            }
                            else if (count != null) {
                                // only intermediate nodes has count set, inputs and weights are
                                // not.
                                intermediateTensorConsumerCount[tensor.id]--;
                            }
                        }
                    });
                }
            }
        });
    };
    /**
     * Executes the inference for given input tensors in Async fashion.
     * @param inputs Tensor map for the model inputs, keyed by the input node
     * names.
     * @param outputs output node name from the Tensorflow model, if no outputs
     * are specified, the default outputs of the model would be used. You can
     * inspect intermediate nodes of the model by adding them to the outputs
     * array.
     */
    GraphExecutor.prototype.executeAsync = function (inputs, outputs) {
        // Thin public wrapper: delegates to _executeAsync with the defaults
        // (not a function execution, fresh TensorArray/TensorList maps).
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                return [2 /*return*/, this._executeAsync(inputs, outputs)];
            });
        });
    };
+ GraphExecutor.prototype.disposeIntermediateTensors = function () {
+ var _this = this;
+ if (!this.intermediateTensors) {
+ return;
+ }
+ Object.keys(this.intermediateTensors)
+ .forEach(function (key) { return _this.intermediateTensors[key].forEach(function (tensor) { return tensor.dispose(); }); });
+ this.disposeTensorsMap();
+ };
+ GraphExecutor.prototype.disposeTensorsMap = function () {
+ var _this = this;
+ if (!this.tensorsMap) {
+ return;
+ }
+ Object.keys(this.tensorsMap).forEach(function (key) {
+ var tensorArray = _this.tensorsMap[key];
+ tensorArray.forEach(function (tensor) {
+ if (tensor && !tensor.kept && !tensor.isDisposed &&
+ !_this.keepIds.has(tensor.id)) {
+ tensor.dispose();
+ }
+ });
+ });
+ };
+ GraphExecutor.prototype.getIntermediateTensors = function () {
+ return this.tensorsMap;
+ };
+ GraphExecutor.prototype.resetIntermediateTensors = function () {
+ for (var key in this.intermediateTensors) {
+ this.intermediateTensors[key].forEach(function (tensor) { return tensor.dispose(); });
+ delete this.intermediateTensors[key];
+ }
+ };
    /**
     * Executes the inference for given input tensors in Async fashion.
     * @param inputs Tensor map for the model inputs, keyed by the input node
     * names.
     * @param outputs Optional. output node name from the Tensorflow model,
     * if no outputs are specified, the default outputs of the model would be
     * used. You can inspect intermediate nodes of the model by adding them to the
     * outputs array.
     * @param isFunctionExecution Optional. Flag for executing a function.
     * @param tensorArrayMap Optional. Global TensorArray map by id. Used for
     * function execution.
     * @param tensorListMap Optional. Global TensorList map by id. Used for
     * function execution.
     */
    GraphExecutor.prototype._executeAsync = function (inputs, outputs, isFunctionExecution, tensorArrayMap, tensorListMap) {
        if (isFunctionExecution === void 0) { isFunctionExecution = false; }
        if (tensorArrayMap === void 0) { tensorArrayMap = {}; }
        if (tensorListMap === void 0) { tensorListMap = {}; }
        return __awaiter(this, void 0, void 0, function () {
            var context, _a, results, outputIds, inputIds;
            var _this = this;
            return __generator(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        // Function sub-graph calls arrive pre-mapped, so the
                        // input/output mapping and checks only run at top level.
                        if (!isFunctionExecution) {
                            inputs = this.mapInputs(inputs);
                            this.checkInputs(inputs);
                            this.checkInputShapeAndType(inputs);
                            outputs = this.mapOutputs(outputs);
                            this.checkOutputs(outputs);
                        }
                        // For model debug.
                        try {
                            this.keepTensorForDebug = tfc.env().getBool('KEEP_INTERMEDIATE_TENSORS');
                        }
                        catch (e) {
                            // Flag may not exist in this environment; keep the default.
                            console.warn(e.message);
                        }
                        this.resetIntermediateTensors();
                        context = new ExecutionContext(this.weightMap, tensorArrayMap, tensorListMap, this.functionExecutorMap);
                        // Graph with control flow op requires runtime evaluation of the execution
                        // order, while without control flow the execution order is pre-determined
                        // in the compile method.
                        _a = this;
                        return [4 /*yield*/, this.executeWithControlFlow(inputs, context, outputs, isFunctionExecution)];
                    case 1:
                        // Graph with control flow op requires runtime evaluation of the execution
                        // order, while without control flow the execution order is pre-determined
                        // in the compile method.
                        _a.tensorsMap = _b.sent();
                        results = outputs.map(function (name) { return getTensor(name, _this.tensorsMap, context); });
                        outputIds = results.map(function (t) { return t.id; });
                        inputIds = Object.keys(inputs).map(function (name) { return inputs[name].id; });
                        // Pin outputs, inputs and weights; everything else is disposable.
                        this.keepIds =
                            new Set(__spread(outputIds, inputIds, this.weightIds));
                        if (!this.keepTensorForDebug) {
                            this.disposeTensorsMap();
                        }
                        // dispose the context for the root executor
                        if (this.parent == null) {
                            context.dispose(this.keepIds);
                        }
                        return [2 /*return*/, results];
                }
            });
        });
    };
    /**
     * Executes a function sub-graph with positional input tensors:
     * `inputs[i]` is matched to the i-th declared input of this executor's
     * graph before delegating to _executeAsync as a function execution.
     */
    GraphExecutor.prototype.executeFunctionAsync = function (inputs, tensorArrayMap, tensorListMap) {
        return __awaiter(this, void 0, void 0, function () {
            var mappedInputs;
            var _this = this;
            return __generator(this, function (_a) {
                // Positional tensors -> named tensor map, using declared order.
                mappedInputs = inputs.reduce(function (map, tensor, index) {
                    map[_this.inputs[index].name] = tensor;
                    return map;
                }, {});
                return [2 /*return*/, this._executeAsync(mappedInputs, this.outputNodes, true, tensorArrayMap, tensorListMap)];
            });
        });
    };
    /**
     * When there are control flow nodes in the graph, the graph execution use
     * ExecutionContext to keep track of the frames and loop iterators.
     * @param inputs placeholder tensors for the graph.
     * @param context the execution context object for current execution.
     * @param outputNames Optional. output node name from the Tensorflow model,
     * if no outputs are specified, the default outputs of the model would be
     * used. You can inspect intermediate nodes of the model by adding them to the
     * outputs array.
     * @param isFunctionExecution Flag for executing a function.
     */
    GraphExecutor.prototype.executeWithControlFlow = function (inputs, context, outputNames, isFunctionExecution) {
        return __awaiter(this, void 0, void 0, function () {
            var names, inputNodes, outputNodeNames, outputNodes, _a, usedNodes, missingInputs, dynamicNode, syncInputs, stack, tensorsMap, intermediateTensorConsumerCount, tensorsToKeep, added, promises, missingOutputs, alternativeMsg;
            var _this = this;
            return __generator(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        names = Object.keys(inputs);
                        inputNodes = names.map(function (name) { return _this.graph.nodes[parseNodeName(name)[0]]; });
                        outputNodeNames = outputNames.map(function (name) { return parseNodeName(name)[0]; });
                        outputNodes = outputNodeNames.map(function (name) { return _this.graph.nodes[name]; });
                        // If no outputs are specified, then use the default outputs of the model.
                        if (outputNodes.length === 0) {
                            outputNodes = this._outputs;
                        }
                        _a = getExecutionSubgraph(inputs, outputNodes, this.weightMap, this._initNodes), usedNodes = _a.usedNodes, missingInputs = _a.missingInputs, dynamicNode = _a.dynamicNode, syncInputs = _a.syncInputs;
                        // Seed the stack with inputs, weights and init nodes, all
                        // in the current (root) execution frame.
                        stack = __spread(inputNodes, this.graph.weights, (this._initNodes || [])).map(function (node) {
                            return { node: node, contexts: context.currentContext };
                        });
                        tensorsMap = Object.assign({}, this.weightMap);
                        // Overlay the provided input tensors at their output index.
                        Object.keys(inputs).forEach(function (name) {
                            var _a = __read(parseNodeName(name), 2), nodeName = _a[0], index = _a[1];
                            var tensors = [];
                            tensors[index] = inputs[name];
                            tensorsMap[nodeName] = tensors;
                        });
                        intermediateTensorConsumerCount = {};
                        tensorsToKeep = this.getFrozenTensorIds(tensorsMap);
                        added = {};
                        _b.label = 1;
                    case 1:
                        // Repeatedly drain the stack, awaiting in-flight async ops
                        // between rounds, until nothing remains to execute.
                        if (!(stack.length > 0)) return [3 /*break*/, 3];
                        promises = this.processStack(inputNodes, stack, context, tensorsMap, added, tensorsToKeep, outputNodeNames, intermediateTensorConsumerCount, usedNodes);
                        return [4 /*yield*/, Promise.all(promises)];
                    case 2:
                        _b.sent();
                        return [3 /*break*/, 1];
                    case 3:
                        if (dynamicNode == null && !isFunctionExecution) {
                            console.warn("This model execution did not contain any nodes with control flow " +
                                "or dynamic output shapes. You can use model.execute() instead.");
                        }
                        missingOutputs = outputNodes
                            .filter(function (node) { return !isControlFlow(node) &&
                            !getTensor(node.name, tensorsMap, context); })
                            .map(function (node) { return node.name; });
                        if (missingOutputs.length > 0) {
                            alternativeMsg = '';
                            if (dynamicNode != null) {
                                alternativeMsg =
                                    "Alternatively, to avoid the dynamic ops, use model.execute() " +
                                        ("and specify the inputs [" + syncInputs + "]");
                            }
                            throw new Error("Cannot compute the outputs [" + missingOutputs + "] from the provided " +
                                ("inputs [" + names + "]. Consider providing the following inputs: ") +
                                ("[" + missingInputs + "]. " + alternativeMsg));
                        }
                        return [2 /*return*/, tensorsMap];
                }
            });
        });
    };
    /**
     * Drains the pending-node stack once: executes every ready node (possibly
     * asynchronously) and schedules children whose inputs are all available.
     * Returns the promises of the in-flight async ops so the caller can await
     * them before draining again.
     */
    GraphExecutor.prototype.processStack = function (inputNodes, stack, context, tensorMap, added, tensorsToKeep, outputNames, intermediateTensorConsumerCount, usedNodes) {
        var _this = this;
        var promises = [];
        var _loop_1 = function () {
            var _a, _b;
            var item = stack.pop();
            // Restore the execution frame the node was scheduled under.
            context.currentContext = item.contexts;
            var nodeName = '';
            // The tensor of the Enter op with isConstant set should be set
            // in the parent scope, so it will be available as constant for the
            // whole loop.
            if (item.node.op === 'Enter' &&
                getParamValue('isConstant', item.node, tensorMap, context)) {
                _a = __read(getNodeNameAndIndex(item.node.name, context), 1), nodeName = _a[0];
            }
            // only process nodes that are not in the tensorMap yet, this include
            // inputNodes and internal initNodes.
            if (tensorMap[item.node.name] == null) {
                var tensors = executeOp(item.node, tensorMap, context, this_1._resourceManager);
                if (!nodeName) {
                    _b = __read(getNodeNameAndIndex(item.node.name, context), 1), nodeName = _b[0];
                }
                var currentContext_1 = context.currentContext;
                if (tfc.util.isPromise(tensors)) {
                    // Async op: record the result and schedule children when
                    // the promise resolves, restoring the captured context.
                    promises.push(tensors.then(function (t) {
                        tensorMap[nodeName] = t;
                        context.currentContext = currentContext_1;
                        _this.checkTensorForDisposal(nodeName, item.node, tensorMap, context, tensorsToKeep, outputNames, intermediateTensorConsumerCount);
                        _this.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes);
                        return t;
                    }));
                }
                else {
                    tensorMap[nodeName] = tensors;
                    this_1.checkTensorForDisposal(nodeName, item.node, tensorMap, context, tensorsToKeep, outputNames, intermediateTensorConsumerCount);
                    this_1.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes);
                }
            }
            else {
                // Node already has a value (input/weight): just schedule children.
                this_1.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes);
            }
        };
        var this_1 = this;
        while (stack.length > 0) {
            _loop_1();
        }
        return promises;
    };
+ GraphExecutor.prototype.processChildNodes = function (node, stack, context, tensorMap, added, usedNodes) {
+ node.children.forEach(function (childNode) {
+ var _a = __read(getNodeNameAndIndex(childNode.name, context), 1), nodeName = _a[0];
+ if (added[nodeName] || !usedNodes.has(childNode.name)) {
+ return;
+ }
+ // Merge op can be pushed if any of its inputs has value.
+ if (childNode.op === 'Merge') {
+ if (childNode.inputNames.some(function (name) {
+ return !!getTensor(name, tensorMap, context);
+ })) {
+ added[nodeName] = true;
+ stack.push({ contexts: context.currentContext, node: childNode });
+ }
+ }
+ else // Otherwise all inputs must to have value.
+ if (childNode.inputNames.every(function (name) {
+ return !!getTensor(name, tensorMap, context);
+ })) {
+ added[nodeName] = true;
+ stack.push({ contexts: context.currentContext, node: childNode });
+ }
+ });
+ };
+ /**
+ * Releases the memory used by the weight tensors.
+ */
+ GraphExecutor.prototype.dispose = function () {
+ var _this = this;
+ Object.keys(this.weightMap)
+ .forEach(function (key) { return _this.weightMap[key].forEach(function (tensor) { return tensor.dispose(); }); });
+ };
+ GraphExecutor.prototype.checkInputShapeAndType = function (inputs) {
+ var _this = this;
+ Object.keys(inputs).forEach(function (name) {
+ var input = inputs[name];
+ var _a = __read(parseNodeName(name), 1), nodeName = _a[0];
+ var node = _this.graph.nodes[nodeName];
+ if (node.attrParams['shape'] && node.attrParams['shape'].value) {
+ var shape_1 = node.attrParams['shape'].value;
+ var match = shape_1.length === input.shape.length &&
+ input.shape.every(function (dim, index) { return shape_1[index] === -1 || shape_1[index] === dim; });
+ tfc.util.assert(match, function () { return "The shape of dict['" + node.name + "'] provided in " +
+ ("model.execute(dict) must be [" + shape_1 + "], but was ") +
+ ("[" + input.shape + "]"); });
+ }
+ if (node.attrParams['dtype'] && node.attrParams['dtype'].value) {
+ tfc.util.assert(input.dtype === node.attrParams['dtype'].value, function () { return "The dtype of dict['" + node.name + "'] provided in " +
+ "model.execute(dict) must be " +
+ (node.attrParams['dtype'].value + ", but was " + input.dtype); });
+ }
+ });
+ };
+ GraphExecutor.prototype.mapInputs = function (inputs) {
+ var result = {};
+ for (var inputName in inputs) {
+ if (this._signature != null && this._signature.inputs != null &&
+ this._signature.inputs[inputName] != null) {
+ var tensor = this._signature.inputs[inputName];
+ result[tensor.name] = inputs[inputName];
+ }
+ else {
+ result[inputName] = inputs[inputName];
+ }
+ }
+ return result;
+ };
+ GraphExecutor.prototype.checkInputs = function (inputs) {
+ var _this = this;
+ var notInGraph = Object.keys(inputs).filter(function (name) {
+ var _a = __read(parseNodeName(name), 1), nodeName = _a[0];
+ return _this.graph.nodes[nodeName] == null;
+ });
+ if (notInGraph.length > 0) {
+ throw new Error("The dict provided in model.execute(dict) has " +
+ ("keys: [" + notInGraph + "] that are not part of graph"));
+ }
+ };
+ GraphExecutor.prototype.mapOutputs = function (outputs) {
+ var _this = this;
+ return outputs.map(function (name) {
+ if (_this._signature != null && _this._signature.outputs != null &&
+ _this._signature.outputs[name] != null) {
+ var tensor = _this._signature.outputs[name];
+ return tensor.name;
+ }
+ return name;
+ }, {});
+ };
+ GraphExecutor.prototype.checkOutputs = function (outputs) {
+ var _this = this;
+ outputs.forEach(function (name) {
+ var _a = __read(parseNodeName(name), 1), normalizedName = _a[0];
+ if (!_this.graph.nodes[normalizedName]) {
+ throw new Error("The output '" + name + "' is not found in the graph");
+ }
+ });
+ };
+ return GraphExecutor;
+ }());
+
+ /**
+ * Contains global resources of a model.
+ */
+ var ResourceManager = /** @class */ (function () {
+ function ResourceManager(hashTableNameToHandle, hashTableMap) {
+ if (hashTableNameToHandle === void 0) { hashTableNameToHandle = {}; }
+ if (hashTableMap === void 0) { hashTableMap = {}; }
+ this.hashTableNameToHandle = hashTableNameToHandle;
+ this.hashTableMap = hashTableMap;
+ }
+ /**
+ * Register a `HashTable` in the resource manager.
+ *
+ * The `HashTable` can be retrieved by `resourceManager.getHashTableById`,
+ * where id is the table handle tensor's id.
+ *
+ * @param name Op node name that creates the `HashTable`.
+ * @param hashTable The `HashTable` to be added to resource manager.
+ */
+ ResourceManager.prototype.addHashTable = function (name, hashTable) {
+ this.hashTableNameToHandle[name] = hashTable.handle;
+ this.hashTableMap[hashTable.id] = hashTable;
+ };
+ /**
+ * Get the table handle by node name.
+ * @param name Op node name that creates the `HashTable`. This name is also
+ * used in the inputs list of lookup and import `HashTable` ops.
+ */
+ ResourceManager.prototype.getHashTableHandleByName = function (name) {
+ return this.hashTableNameToHandle[name];
+ };
+ /**
+ * Get the actual `HashTable` by its handle tensor's id.
+ * @param id The id of the handle tensor.
+ */
+ ResourceManager.prototype.getHashTableById = function (id) {
+ return this.hashTableMap[id];
+ };
+ /**
+ * Dispose `ResourceManager`, including its hashTables and tensors in them.
+ */
+ ResourceManager.prototype.dispose = function () {
+ for (var key in this.hashTableMap) {
+ this.hashTableMap[key].clearAndClose();
+ delete this.hashTableMap[key];
+ }
+ for (var name in this.hashTableNameToHandle) {
+ this.hashTableNameToHandle[name].dispose();
+ delete this.hashTableNameToHandle[name];
+ }
+ };
+ return ResourceManager;
+ }());
+
+ var TFHUB_SEARCH_PARAM = '?tfjs-format=file';
+ var DEFAULT_MODEL_NAME = 'model.json';
+ /**
+ * A `tf.GraphModel` is a directed, acyclic graph built from a
+ * SavedModel GraphDef and allows inference execution.
+ *
+ * A `tf.GraphModel` can only be created by loading from a model converted from
+ * a [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) using
+ * the command line converter tool and loaded via `tf.loadGraphModel`.
+ *
+ * @doc {heading: 'Models', subheading: 'Classes'}
+ */
+ var GraphModel = /** @class */ (function () {
+ /**
+ * @param modelUrl url for the model, or an `io.IOHandler`.
+ * @param weightManifestUrl url for the weight file generated by
+ * scripts/convert.py script.
+ * @param requestOption options for Request, which allows to send credentials
+ * and custom headers.
+ * @param onProgress Optional, progress callback function, fired periodically
+ * before the load is completed.
+ */
+ function GraphModel(modelUrl, loadOptions) {
+ if (loadOptions === void 0) { loadOptions = {}; }
+ this.modelUrl = modelUrl;
+ this.loadOptions = loadOptions;
+ this.version = 'n/a';
+ if (loadOptions == null) {
+ this.loadOptions = {};
+ }
+ this.resourceManager = new ResourceManager();
+ }
+ Object.defineProperty(GraphModel.prototype, "modelVersion", {
+ // Returns the version information for the tensorflow model GraphDef.
+ get: function () {
+ return this.version;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(GraphModel.prototype, "inputNodes", {
+ get: function () {
+ return this.executor.inputNodes;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(GraphModel.prototype, "outputNodes", {
+ get: function () {
+ return this.executor.outputNodes;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(GraphModel.prototype, "inputs", {
+ get: function () {
+ return this.executor.inputs;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(GraphModel.prototype, "outputs", {
+ get: function () {
+ return this.executor.outputs;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(GraphModel.prototype, "weights", {
+ get: function () {
+ return this.executor.weightMap;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(GraphModel.prototype, "metadata", {
+ get: function () {
+ return this.artifacts.userDefinedMetadata;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Object.defineProperty(GraphModel.prototype, "modelSignature", {
+ get: function () {
+ return this.signature;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ GraphModel.prototype.findIOHandler = function () {
+ var path = this.modelUrl;
+ if (path.load != null) {
+ // Path is an IO Handler.
+ this.handler = path;
+ }
+ else if (this.loadOptions.requestInit != null) {
+ this.handler = tfc.io.browserHTTPRequest(path, this.loadOptions);
+ }
+ else {
+ var handlers = tfc.io.getLoadHandlers(path, this.loadOptions);
+ if (handlers.length === 0) {
+ // For backward compatibility: if no load handler can be found,
+ // assume it is a relative http path.
+ handlers.push(tfc.io.browserHTTPRequest(path, this.loadOptions));
+ }
+ else if (handlers.length > 1) {
+ throw new Error("Found more than one (" + handlers.length + ") load handlers for " +
+ ("URL '" + [path] + "'"));
+ }
+ this.handler = handlers[0];
+ }
+ };
+ /**
+ * Loads the model and weight files, construct the in memory weight map and
+ * compile the inference graph.
+ */
+ GraphModel.prototype.load = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var artifacts;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ this.findIOHandler();
+ if (this.handler.load == null) {
+ throw new Error('Cannot proceed with model loading because the IOHandler provided ' +
+ 'does not have the `load` method implemented.');
+ }
+ return [4 /*yield*/, this.handler.load()];
+ case 1:
+ artifacts = _a.sent();
+ return [2 /*return*/, this.loadSync(artifacts)];
+ }
+ });
+ });
+ };
+ /**
+ * Synchronously construct the in memory weight map and
+ * compile the inference graph. Also initialize hashtable if any.
+ *
+ * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
+ */
+ GraphModel.prototype.loadSync = function (artifacts) {
+ this.artifacts = artifacts;
+ var graph = this.artifacts.modelTopology;
+ var signature;
+ if (this.artifacts.userDefinedMetadata != null &&
+ this.artifacts.userDefinedMetadata.signature != null) {
+ signature = // tslint:disable-next-line:no-any
+ this.artifacts.userDefinedMetadata.signature;
+ }
+ else {
+ signature = this.artifacts.signature;
+ }
+ this.signature = signature;
+ this.version = graph.versions.producer + "." + graph.versions.minConsumer;
+ var weightMap = tfc.io.decodeWeights(this.artifacts.weightData, this.artifacts.weightSpecs);
+ this.executor = new GraphExecutor(OperationMapper.Instance.transformGraph(graph, this.signature));
+ this.executor.weightMap = this.convertTensorMapToTensorsMap(weightMap);
+ // Attach a model-level resourceManager to each executor to share resources,
+ // such as `HashTable`.
+ this.executor.resourceManager = this.resourceManager;
+ if (artifacts.modelInitializer != null &&
+ artifacts.modelInitializer.node != null) {
+ var initializer = OperationMapper.Instance.transformGraph(artifacts.modelInitializer);
+ this.initializer = new GraphExecutor(initializer);
+ this.initializer.weightMap = this.executor.weightMap;
+ // Attach a model-level resourceManager to the initializer, the
+ // hashTables created from when executing the initializer will be stored
+ // in the resourceManager.
+ this.initializer.resourceManager = this.resourceManager;
+ this.initializer.executeAsync({}, []);
+ }
+ return true;
+ };
+ /**
+ * Save the configuration and/or weights of the GraphModel.
+ *
+ * An `IOHandler` is an object that has a `save` method of the proper
+ * signature defined. The `save` method manages the storing or
+ * transmission of serialized data ("artifacts") that represent the
+ * model's topology and weights onto or via a specific medium, such as
+ * file downloads, local storage, IndexedDB in the web browser and HTTP
+ * requests to a server. TensorFlow.js provides `IOHandler`
+ * implementations for a number of frequently used saving mediums, such as
+ * `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. See `tf.io`
+ * for more details.
+ *
+ * This method also allows you to refer to certain types of `IOHandler`s
+ * as URL-like string shortcuts, such as 'localstorage://' and
+ * 'indexeddb://'.
+ *
+ * Example 1: Save `model`'s topology and weights to browser [local
+ * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);
+ * then load it back.
+ *
+ * ```js
+ * const modelUrl =
+ * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
+ * const model = await tf.loadGraphModel(modelUrl);
+ * const zeros = tf.zeros([1, 224, 224, 3]);
+ * model.predict(zeros).print();
+ *
+ * const saveResults = await model.save('localstorage://my-model-1');
+ *
+ * const loadedModel = await tf.loadGraphModel('localstorage://my-model-1');
+ * console.log('Prediction from loaded model:');
+ * model.predict(zeros).print();
+ * ```
+ *
+ * @param handlerOrURL An instance of `IOHandler` or a URL-like,
+ * scheme-based string shortcut for `IOHandler`.
+ * @param config Options for saving the model.
+ * @returns A `Promise` of `SaveResult`, which summarizes the result of
+ * the saving, such as byte sizes of the saved artifacts for the model's
+ * topology and weight values.
+ *
+ * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
+ */
+ GraphModel.prototype.save = function (handlerOrURL, config) {
+ return __awaiter(this, void 0, void 0, function () {
+ var handlers;
+ return __generator(this, function (_a) {
+ if (typeof handlerOrURL === 'string') {
+ handlers = tfc.io.getSaveHandlers(handlerOrURL);
+ if (handlers.length === 0) {
+ throw new Error("Cannot find any save handlers for URL '" + handlerOrURL + "'");
+ }
+ else if (handlers.length > 1) {
+ throw new Error("Found more than one (" + handlers.length + ") save handlers for " +
+ ("URL '" + handlerOrURL + "'"));
+ }
+ handlerOrURL = handlers[0];
+ }
+ if (handlerOrURL.save == null) {
+ throw new Error('GraphModel.save() cannot proceed because the IOHandler ' +
+ 'provided does not have the `save` attribute defined.');
+ }
+ return [2 /*return*/, handlerOrURL.save(this.artifacts)];
+ });
+ });
+ };
+ /**
+ * Execute the inference for the input tensors.
+ *
+ * @param input The input tensors, when there is single input for the model,
+ * inputs param should be a `tf.Tensor`. For models with mutliple inputs,
+ * inputs params should be in either `tf.Tensor`[] if the input order is
+ * fixed, or otherwise NamedTensorMap format.
+ *
+ * For model with multiple inputs, we recommend you use NamedTensorMap as the
+ * input type, if you use `tf.Tensor`[], the order of the array needs to
+ * follow the
+ * order of inputNodes array. @see {@link GraphModel.inputNodes}
+ *
+ * You can also feed any intermediate nodes using the NamedTensorMap as the
+ * input type. For example, given the graph
+ * InputNode => Intermediate => OutputNode,
+ * you can execute the subgraph Intermediate => OutputNode by calling
+ * model.execute('IntermediateNode' : tf.tensor(...));
+ *
+ * This is useful for models that uses tf.dynamic_rnn, where the intermediate
+ * state needs to be fed manually.
+ *
+ * For batch inference execution, the tensors for each input need to be
+ * concatenated together. For example with mobilenet, the required input shape
+ * is [1, 244, 244, 3], which represents the [batch, height, width, channel].
+ * If we are provide a batched data of 100 images, the input tensor should be
+ * in the shape of [100, 244, 244, 3].
+ *
+ * @param config Prediction configuration for specifying the batch size and
+ * output node names. Currently the batch size option is ignored for graph
+ * model.
+ *
+ * @returns Inference result tensors. The output would be single `tf.Tensor`
+ * if model has single output node, otherwise Tensor[] or NamedTensorMap[]
+ * will be returned for model with multiple outputs.
+ *
+ * @doc {heading: 'Models', subheading: 'Classes'}
+ */
+ GraphModel.prototype.predict = function (inputs, config) {
+ return this.execute(inputs, this.outputNodes);
+ };
+ GraphModel.prototype.normalizeInputs = function (inputs) {
+ if (!(inputs instanceof tfc.Tensor) && !Array.isArray(inputs)) {
+ // The input is already a NamedTensorMap.
+ return inputs;
+ }
+ inputs = Array.isArray(inputs) ? inputs : [inputs];
+ if (inputs.length !== this.inputNodes.length) {
+ throw new Error('Input tensor count mismatch,' +
+ ("the graph model has " + this.inputNodes.length + " placeholders, ") +
+ ("while there are " + inputs.length + " input tensors."));
+ }
+ return this.inputNodes.reduce(function (map, inputName, i) {
+ map[inputName] = inputs[i];
+ return map;
+ }, {});
+ };
+ GraphModel.prototype.normalizeOutputs = function (outputs) {
+ outputs = outputs || this.outputNodes;
+ return !Array.isArray(outputs) ? [outputs] : outputs;
+ };
+ /**
+ * Executes inference for the model for given input tensors.
+ * @param inputs tensor, tensor array or tensor map of the inputs for the
+ * model, keyed by the input node names.
+ * @param outputs output node name from the Tensorflow model, if no
+ * outputs are specified, the default outputs of the model would be used.
+ * You can inspect intermediate nodes of the model by adding them to the
+ * outputs array.
+ *
+ * @returns A single tensor if provided with a single output or no outputs
+ * are provided and there is only one default output, otherwise return a
+ * tensor array. The order of the tensor array is the same as the outputs
+ * if provided, otherwise the order of outputNodes attribute of the model.
+ *
+ * @doc {heading: 'Models', subheading: 'Classes'}
+ */
+ GraphModel.prototype.execute = function (inputs, outputs) {
+ inputs = this.normalizeInputs(inputs);
+ outputs = this.normalizeOutputs(outputs);
+ var result = this.executor.execute(inputs, outputs);
+ return result.length > 1 ? result : result[0];
+ };
+ /**
+ * Executes inference for the model for given input tensors in async
+ * fashion, use this method when your model contains control flow ops.
+ * @param inputs tensor, tensor array or tensor map of the inputs for the
+ * model, keyed by the input node names.
+ * @param outputs output node name from the Tensorflow model, if no outputs
+ * are specified, the default outputs of the model would be used. You can
+ * inspect intermediate nodes of the model by adding them to the outputs
+ * array.
+ *
+ * @returns A Promise of single tensor if provided with a single output or
+ * no outputs are provided and there is only one default output, otherwise
+ * return a tensor map.
+ *
+ * @doc {heading: 'Models', subheading: 'Classes'}
+ */
+ GraphModel.prototype.executeAsync = function (inputs, outputs) {
+ return __awaiter(this, void 0, void 0, function () {
+ var result;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ inputs = this.normalizeInputs(inputs);
+ outputs = this.normalizeOutputs(outputs);
+ return [4 /*yield*/, this.executor.executeAsync(inputs, outputs)];
+ case 1:
+ result = _a.sent();
+ return [2 /*return*/, result.length > 1 ? result : result[0]];
+ }
+ });
+ });
+ };
+ /**
+ * Get intermediate tensors for model debugging mode (flag
+ * KEEP_INTERMEDIATE_TENSORS is true).
+ *
+ * @doc {heading: 'Models', subheading: 'Classes'}
+ */
+ GraphModel.prototype.getIntermediateTensors = function () {
+ return this.executor.getIntermediateTensors();
+ };
+ /**
+ * Dispose intermediate tensors for model debugging mode (flag
+ * KEEP_INTERMEDIATE_TENSORS is true).
+ *
+ * @doc {heading: 'Models', subheading: 'Classes'}
+ */
+ GraphModel.prototype.disposeIntermediateTensors = function () {
+ this.executor.disposeIntermediateTensors();
+ };
+ GraphModel.prototype.convertTensorMapToTensorsMap = function (map) {
+ return Object.keys(map).reduce(function (newMap, key) {
+ newMap[key] = [map[key]];
+ return newMap;
+ }, {});
+ };
+ /**
+ * Releases the memory used by the weight tensors and resourceManager.
+ *
+ * @doc {heading: 'Models', subheading: 'Classes'}
+ */
+ GraphModel.prototype.dispose = function () {
+ this.executor.dispose();
+ if (this.initializer) {
+ this.initializer.dispose();
+ }
+ this.resourceManager.dispose();
+ };
+ return GraphModel;
+ }());
+ /**
+ * Load a graph model given a URL to the model definition.
+ *
+ * Example of loading MobileNetV2 from a URL and making a prediction with a
+ * zeros input:
+ *
+ * ```js
+ * const modelUrl =
+ * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
+ * const model = await tf.loadGraphModel(modelUrl);
+ * const zeros = tf.zeros([1, 224, 224, 3]);
+ * model.predict(zeros).print();
+ * ```
+ *
+ * Example of loading MobileNetV2 from a TF Hub URL and making a prediction with
+ * a zeros input:
+ *
+ * ```js
+ * const modelUrl =
+ * 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/2';
+ * const model = await tf.loadGraphModel(modelUrl, {fromTFHub: true});
+ * const zeros = tf.zeros([1, 224, 224, 3]);
+ * model.predict(zeros).print();
+ * ```
+ * @param modelUrl The url or an `io.IOHandler` that loads the model.
+ * @param options Options for the HTTP request, which allows to send credentials
+ * and custom headers.
+ *
+ * @doc {heading: 'Models', subheading: 'Loading'}
+ */
+ function loadGraphModel(modelUrl, options) {
+ if (options === void 0) { options = {}; }
+ return __awaiter(this, void 0, void 0, function () {
+ var model;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (modelUrl == null) {
+ throw new Error('modelUrl in loadGraphModel() cannot be null. Please provide a url ' +
+ 'or an IOHandler that loads the model');
+ }
+ if (options == null) {
+ options = {};
+ }
+ if (options.fromTFHub) {
+ if (modelUrl.load == null) {
+ if (!modelUrl.endsWith('/')) {
+ modelUrl = modelUrl + '/';
+ }
+ modelUrl = "" + modelUrl + DEFAULT_MODEL_NAME + TFHUB_SEARCH_PARAM;
+ }
+ }
+ model = new GraphModel(modelUrl, options);
+ return [4 /*yield*/, model.load()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/, model];
+ }
+ });
+ });
+ }
+
+ /** @license See the LICENSE file. */
+ // This code is auto-generated, do not modify this file!
+ var version = '3.12.0';
+
+ exports.GraphModel = GraphModel;
+ exports.deregisterOp = deregisterOp;
+ exports.loadGraphModel = loadGraphModel;
+ exports.registerOp = registerOp;
+ exports.version_converter = version;
+
+ Object.defineProperty(exports, '__esModule', { value: true });
+
+})));
+//# sourceMappingURL=tf-converter.js.map
diff --git a/js/tf-core.js b/js/tf-core.js
new file mode 100644
index 0000000..75711cc
--- /dev/null
+++ b/js/tf-core.js
@@ -0,0 +1,27144 @@
+/**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+(function (global, factory) {
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('crypto')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'crypto'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.tf = global.tf || {}, global.require$$0));
+}(this, (function (exports, require$$0) { 'use strict';
+
+ function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
+
+ var require$$0__default = /*#__PURE__*/_interopDefaultLegacy(require$$0);
+
+ /*! *****************************************************************************
+ Copyright (c) Microsoft Corporation.
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+ ***************************************************************************** */
+ /* global Reflect, Promise */
+ var extendStatics = function (d, b) {
+ extendStatics = Object.setPrototypeOf ||
+ ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
+ function (d, b) { for (var p in b)
+ if (b.hasOwnProperty(p))
+ d[p] = b[p]; };
+ return extendStatics(d, b);
+ };
+ function __extends(d, b) {
+ extendStatics(d, b);
+ function __() { this.constructor = d; }
+ d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
+ }
+ function __awaiter(thisArg, _arguments, P, generator) {
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try {
+ step(generator.next(value));
+ }
+ catch (e) {
+ reject(e);
+ } }
+ function rejected(value) { try {
+ step(generator["throw"](value));
+ }
+ catch (e) {
+ reject(e);
+ } }
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+ }
+ function __generator(thisArg, body) {
+ var _ = { label: 0, sent: function () { if (t[0] & 1)
+ throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
+ return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function () { return this; }), g;
+ function verb(n) { return function (v) { return step([n, v]); }; }
+ function step(op) {
+ if (f)
+ throw new TypeError("Generator is already executing.");
+ while (_)
+ try {
+ if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done)
+ return t;
+ if (y = 0, t)
+ op = [op[0] & 2, t.value];
+ switch (op[0]) {
+ case 0:
+ case 1:
+ t = op;
+ break;
+ case 4:
+ _.label++;
+ return { value: op[1], done: false };
+ case 5:
+ _.label++;
+ y = op[1];
+ op = [0];
+ continue;
+ case 7:
+ op = _.ops.pop();
+ _.trys.pop();
+ continue;
+ default:
+ if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
+ _ = 0;
+ continue;
+ }
+ if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) {
+ _.label = op[1];
+ break;
+ }
+ if (op[0] === 6 && _.label < t[1]) {
+ _.label = t[1];
+ t = op;
+ break;
+ }
+ if (t && _.label < t[2]) {
+ _.label = t[2];
+ _.ops.push(op);
+ break;
+ }
+ if (t[2])
+ _.ops.pop();
+ _.trys.pop();
+ continue;
+ }
+ op = body.call(thisArg, _);
+ }
+ catch (e) {
+ op = [6, e];
+ y = 0;
+ }
+ finally {
+ f = t = 0;
+ }
+ if (op[0] & 5)
+ throw op[1];
+ return { value: op[0] ? op[1] : void 0, done: true };
+ }
+ }
+ function __values(o) {
+ var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
+ if (m)
+ return m.call(o);
+ if (o && typeof o.length === "number")
+ return {
+ next: function () {
+ if (o && i >= o.length)
+ o = void 0;
+ return { value: o && o[i++], done: !o };
+ }
+ };
+ throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
+ }
+ function __read(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m)
+ return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done)
+ ar.push(r.value);
+ }
+ catch (error) {
+ e = { error: error };
+ }
+ finally {
+ try {
+ if (r && !r.done && (m = i["return"]))
+ m.call(i);
+ }
+ finally {
+ if (e)
+ throw e.error;
+ }
+ }
+ return ar;
+ }
+ function __spread() {
+ for (var ar = [], i = 0; i < arguments.length; i++)
+ ar = ar.concat(__read(arguments[i]));
+ return ar;
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var EPSILON_FLOAT32 = 1e-7;
+ var EPSILON_FLOAT16 = 1e-4;
+ /** Convenient class for storing tensor-related data. */
+ var DataStorage = /** @class */ (function () {
+ function DataStorage(backend, dataMover) {
+ this.backend = backend;
+ this.dataMover = dataMover;
+ this.data = new WeakMap();
+ this.dataIdsCount = 0;
+ }
+ DataStorage.prototype.get = function (dataId) {
+ if (!this.data.has(dataId)) {
+ this.dataMover.moveData(this.backend, dataId);
+ }
+ return this.data.get(dataId);
+ };
+ DataStorage.prototype.set = function (dataId, value) {
+ this.dataIdsCount++;
+ this.data.set(dataId, value);
+ };
+ DataStorage.prototype.has = function (dataId) {
+ return this.data.has(dataId);
+ };
+ DataStorage.prototype.delete = function (dataId) {
+ this.dataIdsCount--;
+ return this.data.delete(dataId);
+ };
+ DataStorage.prototype.numDataIds = function () {
+ return this.dataIdsCount;
+ };
+ return DataStorage;
+ }());
/**
 * The interface that defines the kernels that should be implemented when
 * adding a new backend. New backends don't need to implement every one of the
 * methods, this can be done gradually (throw an error for unimplemented
 * methods).
 */
var KernelBackend = /** @class */ class {
    refCount(dataId) {
        return notYetImplemented('refCount');
    }
    incRef(dataId) {
        return notYetImplemented('incRef');
    }
    timerAvailable() {
        return true;
    }
    time(f) {
        return notYetImplemented('time');
    }
    read(dataId) {
        return notYetImplemented('read');
    }
    readSync(dataId) {
        return notYetImplemented('readSync');
    }
    numDataIds() {
        return notYetImplemented('numDataIds');
    }
    disposeData(dataId, force) {
        return notYetImplemented('disposeData');
    }
    write(values, shape, dtype) {
        return notYetImplemented('write');
    }
    move(dataId, values, shape, dtype, refCount) {
        return notYetImplemented('move');
    }
    memory() {
        return notYetImplemented('memory');
    }
    /** Returns the highest precision for floats in bits (e.g. 16 or 32) */
    floatPrecision() {
        return notYetImplemented('floatPrecision');
    }
    /** Comparison tolerance matching this backend's float precision. */
    epsilon() {
        return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;
    }
    dispose() {
        return notYetImplemented('dispose');
    }
};
// Thrown by KernelBackend stubs; the message must stay stable for callers
// that match on it.
function notYetImplemented(kernelName) {
    throw new Error(`'${kernelName}' not yet implemented or not found in the registry. ` +
        `This kernel may not be supported by the tfjs backend you have chosen`);
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Shuffles the array in-place using Fisher-Yates algorithm.
 *
 * ```js
 * const a = [1, 2, 3, 4, 5];
 * tf.util.shuffle(a);
 * console.log(a);
 * ```
 *
 * @param array The array to shuffle in-place.
 *
 * @doc {heading: 'Util', namespace: 'util'}
 */
// tslint:disable-next-line:no-any
function shuffle(array) {
    let remaining = array.length;
    while (remaining > 0) {
        // Pick a random slot among the not-yet-fixed prefix, then swap it
        // with the last unfixed element.
        const pick = (Math.random() * remaining) | 0;
        remaining--;
        const tmp = array[remaining];
        array[remaining] = array[pick];
        array[pick] = tmp;
    }
}
/**
 * Shuffles two arrays in-place the same way using Fisher-Yates algorithm.
 *
 * ```js
 * const a = [1,2,3,4,5];
 * const b = [11,22,33,44,55];
 * tf.util.shuffleCombo(a, b);
 * console.log(a, b);
 * ```
 *
 * @param array The first array to shuffle in-place.
 * @param array2 The second array to shuffle in-place with the same permutation
 *     as the first array.
 *
 * @doc {heading: 'Util', namespace: 'util'}
 */
function shuffleCombo(array, array2) {
    if (array.length !== array2.length) {
        throw new Error(`Array sizes must match to be shuffled together ` +
            `First array length was ${array.length}` +
            `Second array length was ${array2.length}`);
    }
    let remaining = array.length;
    while (remaining > 0) {
        const pick = (Math.random() * remaining) | 0;
        remaining--;
        // Apply the identical transposition to both arrays so pairs stay aligned.
        const t1 = array[remaining];
        array[remaining] = array[pick];
        array[pick] = t1;
        const t2 = array2[remaining];
        array2[remaining] = array2[pick];
        array2[pick] = t2;
    }
}
/** Clamps a value to a specified range. */
function clamp(min, x, max) {
    const capped = Math.min(x, max);
    return Math.max(min, capped);
}
/** Rounds `val` up to the nearest even integer (even values pass through). */
function nearestLargerEven(val) {
    if (val % 2 === 0) {
        return val;
    }
    return val + 1;
}
/** Swaps the elements at indices `left` and `right` in-place. */
function swap(object, left, right) {
    [object[left], object[right]] = [object[right], object[left]];
}
/** Sums the elements of a numeric (typed) array. */
function sum$1(arr) {
    let total = 0;
    for (const v of arr) {
        total += v;
    }
    return total;
}
/**
 * Returns a sample from a uniform [a, b) distribution.
 *
 * @param a The minimum support (inclusive).
 * @param b The maximum support (exclusive).
 * @return A pseudorandom number on the half-open interval [a,b).
 */
function randUniform(a, b) {
    const t = Math.random();
    return (1 - t) * a + t * b;
}
/** Returns the squared Euclidean distance between two vectors. */
function distSquared(a, b) {
    let acc = 0;
    for (let i = 0; i < a.length; ++i) {
        const delta = Number(a[i]) - Number(b[i]);
        acc += delta * delta;
    }
    return acc;
}
/**
 * Asserts that the expression is true. Otherwise throws an error with the
 * provided message.
 *
 * ```js
 * const x = 2;
 * tf.util.assert(x === 2, 'x is not 2');
 * ```
 *
 * @param expr The expression to assert (as a boolean).
 * @param msg A function that returns the message to report when throwing an
 *     error. We use a function for performance reasons.
 *
 * @doc {heading: 'Util', namespace: 'util'}
 */
function assert(expr, msg) {
    if (expr) {
        return;
    }
    // `msg` may be a plain string or a lazy message factory.
    throw new Error(typeof msg === 'string' ? msg : msg());
}
/** Throws unless `shapeA` and `shapeB` are element-wise equal. */
function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = '') {
    assert(arraysEqual(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);
}
/** Throws when `a` is null or undefined. */
function assertNonNull(a) {
    assert(a != null, () => 'The input to the tensor constructor must be a non-null value.');
}
// NOTE: We explicitly type out what T extends instead of any so that
// util.flatten on a nested array of number doesn't try to infer T as a
// number[][], causing us to explicitly type util.flatten<number>().
/**
 * Flattens an arbitrarily nested array.
 *
 * ```js
 * const a = [[1, 2], [3, 4], [5, [6, [7]]]];
 * const flat = tf.util.flatten(a);
 * console.log(flat);
 * ```
 *
 * @param arr The nested array to flatten.
 * @param result The destination array which holds the elements.
 * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults
 *     to false.
 *
 * @doc {heading: 'Util', namespace: 'util'}
 */
function flatten(arr, result = [], skipTypedArray = false) {
    if (result == null) {
        result = [];
    }
    const typed = arr instanceof Float32Array || arr instanceof Int32Array ||
        arr instanceof Uint8Array || arr instanceof Uint8ClampedArray;
    if (Array.isArray(arr) || (typed && !skipTypedArray)) {
        for (let i = 0; i < arr.length; ++i) {
            flatten(arr[i], result, skipTypedArray);
        }
    } else {
        // Leaves (numbers, strings, or whole typed arrays when skipping) are
        // appended as-is.
        result.push(arr);
    }
    return result;
}
/**
 * Returns the size (number of elements) of the tensor given its shape.
 *
 * ```js
 * const shape = [3, 4, 2];
 * const size = tf.util.sizeFromShape(shape);
 * console.log(size);
 * ```
 *
 * @doc {heading: 'Util', namespace: 'util'}
 */
function sizeFromShape(shape) {
    // An empty shape denotes a scalar, whose size is 1 (the product's
    // multiplicative identity).
    let size = 1;
    for (const dim of shape) {
        size *= dim;
    }
    return size;
}
/** A scalar is represented by the empty (rank-0) shape. */
function isScalarShape(shape) {
    const rank = shape.length;
    return rank === 0;
}
/** Shallow element-wise equality of two (typed) arrays; tolerates null inputs. */
function arraysEqual(n1, n2) {
    if (n1 === n2) {
        return true;
    }
    if (n1 == null || n2 == null || n1.length !== n2.length) {
        return false;
    }
    for (let i = 0; i < n1.length; ++i) {
        if (n1[i] !== n2[i]) {
            return false;
        }
    }
    return true;
}
/** True when `a` has no fractional part. */
function isInt(a) {
    const remainder = a % 1;
    return remainder === 0;
}
/** tanh with a manual fallback for environments lacking Math.tanh. */
function tanh$1(x) {
    if (Math.tanh != null) {
        return Math.tanh(x);
    }
    if (x === Infinity) {
        return 1;
    }
    if (x === -Infinity) {
        return -1;
    }
    const e2x = Math.exp(2 * x);
    return (e2x - 1) / (e2x + 1);
}
/** Returns a near-square [width, height] that can hold `size` elements. */
function sizeToSquarishShape(size) {
    const width = Math.ceil(Math.sqrt(size));
    const height = Math.ceil(size / width);
    return [width, height];
}
/**
 * Creates a new array with randomized indicies to a given quantity.
 *
 * ```js
 * const randomTen = tf.util.createShuffledIndices(10);
 * console.log(randomTen);
 * ```
 *
 * @param number Quantity of how many shuffled indicies to create.
 *
 * @doc {heading: 'Util', namespace: 'util'}
 */
function createShuffledIndices(n) {
    const indices = new Uint32Array(n);
    for (let i = 0; i < n; ++i) {
        indices[i] = i;
    }
    shuffle(indices);
    return indices;
}
/** Pads `a` with trailing spaces up to `size` characters (no truncation). */
function rightPad(a, size) {
    if (size <= a.length) {
        return a;
    }
    return a.padEnd(size);
}
/**
 * Polls `checkFn` until it returns true, then resolves.
 *
 * @param checkFn Predicate evaluated on every attempt.
 * @param delayFn Maps the attempt count to the backoff (ms) before the next
 *     try; defaults to no delay.
 * @param maxCounter Optional cap on attempts; when reached the promise
 *     rejects.
 * @return A promise that resolves once `checkFn` passes, or rejects with an
 *     Error after `maxCounter` failed attempts.
 */
function repeatedTry(checkFn, delayFn, maxCounter) {
    if (delayFn === void 0) { delayFn = function (counter) { return 0; }; }
    return new Promise(function (resolve, reject) {
        var tryCount = 0;
        var tryFn = function () {
            if (checkFn()) {
                resolve();
                return;
            }
            tryCount++;
            var nextBackoff = delayFn(tryCount);
            if (maxCounter != null && tryCount >= maxCounter) {
                // Fix: previously rejected with `undefined`, which loses both
                // the failure context and the stack trace.
                reject(new Error('repeatedTry() reached the maximum number of tries (' +
                    maxCounter + ')'));
                return;
            }
            setTimeout(tryFn, nextBackoff);
        };
        tryFn();
    });
}
/**
 * Given the full size of the array and a shape that may contain -1 as the
 * implicit dimension, returns the inferred shape where -1 is replaced.
 * E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].
 *
 * @param shape The shape, which may contain -1 in some dimension.
 * @param size The full size (number of elements) of the array.
 * @return The inferred shape where -1 is replaced with the inferred size.
 */
function inferFromImplicitShape(shape, size) {
    let known = 1;          // product of the explicit dimensions
    let wildcardDim = -1;   // index of the single allowed -1, if any
    for (let dim = 0; dim < shape.length; ++dim) {
        const d = shape[dim];
        if (d >= 0) {
            known *= d;
        } else if (d === -1) {
            if (wildcardDim !== -1) {
                throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${wildcardDim} and dim ${dim}`);
            }
            wildcardDim = dim;
        } else if (d < 0) {
            throw Error(`Shapes can not be < 0. Found ${d} at dim ${dim}`);
        }
    }
    if (wildcardDim === -1) {
        if (size > 0 && size !== known) {
            throw Error(`Size(${size}) must match the product of shape ${shape}`);
        }
        return shape;
    }
    if (known === 0) {
        throw Error(`Cannot infer the missing size in [${shape}] when there are 0 elements`);
    }
    if (size % known !== 0) {
        throw Error(`The implicit shape can't be a fractional number. Got ${size} / ${known}`);
    }
    const inferred = shape.slice();
    inferred[wildcardDim] = size / known;
    return inferred;
}
/** Normalizes an axis argument (null, scalar, or list) to a list of
 * non-negative, validated axis indices. */
function parseAxisParam(axis, shape) {
    const rank = shape.length;
    // Default to all axes; wrap a scalar axis into an array.
    axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);
    for (const ax of axis) {
        if (!(ax >= -rank && ax < rank)) {
            throw new Error(`All values in axis param must be in range [-${rank}, ${rank}) but got axis ${axis}`);
        }
    }
    for (const ax of axis) {
        if (ax % 1 !== 0) {
            throw new Error(`All values in axis param must be integers but got axis ${axis}`);
        }
    }
    // Map negative axes onto their positive counterparts.
    return axis.map((a) => (a < 0 ? rank + a : a));
}
/** Reduces the shape by removing all dimensions of shape 1. */
function squeezeShape(shape, axis) {
    var newShape = [];
    var keptDims = [];
    // An explicitly-empty axis list behaves the same as "no axis given":
    // squeeze every size-1 dimension.
    var isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;
    var axes = (axis == null || isEmptyArray) ?
        null :
        // NOTE(review): .sort() without a comparator sorts lexicographically;
        // for ranks > 10 the axes ordering could be wrong — confirm against
        // upstream tfjs before changing.
        parseAxisParam(axis, shape).sort();
    // `j` walks the sorted axes list in lockstep with the dimension index `i`.
    var j = 0;
    for (var i = 0; i < shape.length; ++i) {
        if (axes != null) {
            // A requested axis must actually have size 1 to be squeezable.
            if (axes[j] === i && shape[i] !== 1) {
                throw new Error("Can't squeeze axis " + i + " since its dim '" + shape[i] + "' is not 1");
            }
            // Keep size-1 dims that were NOT requested for squeezing
            // (axes[j] is past `i`, or the axes list is exhausted).
            if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {
                newShape.push(shape[i]);
                keptDims.push(i);
            }
            if (axes[j] <= i) {
                j++;
            }
        }
        // Dimensions larger than 1 are always kept.
        if (shape[i] !== 1) {
            newShape.push(shape[i]);
            keptDims.push(i);
        }
    }
    return { newShape: newShape, keptDims: keptDims };
}
/** Allocates a zero-filled typed array of `size` for the given numeric dtype. */
function getTypedArrayFromDType(dtype, size) {
    switch (dtype == null ? 'float32' : dtype) {
        case 'float32':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
}
/** Like getTypedArrayFromDType, but also supports 'string' (plain Array). */
function getArrayFromDType(dtype, size) {
    switch (dtype == null ? 'float32' : dtype) {
        case 'float32':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        case 'string':
            return new Array(size);
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
}
/** Rejects uploads containing NaN or infinite values for the given dtype. */
function checkConversionForErrors(vals, dtype) {
    for (const num of vals) {
        if (isNaN(num) || !isFinite(num)) {
            throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);
        }
    }
}
/** Returns true if the dtype is valid. */
function isValidDtype(dtype) {
    return ['bool', 'complex64', 'float32', 'int32', 'string'].includes(dtype);
}
/**
 * Returns true if the new type can't encode the old type without loss of
 * precision.
 */
function hasEncodingLoss(oldType, newType) {
    switch (newType) {
        case 'complex64':
            // complex64 can encode everything.
            return false;
        case 'float32':
            return oldType === 'complex64';
        case 'int32':
            return oldType === 'float32' || oldType === 'complex64';
        case 'bool':
            return oldType !== 'bool';
        default:
            return true;
    }
}
/** True for the typed-array kinds tfjs stores tensor data in. */
function isTypedArray(a) {
    return [Float32Array, Int32Array, Uint8Array, Uint8ClampedArray]
        .some((ctor) => a instanceof ctor);
}
/** Storage bytes per element for the fixed-width dtypes. */
function bytesPerElement(dtype) {
    switch (dtype) {
        case 'float32':
        case 'int32':
            return 4;
        case 'complex64':
            return 8;
        case 'bool':
            return 1;
        default:
            throw new Error(`Unknown dtype ${dtype}`);
    }
}
/**
 * Returns the approximate number of bytes allocated in the string array - 2
 * bytes per character. Computing the exact bytes for a native string in JS is
 * not possible since it depends on the encoding of the html page that serves
 * the website.
 */
function bytesFromStringArray(arr) {
    if (arr == null) {
        return 0;
    }
    return arr.reduce((bytes, x) => bytes + x.length, 0);
}
/** Returns true if the value is a string (primitive or boxed). */
function isString(value) {
    if (value instanceof String) {
        return true;
    }
    return typeof value === 'string';
}
/** True for primitive booleans only. */
function isBoolean(value) {
    const t = typeof value;
    return t === 'boolean';
}
/** True for primitive numbers only. */
function isNumber(value) {
    const t = typeof value;
    return t === 'number';
}
/** Infers a tensor dtype from a (possibly nested) JS value; defaults to float32. */
function inferDtype(values) {
    if (Array.isArray(values)) {
        // The first leaf decides the dtype of the whole nest.
        return inferDtype(values[0]);
    }
    if (values instanceof Float32Array) {
        return 'float32';
    }
    if (values instanceof Int32Array || values instanceof Uint8Array ||
        values instanceof Uint8ClampedArray) {
        return 'int32';
    }
    const t = typeof values;
    if (t === 'number') {
        return 'float32';
    }
    if (t === 'string' || values instanceof String) {
        return 'string';
    }
    if (t === 'boolean') {
        return 'bool';
    }
    return 'float32';
}
// Duck-typed callable check (constructor + call + apply), kept rather than
// `typeof` to preserve the original semantics for exotic objects.
function isFunction(f) {
    return Boolean(f && f.constructor && f.call && f.apply);
}
/** Smallest divisor of `size` that is >= `start`; falls back to `size` itself. */
function nearestDivisor(size, start) {
    for (let candidate = start; candidate < size; ++candidate) {
        if (size % candidate === 0) {
            return candidate;
        }
    }
    return size;
}
/** Row-major strides for `shape`; the last dim's stride of 1 is implicit,
 * so the result has rank-1 entries (empty for rank < 2). */
function computeStrides(shape) {
    const rank = shape.length;
    if (rank < 2) {
        return [];
    }
    const strides = new Array(rank - 1);
    // Accumulate the product of trailing dimensions from right to left.
    let running = 1;
    for (let i = rank - 1; i >= 1; --i) {
        running *= shape[i];
        strides[i - 1] = running;
    }
    return strides;
}
/** Recursively slices flat storage `a` (from `offset`) into nested arrays of
 * the given shape; complex data uses two flat slots per element. */
function createNestedArray(offset, shape, a, isComplex = false) {
    const ret = [];
    if (shape.length === 1) {
        const count = shape[0] * (isComplex ? 2 : 1);
        for (let i = 0; i < count; i++) {
            ret[i] = a[offset + i];
        }
    } else {
        const outer = shape[0];
        const innerShape = shape.slice(1);
        const stride = innerShape.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);
        for (let i = 0; i < outer; i++) {
            ret[i] = createNestedArray(offset + i * stride, innerShape, a, isComplex);
        }
    }
    return ret;
}
// Provide a nested array of TypedArray in given shape.
function toNestedArray(shape, a, isComplex = false) {
    if (shape.length === 0) {
        // Scalar type should return a single number.
        return a[0];
    }
    const size = shape.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);
    if (size === 0) {
        // A tensor with shape zero should be turned into empty list.
        return [];
    }
    if (size !== a.length) {
        throw new Error(`[${shape}] does not match the input size ${a.length}${isComplex ? ' for a complex tensor' : ''}.`);
    }
    return createNestedArray(0, shape, a, isComplex);
}
/** Typed array of `size` ones for the given dtype. */
function makeOnesTypedArray(size, dtype) {
    const array = makeZerosTypedArray(size, dtype);
    array.fill(1);
    return array;
}
/** Zero-filled typed array for the dtype; complex64 shares float32 storage. */
function makeZerosTypedArray(size, dtype) {
    switch (dtype == null ? 'float32' : dtype) {
        case 'float32':
        case 'complex64':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
}
/**
 * Make nested `TypedArray` filled with zeros.
 * @param shape The shape information for the nested array.
 * @param dtype dtype of the array element.
 */
function makeZerosNestedTypedArray(shape, dtype) {
    const size = shape.reduce((prev, curr) => prev * curr, 1);
    if (dtype == null || dtype === 'float32') {
        return toNestedArray(shape, new Float32Array(size));
    }
    if (dtype === 'int32') {
        return toNestedArray(shape, new Int32Array(size));
    }
    if (dtype === 'bool') {
        return toNestedArray(shape, new Uint8Array(size));
    }
    throw new Error(`Unknown data type ${dtype}`);
}
/** Throws unless every dimension in `shape` is a non-negative integer. */
function assertNonNegativeIntegerDimensions(shape) {
    shape.forEach((dimSize) => {
        if (!(Number.isInteger(dimSize) && dimSize >= 0)) {
            throw new Error(`Tensor must have a shape comprised of positive integers but got shape [${shape}].`);
        }
    });
}
/**
 * Computes flat index for a given location (multidimentionsal index) in a
 * Tensor/multidimensional array.
 *
 * @param locs Location in the tensor.
 * @param rank Rank of the tensor.
 * @param strides Tensor strides.
 */
function locToIndex(locs, rank, strides) {
    if (rank === 0) {
        return 0;
    }
    if (rank === 1) {
        return locs[0];
    }
    // Last axis has implicit stride 1; add strided contributions of the rest.
    let flat = locs[locs.length - 1];
    for (let axis = 0; axis < locs.length - 1; ++axis) {
        flat += strides[axis] * locs[axis];
    }
    return flat;
}
/**
 * Computes the location (multidimensional index) in a tensor/multidimentional
 * array for a given flat index.
 *
 * @param index Index in flat array.
 * @param rank Rank of tensor.
 * @param strides Strides of tensor.
 */
function indexToLoc(index, rank, strides) {
    if (rank === 0) {
        return [];
    }
    if (rank === 1) {
        return [index];
    }
    const locs = new Array(rank);
    // Peel off one axis at a time; the remainder feeds the next axis.
    for (let axis = 0; axis < rank - 1; ++axis) {
        locs[axis] = Math.floor(index / strides[axis]);
        index -= locs[axis] * strides[axis];
    }
    locs[rank - 1] = index;
    return locs;
}
/**
 * This method asserts whether an object is a Promise instance.
 * @param object
 */
// tslint:disable-next-line: no-any
function isPromise(object) {
    // We chose to not use 'obj instanceOf Promise' for two reasons:
    // 1. It only reliably works for es6 Promise, not other Promise
    // implementations.
    // 2. It doesn't work with framework that uses zone.js. zone.js monkey patch
    // the async calls, so it is possible the obj (patched) is comparing to a
    // pre-patched Promise.
    // NOTE: intentionally returns the raw (possibly falsy) value rather than a
    // strict boolean; callers only use it in truthiness contexts.
    return object && object.then && typeof object.then === 'function';
}
+
// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.
// Query-string key under which Environment.populateURLFlags() looks for
// comma-separated FLAG:value overrides.
var TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';
/**
 * The environment contains evaluated flags as well as the registered platform.
 * This is always used as a global singleton and can be retrieved with
 * `tf.env()`.
 *
 * @doc {heading: 'Environment'}
 */
var Environment = /** @class */ (function () {
    // tslint:disable-next-line: no-any
    // `global` is the host global object (window/global/self); URL flag
    // overrides are read from `global.location.search` at construction time.
    function Environment(global) {
        this.global = global;
        this.flags = {};         // evaluated flag values, cached
        this.flagRegistry = {};  // flagName -> {evaluationFn, setHook}
        this.urlFlags = {};      // overrides parsed from the URL
        // Jasmine spies on this in 'environment_test.ts'
        this.getQueryParams = getQueryParams;
        this.populateURLFlags();
    }
    // Records the active platform; warns (outside tests/prod) when replacing
    // a previously-set platform.
    Environment.prototype.setPlatform = function (platformName, platform) {
        if (this.platform != null) {
            if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {
                console.warn("Platform " + this.platformName + " has already been set. " +
                    ("Overwriting the platform with " + platform + "."));
            }
        }
        this.platformName = platformName;
        this.platform = platform;
    };
    // Registers a flag's evaluation function and optional set-hook.
    Environment.prototype.registerFlag = function (flagName, evaluationFn, setHook) {
        this.flagRegistry[flagName] = { evaluationFn: evaluationFn, setHook: setHook };
        // Override the flag value from the URL. This has to happen here because
        // the environment is initialized before flags get registered.
        if (this.urlFlags[flagName] != null) {
            var flagValue = this.urlFlags[flagName];
            if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {
                console.warn("Setting feature override from URL " + flagName + ": " + flagValue + ".");
            }
            this.set(flagName, flagValue);
        }
    };
    // Async flag lookup: returns the cached value or awaits the evaluation
    // function and caches the result. (Transpiled async/await state machine.)
    Environment.prototype.getAsync = function (flagName) {
        return __awaiter(this, void 0, void 0, function () {
            var _a, _b;
            return __generator(this, function (_c) {
                switch (_c.label) {
                    case 0:
                        if (flagName in this.flags) {
                            return [2 /*return*/, this.flags[flagName]];
                        }
                        _a = this.flags;
                        _b = flagName;
                        return [4 /*yield*/, this.evaluateFlag(flagName)];
                    case 1:
                        _a[_b] = _c.sent();
                        return [2 /*return*/, this.flags[flagName]];
                }
            });
        });
    };
    // Sync flag lookup; throws if the flag's evaluator is async.
    Environment.prototype.get = function (flagName) {
        if (flagName in this.flags) {
            return this.flags[flagName];
        }
        var flagValue = this.evaluateFlag(flagName);
        if (isPromise(flagValue)) {
            throw new Error("Flag " + flagName + " cannot be synchronously evaluated. " +
                "Please use getAsync() instead.");
        }
        this.flags[flagName] = flagValue;
        return this.flags[flagName];
    };
    // Typed conveniences over get(); no runtime type checking is performed.
    Environment.prototype.getNumber = function (flagName) {
        return this.get(flagName);
    };
    Environment.prototype.getBool = function (flagName) {
        return this.get(flagName);
    };
    // Returns the live cache object (not a copy).
    Environment.prototype.getFlags = function () {
        return this.flags;
    };
    Object.defineProperty(Environment.prototype, "features", {
        // For backwards compatibility.
        get: function () {
            return this.flags;
        },
        enumerable: true,
        configurable: true
    });
    // Sets a registered flag's value and fires its setHook, if any.
    Environment.prototype.set = function (flagName, value) {
        if (this.flagRegistry[flagName] == null) {
            throw new Error("Cannot set flag " + flagName + " as it has not been registered.");
        }
        this.flags[flagName] = value;
        if (this.flagRegistry[flagName].setHook != null) {
            this.flagRegistry[flagName].setHook(value);
        }
    };
    Environment.prototype.evaluateFlag = function (flagName) {
        if (this.flagRegistry[flagName] == null) {
            throw new Error("Cannot evaluate flag '" + flagName + "': no evaluation function found.");
        }
        return this.flagRegistry[flagName].evaluationFn();
    };
    // Replaces the flag cache wholesale (shallow copy of `flags`).
    Environment.prototype.setFlags = function (flags) {
        this.flags = Object.assign({}, flags);
    };
    // Clears cached flags and re-reads URL overrides.
    Environment.prototype.reset = function () {
        this.flags = {};
        this.urlFlags = {};
        this.populateURLFlags();
    };
    // Parses ?tfjsflags=KEY:value,... from the host's location, if present.
    Environment.prototype.populateURLFlags = function () {
        var _this = this;
        if (typeof this.global === 'undefined' ||
            typeof this.global.location === 'undefined' ||
            typeof this.global.location.search === 'undefined') {
            return;
        }
        var urlParams = this.getQueryParams(this.global.location.search);
        if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {
            var keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');
            keyValues.forEach(function (keyValue) {
                var _a = __read(keyValue.split(':'), 2), key = _a[0], value = _a[1];
                _this.urlFlags[key] = parseValue(key, value);
            });
        }
    };
    return Environment;
}());
/** Parses a query string ('?a=1&b=two') into a {key: value} object. */
function getQueryParams(queryString) {
    const params = {};
    // replace() is used purely to iterate matches; its return value is unused.
    queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {
        params[decodeURIComponent(t[0])] = decodeURIComponent(t[1] || '');
        return t.join('=');
    });
    return params;
}
/** URI-decodes one key/value pair into `params`; a missing value becomes ''. */
function decodeParam(params, name, value) {
    const key = decodeURIComponent(name);
    params[key] = value ? decodeURIComponent(value) : '';
}
/** Parses a URL flag value into boolean or number; throws otherwise. */
function parseValue(flagName, value) {
    const lowered = value.toLowerCase();
    if (lowered === 'true') {
        return true;
    }
    if (lowered === 'false') {
        return false;
    }
    // Accept only strings that round-trip through Number exactly.
    if (`${+lowered}` === lowered) {
        return +lowered;
    }
    throw new Error(`Could not parse value flag value ${lowered} for flag ${flagName}.`);
}
/**
 * Returns the current environment (a global singleton).
 *
 * The environment object contains the evaluated feature values as well as the
 * active platform.
 *
 * @doc {heading: 'Environment'}
 */
function env() {
    return exports.ENV;
}
// The singleton Environment; null until setEnvironmentGlobal() installs one.
exports.ENV = null;
function setEnvironmentGlobal(environment) {
    exports.ENV = environment;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Note that the identifier globalNameSpace is scoped to this module, but will
// always resolve to the same global object regardless of how the module is
// resolved.
// tslint:disable-next-line:no-any
var globalNameSpace;
// tslint:disable-next-line:no-any
// Lazily resolves and caches the host's global object. Probe order:
// window (browsers) -> global (Node) -> process -> self (workers).
// NOTE(review): `process` is not a namespace object in the same sense as the
// others; this matches upstream tfjs behavior — confirm before changing.
function getGlobalNamespace() {
    if (globalNameSpace == null) {
        // tslint:disable-next-line:no-any
        var ns = void 0;
        if (typeof (window) !== 'undefined') {
            ns = window;
        }
        else if (typeof (global) !== 'undefined') {
            ns = global;
        }
        else if (typeof (process) !== 'undefined') {
            ns = process;
        }
        else if (typeof (self) !== 'undefined') {
            ns = self;
        }
        else {
            throw new Error('Could not find a global object');
        }
        globalNameSpace = ns;
    }
    return globalNameSpace;
}
// tslint:disable-next-line:no-any
// Lazily creates the `_tfGlobals` Map on the global namespace and returns it.
function getGlobalMap() {
    const namespace = getGlobalNamespace();
    if (namespace._tfGlobals == null) {
        namespace._tfGlobals = new Map();
    }
    return namespace._tfGlobals;
}
/**
 * Returns a globally accessible 'singleton' object.
 *
 * @param key the name of the object
 * @param init a function to initialize to initialize this object
 *             the first time it is fetched.
 */
function getGlobal(key, init) {
    const globalMap = getGlobalMap();
    if (!globalMap.has(key)) {
        globalMap.set(key, init());
    }
    return globalMap.get(key);
}
+
+ var Abs = 'Abs';
+ var Acos = 'Acos';
+ var Acosh = 'Acosh';
+ var Add = 'Add';
+ var AddN = 'AddN';
+ var All = 'All';
+ var Any = 'Any';
+ var ArgMax = 'ArgMax';
+ var ArgMin = 'ArgMin';
+ var Asin = 'Asin';
+ var Asinh = 'Asinh';
+ var Atan = 'Atan';
+ var Atanh = 'Atanh';
+ var Atan2 = 'Atan2';
+ var AvgPool = 'AvgPool';
+ var AvgPoolGrad = 'AvgPoolGrad';
+ var AvgPool3D = 'AvgPool3D';
+ var AvgPool3DGrad = 'AvgPool3DGrad';
+ var BatchMatMul = 'BatchMatMul';
+ var BatchToSpaceND = 'BatchToSpaceND';
+ var Bincount = 'Bincount';
+ var BroadcastTo = 'BroadcastTo';
+ var BroadcastArgs = 'BroadcastArgs';
+ var Cast = 'Cast';
+ var Ceil = 'Ceil';
+ var ClipByValue = 'ClipByValue';
+ var Complex = 'Complex';
+ var ComplexAbs = 'ComplexAbs';
+ var Concat = 'Concat';
+ var Conv2D = 'Conv2D';
+ var Conv2DBackpropFilter = 'Conv2DBackpropFilter';
+ var Conv2DBackpropInput = 'Conv2DBackpropInput';
+ var Conv3D = 'Conv3D';
+ var Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';
+ var Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';
+ var Cos = 'Cos';
+ var Cosh = 'Cosh';
+ var Cumsum = 'Cumsum';
+ var CropAndResize = 'CropAndResize';
+ var DenseBincount = 'DenseBincount';
+ var DepthToSpace = 'DepthToSpace';
+ var DepthwiseConv2dNative = 'DepthwiseConv2dNative';
+ var DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter';
+ var DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput';
+ var Diag = 'Diag';
// Official TensorFlow kernel names (continued). Each constant is the string
// key under which a kernel's forward function and gradient are registered in
// kernelRegistry / gradRegistry below.
var Dilation2D = 'Dilation2D';
var Dilation2DBackpropInput = 'Dilation2DBackpropInput';
var Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';
var RealDiv = 'RealDiv';
var Einsum = 'Einsum';
var Elu = 'Elu';
var EluGrad = 'EluGrad';
var Erf = 'Erf';
var Equal = 'Equal';
var Exp = 'Exp';
var ExpandDims = 'ExpandDims';
var Expm1 = 'Expm1';
var FFT = 'FFT';
var Fill = 'Fill';
var FlipLeftRight = 'FlipLeftRight';
var Floor = 'Floor';
var FloorDiv = 'FloorDiv';
var FusedBatchNorm = 'FusedBatchNorm';
var GatherV2 = 'GatherV2';
var GatherNd = 'GatherNd';
var Greater = 'Greater';
var GreaterEqual = 'GreaterEqual';
var Identity = 'Identity';
var IFFT = 'IFFT';
var Imag = 'Imag';
var IsFinite = 'IsFinite';
var IsInf = 'IsInf';
var IsNan = 'IsNan';
var LeakyRelu = 'LeakyRelu';
var Less = 'Less';
var LessEqual = 'LessEqual';
var LinSpace = 'LinSpace';
var Log = 'Log';
var Log1p = 'Log1p';
var LogicalAnd = 'LogicalAnd';
var LogicalNot = 'LogicalNot';
var LogicalOr = 'LogicalOr';
var LogSoftmax = 'LogSoftmax';
var LRN = 'LRN';
var LRNGrad = 'LRNGrad';
var Max = 'Max';
var Maximum = 'Maximum';
var MaxPool = 'MaxPool';
var MaxPoolGrad = 'MaxPoolGrad';
var MaxPool3D = 'MaxPool3D';
var MaxPool3DGrad = 'MaxPool3DGrad';
var MaxPoolWithArgmax = 'MaxPoolWithArgmax';
var Mean = 'Mean';
var Min = 'Min';
var Minimum = 'Minimum';
var MirrorPad = 'MirrorPad';
var Mod = 'Mod';
var Multinomial = 'Multinomial';
var Multiply = 'Multiply';
var Neg = 'Neg';
var NotEqual = 'NotEqual';
var NonMaxSuppressionV3 = 'NonMaxSuppressionV3';
var NonMaxSuppressionV4 = 'NonMaxSuppressionV4';
var NonMaxSuppressionV5 = 'NonMaxSuppressionV5';
var OnesLike = 'OnesLike';
var OneHot = 'OneHot';
var Pack = 'Pack';
var PadV2 = 'PadV2';
var Pool = 'Pool';
var Pow = 'Pow';
var Prelu = 'Prelu';
var Prod = 'Prod';
var Range = 'Range';
var Real = 'Real';
var Reciprocal = 'Reciprocal';
var Relu = 'Relu';
var Reshape = 'Reshape';
var ResizeNearestNeighbor = 'ResizeNearestNeighbor';
var ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';
var ResizeBilinear = 'ResizeBilinear';
var ResizeBilinearGrad = 'ResizeBilinearGrad';
var Relu6 = 'Relu6';
var Reverse = 'Reverse';
var Round = 'Round';
var Rsqrt = 'Rsqrt';
var ScatterNd = 'ScatterNd';
var Select = 'Select';
var Selu = 'Selu';
var Slice = 'Slice';
var Sin = 'Sin';
var Sinh = 'Sinh';
var Sign = 'Sign';
var Sigmoid = 'Sigmoid';
var Softplus = 'Softplus';
var Sqrt = 'Sqrt';
var Sum = 'Sum';
var SpaceToBatchND = 'SpaceToBatchND';
var SplitV = 'SplitV';
var Softmax = 'Softmax';
var SparseFillEmptyRows = 'SparseFillEmptyRows';
var SparseReshape = 'SparseReshape';
var SparseSegmentMean = 'SparseSegmentMean';
var SparseSegmentSum = 'SparseSegmentSum';
var SparseToDense = 'SparseToDense';
var SquaredDifference = 'SquaredDifference';
var Square = 'Square';
var StridedSlice = 'StridedSlice';
var StringNGrams = 'StringNGrams';
var StringSplit = 'StringSplit';
var StringToHashBucketFast = 'StringToHashBucketFast';
var Sub = 'Sub';
var Tan = 'Tan';
var Tanh = 'Tanh';
var Tile = 'Tile';
var TopK = 'TopK';
var Transform = 'Transform';
var Transpose = 'Transpose';
var Unique = 'Unique';
var Unpack = 'Unpack';
var UnsortedSegmentSum = 'UnsortedSegmentSum';
var ZerosLike = 'ZerosLike';
/**
 * TensorFlow.js-only kernels (no TensorFlow counterpart).
 */
var Step = 'Step';
var FromPixels = 'FromPixels';
var RotateWithOffset = 'RotateWithOffset';
var _FusedMatMul = '_FusedMatMul';
var FusedConv2D = 'FusedConv2D';
var FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';
+
/**
 * Forwards all arguments to console.warn, unless the environment flags
 * `IS_TEST` or `PROD` indicate that warnings should be suppressed.
 */
function warn() {
    var args = Array.prototype.slice.call(arguments);
    var silenced = env().getBool('IS_TEST') || env().getBool('PROD');
    if (!silenced) {
        console.warn.apply(console, args);
    }
}
/**
 * Forwards all arguments to console.log, unless the environment flags
 * `IS_TEST` or `PROD` indicate that logging should be suppressed.
 */
function log$1() {
    var args = Array.prototype.slice.call(arguments);
    var silenced = env().getBool('IS_TEST') || env().getBool('PROD');
    if (!silenced) {
        console.log.apply(console, args);
    }
}
+
// Process-wide registries (shared across bundles via getGlobal):
// kernelRegistry maps "backend_kernel" keys to kernel configs,
// gradRegistry maps kernel names to gradient configs.
var kernelRegistry = getGlobal('kernelRegistry', function () { return new Map(); });
var gradRegistry = getGlobal('gradRegistry', function () { return new Map(); });
+ /**
+ * Returns the kernel function (code) associated with the provided names.
+ *
+ * @param kernelName The official name of the kernel.
+ * @param backendName The official name of the backend.
+ */
+ function getKernel(kernelName, backendName) {
+ var key = makeKey(kernelName, backendName);
+ return kernelRegistry.get(key);
+ }
+ /**
+ * Returns the registered gradient info associated with the provided kernel.
+ * @param kernelName The official TF kernel name.
+ */
+ function getGradient(kernelName) {
+ return gradRegistry.get(kernelName);
+ }
/**
 * Collects every kernel config registered for the given backend.
 * Registry keys have the shape "backend_kernel" (see makeKey), so the
 * backend is the segment before the first underscore.
 *
 * @param backendName The official name of the backend.
 * @returns Array of kernel configs registered for that backend.
 */
function getKernelsForBackend(backendName) {
    var result = [];
    kernelRegistry.forEach(function (config, key) {
        var backend = key.split('_')[0];
        if (backend === backendName) {
            result.push(config);
        }
    });
    return result;
}
+ /**
+ * Registers the function (forward pass) for the kernel in a global registry.
+ *
+ * @param config A config object with the following properties:
+ * - `kernelName` The official name of the kernel.
+ * - `backendName` The official name of the backend.
+ * - `kernelFunc` The function to run during the forward pass of the kernel.
+ * - `setupFunc` Optional. Gets called once, after the backend initializes.
+ * - `disposeFunc` Optional. Gets called once, right before the backend is
+ * disposed.
+ */
+ function registerKernel(config) {
+ var kernelName = config.kernelName, backendName = config.backendName;
+ var key = makeKey(kernelName, backendName);
+ if (kernelRegistry.has(key)) {
+ warn("The kernel '" + kernelName + "' for backend " +
+ ("'" + backendName + "' is already registered"));
+ }
+ kernelRegistry.set(key, config);
+ }
+ /**
+ * Registers a gradient function for a given kernel in the global registry,
+ * to be used during the back-propagation of that kernel.
+ *
+ * @param config An object with the following properties:
+ * - `kernelName` The name of the kernel that the gradient function is for.
+ * - `gradFunc` The function to run during back-propagation.
+ */
+ function registerGradient(config) {
+ var kernelName = config.kernelName;
+ if (gradRegistry.has(kernelName)) {
+ // TODO (yassogba) after 3.0 assess whether we need to keep this gated
+ // to debug mode.
+ if (env().getBool('DEBUG')) {
+ warn("Overriding the gradient for '" + kernelName + "'");
+ }
+ }
+ gradRegistry.set(kernelName, config);
+ }
+ /**
+ * Removes the kernel function from the registry.
+ *
+ * @param kernelName The official name of the kernel.
+ * @param backendName The official name of the backend.
+ *
+ */
+ function unregisterKernel(kernelName, backendName) {
+ var key = makeKey(kernelName, backendName);
+ if (!kernelRegistry.has(key)) {
+ throw new Error("The kernel '" + kernelName + "' for backend " +
+ ("'" + backendName + "' is not registered"));
+ }
+ kernelRegistry.delete(key);
+ }
/**
 * Removes the registered gradient from the global registry.
 * @param kernelName The official TF kernel name.
 * @throws Error if no gradient is registered for the kernel.
 */
function unregisterGradient(kernelName) {
    if (gradRegistry.has(kernelName)) {
        gradRegistry.delete(kernelName);
        return;
    }
    throw new Error("The gradient '" + kernelName + "' for backend is not registered");
}
+ /**
+ * Finds kernels that have already been registered to a backend and re-registers
+ * them for a new backend. Useful for registering custom backends.
+ * @param registeredBackendName Already registered backend.
+ * @param newBackendName New backend.
+ */
+ function copyRegisteredKernels(registeredBackendName, newBackendName) {
+ var kernels = getKernelsForBackend(registeredBackendName);
+ kernels.forEach(function (kernelConfig) {
+ var newKernelConfig = Object.assign({}, kernelConfig, { backendName: newBackendName });
+ registerKernel(newKernelConfig);
+ });
+ }
/**
 * Builds the registry key for a (kernel, backend) pair: "backend_kernel".
 * @param kernelName The official name of the kernel.
 * @param backendName The official name of the backend.
 * @returns The combined registry key string.
 */
function makeKey(kernelName, backendName) {
    return [backendName, kernelName].join('_');
}
+
// Module-level alias of the Long constructor defined below.
var long = Long$1;
/**
 * wasm optimizations, to do native i64 multiplication and divide
 */
var wasm = null;
try {
    // Tiny hand-assembled wasm module; exports mul, div_s, div_u, rem_s,
    // rem_u (each taking/returning 32-bit halves) plus get_high() for
    // retrieving the upper 32 bits of the most recent result.
    wasm = new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([
        0, 97, 115, 109, 1, 0, 0, 0, 1, 13, 2, 96, 0, 1, 127, 96, 4, 127, 127, 127, 127, 1, 127, 3, 7, 6, 0, 1, 1, 1, 1, 1, 6, 6, 1, 127, 1, 65, 0, 11, 7, 50, 6, 3, 109, 117, 108, 0, 1, 5, 100, 105, 118, 95, 115, 0, 2, 5, 100, 105, 118, 95, 117, 0, 3, 5, 114, 101, 109, 95, 115, 0, 4, 5, 114, 101, 109, 95, 117, 0, 5, 8, 103, 101, 116, 95, 104, 105, 103, 104, 0, 0, 10, 191, 1, 6, 4, 0, 35, 0, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 126, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 127, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 128, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 129, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 130, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11
    ])), {}).exports;
}
catch (e) {
    // no wasm support :( -- the pure-JS code paths below are used instead.
}
+ /**
+ * Constructs a 64 bit two's-complement integer, given its low and high 32 bit values as *signed* integers.
+ * See the from* functions below for more convenient ways of constructing Longs.
+ * @exports Long
+ * @class A Long class for representing a 64 bit two's-complement integer value.
+ * @param {number} low The low (signed) 32 bits of the long
+ * @param {number} high The high (signed) 32 bits of the long
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @constructor
+ */
+ function Long$1(low, high, unsigned) {
+ /**
+ * The low 32 bits as a signed value.
+ * @type {number}
+ */
+ this.low = low | 0;
+ /**
+ * The high 32 bits as a signed value.
+ * @type {number}
+ */
+ this.high = high | 0;
+ /**
+ * Whether unsigned or not.
+ * @type {boolean}
+ */
+ this.unsigned = !!unsigned;
+ }
+ // The internal representation of a long is the two given signed, 32-bit values.
+ // We use 32-bit pieces because these are the size of integers on which
+ // Javascript performs bit-operations. For operations like addition and
+ // multiplication, we split each number into 16 bit pieces, which can easily be
+ // multiplied within Javascript's floating-point representation without overflow
+ // or change in sign.
+ //
+ // In the algorithms below, we frequently reduce the negative case to the
+ // positive case by negating the input(s) and then post-processing the result.
+ // Note that we must ALWAYS check specially whether those values are MIN_VALUE
+ // (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as
+ // a positive number, it overflows back into a negative). Not handling this
+ // case would often result in infinite recursion.
+ //
+ // Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the from*
+ // methods on which they depend.
+ /**
+ * An indicator used to reliably determine if an object is a Long or not.
+ * @type {boolean}
+ * @const
+ * @private
+ */
+ Long$1.prototype.__isLong__;
+ Object.defineProperty(Long$1.prototype, "__isLong__", { value: true });
+ /**
+ * @function
+ * @param {*} obj Object
+ * @returns {boolean}
+ * @inner
+ */
+ function isLong(obj) {
+ return (obj && obj["__isLong__"]) === true;
+ }
+ /**
+ * Tests if the specified object is a Long.
+ * @function
+ * @param {*} obj Object
+ * @returns {boolean}
+ */
+ Long$1.isLong = isLong;
+ /**
+ * A cache of the Long representations of small integer values.
+ * @type {!Object}
+ * @inner
+ */
+ var INT_CACHE = {};
+ /**
+ * A cache of the Long representations of small unsigned integer values.
+ * @type {!Object}
+ * @inner
+ */
+ var UINT_CACHE = {};
+ /**
+ * @param {number} value
+ * @param {boolean=} unsigned
+ * @returns {!Long}
+ * @inner
+ */
+ function fromInt(value, unsigned) {
+ var obj, cachedObj, cache;
+ if (unsigned) {
+ value >>>= 0;
+ if (cache = (0 <= value && value < 256)) {
+ cachedObj = UINT_CACHE[value];
+ if (cachedObj)
+ return cachedObj;
+ }
+ obj = fromBits(value, (value | 0) < 0 ? -1 : 0, true);
+ if (cache)
+ UINT_CACHE[value] = obj;
+ return obj;
+ }
+ else {
+ value |= 0;
+ if (cache = (-128 <= value && value < 128)) {
+ cachedObj = INT_CACHE[value];
+ if (cachedObj)
+ return cachedObj;
+ }
+ obj = fromBits(value, value < 0 ? -1 : 0, false);
+ if (cache)
+ INT_CACHE[value] = obj;
+ return obj;
+ }
+ }
+ /**
+ * Returns a Long representing the given 32 bit integer value.
+ * @function
+ * @param {number} value The 32 bit integer in question
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @returns {!Long} The corresponding Long value
+ */
+ Long$1.fromInt = fromInt;
+ /**
+ * @param {number} value
+ * @param {boolean=} unsigned
+ * @returns {!Long}
+ * @inner
+ */
+ function fromNumber(value, unsigned) {
+ if (isNaN(value))
+ return unsigned ? UZERO : ZERO;
+ if (unsigned) {
+ if (value < 0)
+ return UZERO;
+ if (value >= TWO_PWR_64_DBL)
+ return MAX_UNSIGNED_VALUE;
+ }
+ else {
+ if (value <= -TWO_PWR_63_DBL)
+ return MIN_VALUE;
+ if (value + 1 >= TWO_PWR_63_DBL)
+ return MAX_VALUE;
+ }
+ if (value < 0)
+ return fromNumber(-value, unsigned).neg();
+ return fromBits((value % TWO_PWR_32_DBL) | 0, (value / TWO_PWR_32_DBL) | 0, unsigned);
+ }
+ /**
+ * Returns a Long representing the given value, provided that it is a finite number. Otherwise, zero is returned.
+ * @function
+ * @param {number} value The number in question
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @returns {!Long} The corresponding Long value
+ */
+ Long$1.fromNumber = fromNumber;
+ /**
+ * @param {number} lowBits
+ * @param {number} highBits
+ * @param {boolean=} unsigned
+ * @returns {!Long}
+ * @inner
+ */
+ function fromBits(lowBits, highBits, unsigned) {
+ return new Long$1(lowBits, highBits, unsigned);
+ }
+ /**
+ * Returns a Long representing the 64 bit integer that comes by concatenating the given low and high bits. Each is
+ * assumed to use 32 bits.
+ * @function
+ * @param {number} lowBits The low 32 bits
+ * @param {number} highBits The high 32 bits
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @returns {!Long} The corresponding Long value
+ */
+ Long$1.fromBits = fromBits;
+ /**
+ * @function
+ * @param {number} base
+ * @param {number} exponent
+ * @returns {number}
+ * @inner
+ */
+ var pow_dbl = Math.pow; // Used 4 times (4*8 to 15+4)
+ /**
+ * @param {string} str
+ * @param {(boolean|number)=} unsigned
+ * @param {number=} radix
+ * @returns {!Long}
+ * @inner
+ */
+ function fromString(str, unsigned, radix) {
+ if (str.length === 0)
+ throw Error('empty string');
+ if (str === "NaN" || str === "Infinity" || str === "+Infinity" || str === "-Infinity")
+ return ZERO;
+ if (typeof unsigned === 'number') {
+ // For goog.math.long compatibility
+ radix = unsigned,
+ unsigned = false;
+ }
+ else {
+ unsigned = !!unsigned;
+ }
+ radix = radix || 10;
+ if (radix < 2 || 36 < radix)
+ throw RangeError('radix');
+ var p;
+ if ((p = str.indexOf('-')) > 0)
+ throw Error('interior hyphen');
+ else if (p === 0) {
+ return fromString(str.substring(1), unsigned, radix).neg();
+ }
+ // Do several (8) digits each time through the loop, so as to
+ // minimize the calls to the very expensive emulated div.
+ var radixToPower = fromNumber(pow_dbl(radix, 8));
+ var result = ZERO;
+ for (var i = 0; i < str.length; i += 8) {
+ var size = Math.min(8, str.length - i), value = parseInt(str.substring(i, i + size), radix);
+ if (size < 8) {
+ var power = fromNumber(pow_dbl(radix, size));
+ result = result.mul(power).add(fromNumber(value));
+ }
+ else {
+ result = result.mul(radixToPower);
+ result = result.add(fromNumber(value));
+ }
+ }
+ result.unsigned = unsigned;
+ return result;
+ }
+ /**
+ * Returns a Long representation of the given string, written using the specified radix.
+ * @function
+ * @param {string} str The textual representation of the Long
+ * @param {(boolean|number)=} unsigned Whether unsigned or not, defaults to signed
+ * @param {number=} radix The radix in which the text is written (2-36), defaults to 10
+ * @returns {!Long} The corresponding Long value
+ */
+ Long$1.fromString = fromString;
+ /**
+ * @function
+ * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val
+ * @param {boolean=} unsigned
+ * @returns {!Long}
+ * @inner
+ */
+ function fromValue(val, unsigned) {
+ if (typeof val === 'number')
+ return fromNumber(val, unsigned);
+ if (typeof val === 'string')
+ return fromString(val, unsigned);
+ // Throws for non-objects, converts non-instanceof Long:
+ return fromBits(val.low, val.high, typeof unsigned === 'boolean' ? unsigned : val.unsigned);
+ }
+ /**
+ * Converts the specified value to a Long using the appropriate from* function for its type.
+ * @function
+ * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val Value
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @returns {!Long}
+ */
+ Long$1.fromValue = fromValue;
// NOTE: the compiler should inline these constant values below and then remove these variables, so there should be
// no runtime penalty for these.
/**
 * 2^16 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_16_DBL = 1 << 16;
/**
 * 2^24 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_24_DBL = 1 << 24;
/**
 * 2^32 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_32_DBL = TWO_PWR_16_DBL * TWO_PWR_16_DBL;
/**
 * 2^64 as a double (not exactly representable; used only for range checks).
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_64_DBL = TWO_PWR_32_DBL * TWO_PWR_32_DBL;
/**
 * 2^63 as a double.
 * @type {number}
 * @const
 * @inner
 */
var TWO_PWR_63_DBL = TWO_PWR_64_DBL / 2;
+ /**
+ * @type {!Long}
+ * @const
+ * @inner
+ */
+ var TWO_PWR_24 = fromInt(TWO_PWR_24_DBL);
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var ZERO = fromInt(0);
+ /**
+ * Signed zero.
+ * @type {!Long}
+ */
+ Long$1.ZERO = ZERO;
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var UZERO = fromInt(0, true);
+ /**
+ * Unsigned zero.
+ * @type {!Long}
+ */
+ Long$1.UZERO = UZERO;
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var ONE = fromInt(1);
+ /**
+ * Signed one.
+ * @type {!Long}
+ */
+ Long$1.ONE = ONE;
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var UONE = fromInt(1, true);
+ /**
+ * Unsigned one.
+ * @type {!Long}
+ */
+ Long$1.UONE = UONE;
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var NEG_ONE = fromInt(-1);
+ /**
+ * Signed negative one.
+ * @type {!Long}
+ */
+ Long$1.NEG_ONE = NEG_ONE;
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var MAX_VALUE = fromBits(0xFFFFFFFF | 0, 0x7FFFFFFF | 0, false);
+ /**
+ * Maximum signed value.
+ * @type {!Long}
+ */
+ Long$1.MAX_VALUE = MAX_VALUE;
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var MAX_UNSIGNED_VALUE = fromBits(0xFFFFFFFF | 0, 0xFFFFFFFF | 0, true);
+ /**
+ * Maximum unsigned value.
+ * @type {!Long}
+ */
+ Long$1.MAX_UNSIGNED_VALUE = MAX_UNSIGNED_VALUE;
+ /**
+ * @type {!Long}
+ * @inner
+ */
+ var MIN_VALUE = fromBits(0, 0x80000000 | 0, false);
+ /**
+ * Minimum signed value.
+ * @type {!Long}
+ */
+ Long$1.MIN_VALUE = MIN_VALUE;
+ /**
+ * @alias Long.prototype
+ * @inner
+ */
+ var LongPrototype = Long$1.prototype;
+ /**
+ * Converts the Long to a 32 bit integer, assuming it is a 32 bit integer.
+ * @returns {number}
+ */
+ LongPrototype.toInt = function toInt() {
+ return this.unsigned ? this.low >>> 0 : this.low;
+ };
+ /**
+ * Converts the Long to a the nearest floating-point representation of this value (double, 53 bit mantissa).
+ * @returns {number}
+ */
+ LongPrototype.toNumber = function toNumber() {
+ if (this.unsigned)
+ return ((this.high >>> 0) * TWO_PWR_32_DBL) + (this.low >>> 0);
+ return this.high * TWO_PWR_32_DBL + (this.low >>> 0);
+ };
+ /**
+ * Converts the Long to a string written in the specified radix.
+ * @param {number=} radix Radix (2-36), defaults to 10
+ * @returns {string}
+ * @override
+ * @throws {RangeError} If `radix` is out of range
+ */
+ LongPrototype.toString = function toString(radix) {
+ radix = radix || 10;
+ if (radix < 2 || 36 < radix)
+ throw RangeError('radix');
+ if (this.isZero())
+ return '0';
+ if (this.isNegative()) { // Unsigned Longs are never negative
+ if (this.eq(MIN_VALUE)) {
+ // We need to change the Long value before it can be negated, so we remove
+ // the bottom-most digit in this base and then recurse to do the rest.
+ var radixLong = fromNumber(radix), div = this.div(radixLong), rem1 = div.mul(radixLong).sub(this);
+ return div.toString(radix) + rem1.toInt().toString(radix);
+ }
+ else
+ return '-' + this.neg().toString(radix);
+ }
+ // Do several (6) digits each time through the loop, so as to
+ // minimize the calls to the very expensive emulated div.
+ var radixToPower = fromNumber(pow_dbl(radix, 6), this.unsigned), rem = this;
+ var result = '';
+ while (true) {
+ var remDiv = rem.div(radixToPower), intval = rem.sub(remDiv.mul(radixToPower)).toInt() >>> 0, digits = intval.toString(radix);
+ rem = remDiv;
+ if (rem.isZero())
+ return digits + result;
+ else {
+ while (digits.length < 6)
+ digits = '0' + digits;
+ result = '' + digits + result;
+ }
+ }
+ };
+ /**
+ * Gets the high 32 bits as a signed integer.
+ * @returns {number} Signed high bits
+ */
+ LongPrototype.getHighBits = function getHighBits() {
+ return this.high;
+ };
+ /**
+ * Gets the high 32 bits as an unsigned integer.
+ * @returns {number} Unsigned high bits
+ */
+ LongPrototype.getHighBitsUnsigned = function getHighBitsUnsigned() {
+ return this.high >>> 0;
+ };
+ /**
+ * Gets the low 32 bits as a signed integer.
+ * @returns {number} Signed low bits
+ */
+ LongPrototype.getLowBits = function getLowBits() {
+ return this.low;
+ };
+ /**
+ * Gets the low 32 bits as an unsigned integer.
+ * @returns {number} Unsigned low bits
+ */
+ LongPrototype.getLowBitsUnsigned = function getLowBitsUnsigned() {
+ return this.low >>> 0;
+ };
+ /**
+ * Gets the number of bits needed to represent the absolute value of this Long.
+ * @returns {number}
+ */
+ LongPrototype.getNumBitsAbs = function getNumBitsAbs() {
+ if (this.isNegative()) // Unsigned Longs are never negative
+ return this.eq(MIN_VALUE) ? 64 : this.neg().getNumBitsAbs();
+ var val = this.high != 0 ? this.high : this.low;
+ for (var bit = 31; bit > 0; bit--)
+ if ((val & (1 << bit)) != 0)
+ break;
+ return this.high != 0 ? bit + 33 : bit + 1;
+ };
+ /**
+ * Tests if this Long's value equals zero.
+ * @returns {boolean}
+ */
+ LongPrototype.isZero = function isZero() {
+ return this.high === 0 && this.low === 0;
+ };
+ /**
+ * Tests if this Long's value equals zero. This is an alias of {@link Long#isZero}.
+ * @returns {boolean}
+ */
+ LongPrototype.eqz = LongPrototype.isZero;
+ /**
+ * Tests if this Long's value is negative.
+ * @returns {boolean}
+ */
+ LongPrototype.isNegative = function isNegative() {
+ return !this.unsigned && this.high < 0;
+ };
+ /**
+ * Tests if this Long's value is positive.
+ * @returns {boolean}
+ */
+ LongPrototype.isPositive = function isPositive() {
+ return this.unsigned || this.high >= 0;
+ };
+ /**
+ * Tests if this Long's value is odd.
+ * @returns {boolean}
+ */
+ LongPrototype.isOdd = function isOdd() {
+ return (this.low & 1) === 1;
+ };
+ /**
+ * Tests if this Long's value is even.
+ * @returns {boolean}
+ */
+ LongPrototype.isEven = function isEven() {
+ return (this.low & 1) === 0;
+ };
+ /**
+ * Tests if this Long's value equals the specified's.
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.equals = function equals(other) {
+ if (!isLong(other))
+ other = fromValue(other);
+ if (this.unsigned !== other.unsigned && (this.high >>> 31) === 1 && (other.high >>> 31) === 1)
+ return false;
+ return this.high === other.high && this.low === other.low;
+ };
+ /**
+ * Tests if this Long's value equals the specified's. This is an alias of {@link Long#equals}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.eq = LongPrototype.equals;
+ /**
+ * Tests if this Long's value differs from the specified's.
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.notEquals = function notEquals(other) {
+ return !this.eq(/* validates */ other);
+ };
+ /**
+ * Tests if this Long's value differs from the specified's. This is an alias of {@link Long#notEquals}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.neq = LongPrototype.notEquals;
+ /**
+ * Tests if this Long's value differs from the specified's. This is an alias of {@link Long#notEquals}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.ne = LongPrototype.notEquals;
+ /**
+ * Tests if this Long's value is less than the specified's.
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.lessThan = function lessThan(other) {
+ return this.comp(/* validates */ other) < 0;
+ };
+ /**
+ * Tests if this Long's value is less than the specified's. This is an alias of {@link Long#lessThan}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.lt = LongPrototype.lessThan;
+ /**
+ * Tests if this Long's value is less than or equal the specified's.
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.lessThanOrEqual = function lessThanOrEqual(other) {
+ return this.comp(/* validates */ other) <= 0;
+ };
+ /**
+ * Tests if this Long's value is less than or equal the specified's. This is an alias of {@link Long#lessThanOrEqual}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.lte = LongPrototype.lessThanOrEqual;
+ /**
+ * Tests if this Long's value is less than or equal the specified's. This is an alias of {@link Long#lessThanOrEqual}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.le = LongPrototype.lessThanOrEqual;
+ /**
+ * Tests if this Long's value is greater than the specified's.
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.greaterThan = function greaterThan(other) {
+ return this.comp(/* validates */ other) > 0;
+ };
+ /**
+ * Tests if this Long's value is greater than the specified's. This is an alias of {@link Long#greaterThan}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.gt = LongPrototype.greaterThan;
+ /**
+ * Tests if this Long's value is greater than or equal the specified's.
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.greaterThanOrEqual = function greaterThanOrEqual(other) {
+ return this.comp(/* validates */ other) >= 0;
+ };
+ /**
+ * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.gte = LongPrototype.greaterThanOrEqual;
+ /**
+ * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {boolean}
+ */
+ LongPrototype.ge = LongPrototype.greaterThanOrEqual;
+ /**
+ * Compares this Long's value with the specified's.
+ * @param {!Long|number|string} other Other value
+ * @returns {number} 0 if they are the same, 1 if the this is greater and -1
+ * if the given one is greater
+ */
+ LongPrototype.compare = function compare(other) {
+ if (!isLong(other))
+ other = fromValue(other);
+ if (this.eq(other))
+ return 0;
+ var thisNeg = this.isNegative(), otherNeg = other.isNegative();
+ if (thisNeg && !otherNeg)
+ return -1;
+ if (!thisNeg && otherNeg)
+ return 1;
+ // At this point the sign bits are the same
+ if (!this.unsigned)
+ return this.sub(other).isNegative() ? -1 : 1;
+ // Both are positive if at least one is unsigned
+ return (other.high >>> 0) > (this.high >>> 0) || (other.high === this.high && (other.low >>> 0) > (this.low >>> 0)) ? -1 : 1;
+ };
+ /**
+ * Compares this Long's value with the specified's. This is an alias of {@link Long#compare}.
+ * @function
+ * @param {!Long|number|string} other Other value
+ * @returns {number} 0 if they are the same, 1 if the this is greater and -1
+ * if the given one is greater
+ */
+ LongPrototype.comp = LongPrototype.compare;
+ /**
+ * Negates this Long's value.
+ * @returns {!Long} Negated Long
+ */
+ LongPrototype.negate = function negate() {
+ if (!this.unsigned && this.eq(MIN_VALUE))
+ return MIN_VALUE;
+ return this.not().add(ONE);
+ };
+ /**
+ * Negates this Long's value. This is an alias of {@link Long#negate}.
+ * @function
+ * @returns {!Long} Negated Long
+ */
+ LongPrototype.neg = LongPrototype.negate;
+ /**
+ * Returns the sum of this and the specified Long.
+ * @param {!Long|number|string} addend Addend
+ * @returns {!Long} Sum
+ */
+ LongPrototype.add = function add(addend) {
+ if (!isLong(addend))
+ addend = fromValue(addend);
+ // Divide each number into 4 chunks of 16 bits, and then sum the chunks.
+ var a48 = this.high >>> 16;
+ var a32 = this.high & 0xFFFF;
+ var a16 = this.low >>> 16;
+ var a00 = this.low & 0xFFFF;
+ var b48 = addend.high >>> 16;
+ var b32 = addend.high & 0xFFFF;
+ var b16 = addend.low >>> 16;
+ var b00 = addend.low & 0xFFFF;
+ var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
+ c00 += a00 + b00;
+ c16 += c00 >>> 16;
+ c00 &= 0xFFFF;
+ c16 += a16 + b16;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c32 += a32 + b32;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c48 += a48 + b48;
+ c48 &= 0xFFFF;
+ return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned);
+ };
+ /**
+ * Returns the difference of this and the specified Long.
+ * @param {!Long|number|string} subtrahend Subtrahend
+ * @returns {!Long} Difference
+ */
+ LongPrototype.subtract = function subtract(subtrahend) {
+ if (!isLong(subtrahend))
+ subtrahend = fromValue(subtrahend);
+ return this.add(subtrahend.neg());
+ };
+ /**
+ * Returns the difference of this and the specified Long. This is an alias of {@link Long#subtract}.
+ * @function
+ * @param {!Long|number|string} subtrahend Subtrahend
+ * @returns {!Long} Difference
+ */
+ LongPrototype.sub = LongPrototype.subtract;
+ /**
+ * Returns the product of this and the specified Long.
+ * @param {!Long|number|string} multiplier Multiplier
+ * @returns {!Long} Product
+ */
+ LongPrototype.multiply = function multiply(multiplier) {
+ if (this.isZero())
+ return ZERO;
+ if (!isLong(multiplier))
+ multiplier = fromValue(multiplier);
+ // use wasm support if present
+ if (wasm) {
+ var low = wasm.mul(this.low, this.high, multiplier.low, multiplier.high);
+ return fromBits(low, wasm.get_high(), this.unsigned);
+ }
+ if (multiplier.isZero())
+ return ZERO;
+ if (this.eq(MIN_VALUE))
+ return multiplier.isOdd() ? MIN_VALUE : ZERO;
+ if (multiplier.eq(MIN_VALUE))
+ return this.isOdd() ? MIN_VALUE : ZERO;
+ if (this.isNegative()) {
+ if (multiplier.isNegative())
+ return this.neg().mul(multiplier.neg());
+ else
+ return this.neg().mul(multiplier).neg();
+ }
+ else if (multiplier.isNegative())
+ return this.mul(multiplier.neg()).neg();
+ // If both longs are small, use float multiplication
+ if (this.lt(TWO_PWR_24) && multiplier.lt(TWO_PWR_24))
+ return fromNumber(this.toNumber() * multiplier.toNumber(), this.unsigned);
+ // Divide each long into 4 chunks of 16 bits, and then add up 4x4 products.
+ // We can skip products that would overflow.
+ var a48 = this.high >>> 16;
+ var a32 = this.high & 0xFFFF;
+ var a16 = this.low >>> 16;
+ var a00 = this.low & 0xFFFF;
+ var b48 = multiplier.high >>> 16;
+ var b32 = multiplier.high & 0xFFFF;
+ var b16 = multiplier.low >>> 16;
+ var b00 = multiplier.low & 0xFFFF;
+ var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
+ c00 += a00 * b00;
+ c16 += c00 >>> 16;
+ c00 &= 0xFFFF;
+ c16 += a16 * b00;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c16 += a00 * b16;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c32 += a32 * b00;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c32 += a16 * b16;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c32 += a00 * b32;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;
+ c48 &= 0xFFFF;
+ return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned);
+ };
+ /**
+ * Returns the product of this and the specified Long. This is an alias of {@link Long#multiply}.
+ * @function
+ * @param {!Long|number|string} multiplier Multiplier
+ * @returns {!Long} Product
+ */
+ LongPrototype.mul = LongPrototype.multiply;
+ /**
+ * Returns this Long divided by the specified. The result is signed if this Long is signed or
+ * unsigned if this Long is unsigned.
+ * @param {!Long|number|string} divisor Divisor
+ * @returns {!Long} Quotient
+ */
+ LongPrototype.divide = function divide(divisor) {
+ if (!isLong(divisor))
+ divisor = fromValue(divisor);
+ if (divisor.isZero())
+ throw Error('division by zero');
+ // use wasm support if present
+ if (wasm) {
+ // guard against signed division overflow: the largest
+ // negative number / -1 would be 1 larger than the largest
+ // positive number, due to two's complement.
+ if (!this.unsigned &&
+ this.high === -0x80000000 &&
+ divisor.low === -1 && divisor.high === -1) {
+ // be consistent with non-wasm code path
+ return this;
+ }
+ var low = (this.unsigned ? wasm.div_u : wasm.div_s)(this.low, this.high, divisor.low, divisor.high);
+ return fromBits(low, wasm.get_high(), this.unsigned);
+ }
+ if (this.isZero())
+ return this.unsigned ? UZERO : ZERO;
+ var approx, rem, res;
+ if (!this.unsigned) {
+ // This section is only relevant for signed longs and is derived from the
+ // closure library as a whole.
+ if (this.eq(MIN_VALUE)) {
+ if (divisor.eq(ONE) || divisor.eq(NEG_ONE))
+ return MIN_VALUE; // recall that -MIN_VALUE == MIN_VALUE
+ else if (divisor.eq(MIN_VALUE))
+ return ONE;
+ else {
+ // At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|.
+ var halfThis = this.shr(1);
+ approx = halfThis.div(divisor).shl(1);
+ if (approx.eq(ZERO)) {
+ return divisor.isNegative() ? ONE : NEG_ONE;
+ }
+ else {
+ rem = this.sub(divisor.mul(approx));
+ res = approx.add(rem.div(divisor));
+ return res;
+ }
+ }
+ }
+ else if (divisor.eq(MIN_VALUE))
+ return this.unsigned ? UZERO : ZERO;
+ if (this.isNegative()) {
+ if (divisor.isNegative())
+ return this.neg().div(divisor.neg());
+ return this.neg().div(divisor).neg();
+ }
+ else if (divisor.isNegative())
+ return this.div(divisor.neg()).neg();
+ res = ZERO;
+ }
+ else {
+ // The algorithm below has not been made for unsigned longs. It's therefore
+ // required to take special care of the MSB prior to running it.
+ if (!divisor.unsigned)
+ divisor = divisor.toUnsigned();
+ if (divisor.gt(this))
+ return UZERO;
+ if (divisor.gt(this.shru(1))) // 15 >>> 1 = 7 ; with divisor = 8 ; true
+ return UONE;
+ res = UZERO;
+ }
+ // Repeat the following until the remainder is less than other: find a
+ // floating-point that approximates remainder / other *from below*, add this
+ // into the result, and subtract it from the remainder. It is critical that
+ // the approximate value is less than or equal to the real value so that the
+ // remainder never becomes negative.
+ rem = this;
+ while (rem.gte(divisor)) {
+ // Approximate the result of division. This may be a little greater or
+ // smaller than the actual value.
+ approx = Math.max(1, Math.floor(rem.toNumber() / divisor.toNumber()));
+ // We will tweak the approximate result by changing it in the 48-th digit or
+ // the smallest non-fractional digit, whichever is larger.
+ var log2 = Math.ceil(Math.log(approx) / Math.LN2), delta = (log2 <= 48) ? 1 : pow_dbl(2, log2 - 48),
+ // Decrease the approximation until it is smaller than the remainder. Note
+ // that if it is too large, the product overflows and is negative.
+ approxRes = fromNumber(approx), approxRem = approxRes.mul(divisor);
+ while (approxRem.isNegative() || approxRem.gt(rem)) {
+ approx -= delta;
+ approxRes = fromNumber(approx, this.unsigned);
+ approxRem = approxRes.mul(divisor);
+ }
+ // We know the answer can't be zero... and actually, zero would cause
+ // infinite recursion since we would make no progress.
+ if (approxRes.isZero())
+ approxRes = ONE;
+ res = res.add(approxRes);
+ rem = rem.sub(approxRem);
+ }
+ return res;
+ };
+ /**
+ * Returns this Long divided by the specified. This is an alias of {@link Long#divide}.
+ * @function
+ * @param {!Long|number|string} divisor Divisor
+ * @returns {!Long} Quotient
+ */
+ LongPrototype.div = LongPrototype.divide;
+ /**
+ * Returns this Long modulo the specified.
+ * @param {!Long|number|string} divisor Divisor
+ * @returns {!Long} Remainder
+ */
+ LongPrototype.modulo = function modulo(divisor) {
+ if (!isLong(divisor))
+ divisor = fromValue(divisor);
+ // use wasm support if present
+ if (wasm) {
+ var low = (this.unsigned ? wasm.rem_u : wasm.rem_s)(this.low, this.high, divisor.low, divisor.high);
+ return fromBits(low, wasm.get_high(), this.unsigned);
+ }
+ return this.sub(this.div(divisor).mul(divisor));
+ };
+ /**
+ * Returns this Long modulo the specified. This is an alias of {@link Long#modulo}.
+ * @function
+ * @param {!Long|number|string} divisor Divisor
+ * @returns {!Long} Remainder
+ */
+ LongPrototype.mod = LongPrototype.modulo;
+ /**
+ * Returns this Long modulo the specified. This is an alias of {@link Long#modulo}.
+ * @function
+ * @param {!Long|number|string} divisor Divisor
+ * @returns {!Long} Remainder
+ */
+ LongPrototype.rem = LongPrototype.modulo;
+ /**
+ * Returns the bitwise NOT of this Long.
+ * @returns {!Long}
+ */
+ LongPrototype.not = function not() {
+ return fromBits(~this.low, ~this.high, this.unsigned);
+ };
+ /**
+ * Returns the bitwise AND of this Long and the specified.
+ * @param {!Long|number|string} other Other Long
+ * @returns {!Long}
+ */
+ LongPrototype.and = function and(other) {
+ if (!isLong(other))
+ other = fromValue(other);
+ return fromBits(this.low & other.low, this.high & other.high, this.unsigned);
+ };
+ /**
+ * Returns the bitwise OR of this Long and the specified.
+ * @param {!Long|number|string} other Other Long
+ * @returns {!Long}
+ */
+ LongPrototype.or = function or(other) {
+ if (!isLong(other))
+ other = fromValue(other);
+ return fromBits(this.low | other.low, this.high | other.high, this.unsigned);
+ };
+ /**
+ * Returns the bitwise XOR of this Long and the given one.
+ * @param {!Long|number|string} other Other Long
+ * @returns {!Long}
+ */
+ LongPrototype.xor = function xor(other) {
+ if (!isLong(other))
+ other = fromValue(other);
+ return fromBits(this.low ^ other.low, this.high ^ other.high, this.unsigned);
+ };
+ /**
+ * Returns this Long with bits shifted to the left by the given amount.
+ * @param {number|!Long} numBits Number of bits
+ * @returns {!Long} Shifted Long
+ */
+ LongPrototype.shiftLeft = function shiftLeft(numBits) {
+ if (isLong(numBits))
+ numBits = numBits.toInt();
+ if ((numBits &= 63) === 0)
+ return this;
+ else if (numBits < 32)
+ return fromBits(this.low << numBits, (this.high << numBits) | (this.low >>> (32 - numBits)), this.unsigned);
+ else
+ return fromBits(0, this.low << (numBits - 32), this.unsigned);
+ };
+ /**
+ * Returns this Long with bits shifted to the left by the given amount. This is an alias of {@link Long#shiftLeft}.
+ * @function
+ * @param {number|!Long} numBits Number of bits
+ * @returns {!Long} Shifted Long
+ */
+ LongPrototype.shl = LongPrototype.shiftLeft;
+ /**
+ * Returns this Long with bits arithmetically shifted to the right by the given amount.
+ * @param {number|!Long} numBits Number of bits
+ * @returns {!Long} Shifted Long
+ */
+ LongPrototype.shiftRight = function shiftRight(numBits) {
+ if (isLong(numBits))
+ numBits = numBits.toInt();
+ if ((numBits &= 63) === 0)
+ return this;
+ else if (numBits < 32)
+ return fromBits((this.low >>> numBits) | (this.high << (32 - numBits)), this.high >> numBits, this.unsigned);
+ else
+ return fromBits(this.high >> (numBits - 32), this.high >= 0 ? 0 : -1, this.unsigned);
+ };
+ /**
+ * Returns this Long with bits arithmetically shifted to the right by the given amount. This is an alias of {@link Long#shiftRight}.
+ * @function
+ * @param {number|!Long} numBits Number of bits
+ * @returns {!Long} Shifted Long
+ */
+ LongPrototype.shr = LongPrototype.shiftRight;
+ /**
+ * Returns this Long with bits logically shifted to the right by the given amount.
+ * @param {number|!Long} numBits Number of bits
+ * @returns {!Long} Shifted Long
+ */
+ LongPrototype.shiftRightUnsigned = function shiftRightUnsigned(numBits) {
+ if (isLong(numBits))
+ numBits = numBits.toInt();
+ numBits &= 63;
+ if (numBits === 0)
+ return this;
+ else {
+ var high = this.high;
+ if (numBits < 32) {
+ var low = this.low;
+ return fromBits((low >>> numBits) | (high << (32 - numBits)), high >>> numBits, this.unsigned);
+ }
+ else if (numBits === 32)
+ return fromBits(high, 0, this.unsigned);
+ else
+ return fromBits(high >>> (numBits - 32), 0, this.unsigned);
+ }
+ };
+ /**
+ * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}.
+ * @function
+ * @param {number|!Long} numBits Number of bits
+ * @returns {!Long} Shifted Long
+ */
+ LongPrototype.shru = LongPrototype.shiftRightUnsigned;
+ /**
+ * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}.
+ * @function
+ * @param {number|!Long} numBits Number of bits
+ * @returns {!Long} Shifted Long
+ */
+ LongPrototype.shr_u = LongPrototype.shiftRightUnsigned;
+ /**
+ * Converts this Long to signed.
+ * @returns {!Long} Signed long
+ */
+ LongPrototype.toSigned = function toSigned() {
+ if (!this.unsigned)
+ return this;
+ return fromBits(this.low, this.high, false);
+ };
+ /**
+ * Converts this Long to unsigned.
+ * @returns {!Long} Unsigned long
+ */
+ LongPrototype.toUnsigned = function toUnsigned() {
+ if (this.unsigned)
+ return this;
+ return fromBits(this.low, this.high, true);
+ };
+ /**
+ * Converts this Long to its byte representation.
+ * @param {boolean=} le Whether little or big endian, defaults to big endian
+ * @returns {!Array.<number>} Byte representation
+ */
+ LongPrototype.toBytes = function toBytes(le) {
+ return le ? this.toBytesLE() : this.toBytesBE();
+ };
+ /**
+ * Converts this Long to its little endian byte representation.
+ * @returns {!Array.<number>} Little endian byte representation
+ */
+ LongPrototype.toBytesLE = function toBytesLE() {
+ var hi = this.high, lo = this.low;
+ return [
+ lo & 0xff,
+ lo >>> 8 & 0xff,
+ lo >>> 16 & 0xff,
+ lo >>> 24,
+ hi & 0xff,
+ hi >>> 8 & 0xff,
+ hi >>> 16 & 0xff,
+ hi >>> 24
+ ];
+ };
+ /**
+ * Converts this Long to its big endian byte representation.
+ * @returns {!Array.<number>} Big endian byte representation
+ */
+ LongPrototype.toBytesBE = function toBytesBE() {
+ var hi = this.high, lo = this.low;
+ return [
+ hi >>> 24,
+ hi >>> 16 & 0xff,
+ hi >>> 8 & 0xff,
+ hi & 0xff,
+ lo >>> 24,
+ lo >>> 16 & 0xff,
+ lo >>> 8 & 0xff,
+ lo & 0xff
+ ];
+ };
+ /**
+ * Creates a Long from its byte representation.
+ * @param {!Array.<number>} bytes Byte representation
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @param {boolean=} le Whether little or big endian, defaults to big endian
+ * @returns {Long} The corresponding Long value
+ */
+ Long$1.fromBytes = function fromBytes(bytes, unsigned, le) {
+ return le ? Long$1.fromBytesLE(bytes, unsigned) : Long$1.fromBytesBE(bytes, unsigned);
+ };
+ /**
+ * Creates a Long from its little endian byte representation.
+ * @param {!Array.<number>} bytes Little endian byte representation
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @returns {Long} The corresponding Long value
+ */
+ Long$1.fromBytesLE = function fromBytesLE(bytes, unsigned) {
+ return new Long$1(bytes[0] |
+ bytes[1] << 8 |
+ bytes[2] << 16 |
+ bytes[3] << 24, bytes[4] |
+ bytes[5] << 8 |
+ bytes[6] << 16 |
+ bytes[7] << 24, unsigned);
+ };
+ /**
+ * Creates a Long from its big endian byte representation.
+ * @param {!Array.<number>} bytes Big endian byte representation
+ * @param {boolean=} unsigned Whether unsigned or not, defaults to signed
+ * @returns {Long} The corresponding Long value
+ */
+ Long$1.fromBytesBE = function fromBytesBE(bytes, unsigned) {
+ return new Long$1(bytes[4] << 24 |
+ bytes[5] << 16 |
+ bytes[6] << 8 |
+ bytes[7], bytes[0] << 24 |
+ bytes[1] << 16 |
+ bytes[2] << 8 |
+ bytes[3], unsigned);
+ };
+
    // Module-interop shim: expose the `long` module under a namespace object
    // that also carries a `default` key, so both CommonJS and ES-module import
    // styles resolve to the same implementation.
    var LongExports = /*#__PURE__*/Object.assign(/*#__PURE__*/Object.create(null), long, {
        'default': long
    });

    // tslint:disable-next-line
    var Long =
    // tslint:disable-next-line
    long || LongExports;
    // Parses a hexadecimal string into an unsigned 64-bit Long.
    function hexToLong(hex) {
        return Long.fromString(hex, true, 16);
    }
    // Some primes between 2^63 and 2^64 for various uses.
    // These are the mixing constants used by fingerPrint64 and its helpers below.
    // Hex 0xc3a5c85c97cb3127
    var k0 = hexToLong('c3a5c85c97cb3127');
    // Hex 0xb492b66fbe98f273
    var k1 = hexToLong('b492b66fbe98f273');
    // Hex 0x9ae16a3b2f90404f
    var k2 = hexToLong('9ae16a3b2f90404f');
+ function shiftMix(val) {
+ return val.xor(val.shru(47));
+ }
+ function fetch$2(s, offset, numBytes) {
+ var bytes = s.slice(offset, offset + numBytes);
+ return Long.fromBytes(Array.from(bytes), true, true);
+ }
+ function fetch64(s, offset) {
+ return fetch$2(s, offset, 8);
+ }
+ function fetch32(s, offset) {
+ return fetch$2(s, offset, 4);
+ }
+ function rotate64(val, shift) {
+ // Avoid shifting by 64: doing so yields an undefined result.
+ return shift === 0 ? val : val.shru(shift).or(val.shl(64 - shift));
+ }
+ function hashLen16(u, v, mul) {
+ if (mul === void 0) { mul = hexToLong('9ddfea08eb382d69'); }
+ // Murmur-inspired hashing.
+ var a = u.xor(v).mul(mul);
+ a = a.xor(a.shru(47));
+ var b = v.xor(a).mul(mul);
+ b = b.xor(b.shru(47));
+ b = b.mul(mul);
+ return b;
+ }
+ // Return a 16-byte hash for 48 bytes. Quick and dirty.
+ // Callers do best to use "random-looking" values for a and b.
+ function weakHashLen32WithSeeds(w, x, y, z, a, b) {
+ a = a.add(w);
+ b = rotate64(b.add(a).add(z), 21);
+ var c = a;
+ a = a.add(x);
+ a = a.add(y);
+ b = b.add(rotate64(a, 44));
+ return [a.add(z), b.add(c)];
+ }
+ function weakHashLen32WithSeedsStr(s, offset, a, b) {
+ return weakHashLen32WithSeeds(fetch64(s, offset), fetch64(s, offset + 8), fetch64(s, offset + 16), fetch64(s, offset + 24), a, b);
+ }
+ function hashLen0to16(s, len) {
+ if (len === void 0) { len = s.length; }
+ if (len >= 8) {
+ var mul = k2.add(len * 2);
+ var a = fetch64(s, 0).add(k2);
+ var b = fetch64(s, len - 8);
+ var c = rotate64(b, 37).mul(mul).add(a);
+ var d = rotate64(a, 25).add(b).mul(mul);
+ return hashLen16(c, d, mul);
+ }
+ if (len >= 4) {
+ var mul = k2.add(len * 2);
+ var a = fetch32(s, 0);
+ return hashLen16(a.shl(3).add(len), fetch32(s, len - 4), mul);
+ }
+ if (len > 0) {
+ var a = s[0];
+ var b = s[len >> 1];
+ var c = s[len - 1];
+ var y = a + (b << 8);
+ var z = len + (c << 2);
+ return shiftMix(k2.mul(y).xor(k0.mul(z))).mul(k2);
+ }
+ return k2;
+ }
+ function hashLen17to32(s, len) {
+ if (len === void 0) { len = s.length; }
+ var mul = k2.add(len * 2);
+ var a = fetch64(s, 0).mul(k1);
+ var b = fetch64(s, 8);
+ var c = fetch64(s, len - 8).mul(mul);
+ var d = fetch64(s, len - 16).mul(k2);
+ return hashLen16(rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d), a.add(rotate64(b.add(k2), 18)).add(c), mul);
+ }
+ function hashLen33to64(s, len) {
+ if (len === void 0) { len = s.length; }
+ var mul = k2.add(len * 2);
+ var a = fetch64(s, 0).mul(k2);
+ var b = fetch64(s, 8);
+ var c = fetch64(s, len - 8).mul(mul);
+ var d = fetch64(s, len - 16).mul(k2);
+ var y = rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d);
+ var z = hashLen16(y, a.add(rotate64(b.add(k2), 18)).add(c), mul);
+ var e = fetch64(s, 16).mul(mul);
+ var f = fetch64(s, 24);
+ var g = y.add(fetch64(s, len - 32)).mul(mul);
+ var h = z.add(fetch64(s, len - 24)).mul(mul);
+ return hashLen16(rotate64(e.add(f), 43).add(rotate64(g, 30)).add(h), e.add(rotate64(f.add(a), 18)).add(g), mul);
+ }
    /**
     * Computes a 64-bit fingerprint of the byte sequence `s`, dispatching to a
     * size-specialized helper for inputs of up to 64 bytes and using a rolling
     * 64-byte block loop for anything longer. Returns a Long.
     * @param s Byte sequence (indexable, sliceable) to hash.
     * @param len Number of bytes to hash; defaults to `s.length`.
     */
    function fingerPrint64(s, len) {
        var _a, _b;
        if (len === void 0) { len = s.length; }
        var seed = Long.fromNumber(81, true);
        if (len <= 32) {
            if (len <= 16) {
                return hashLen0to16(s, len);
            }
            else {
                return hashLen17to32(s, len);
            }
        }
        else if (len <= 64) {
            return hashLen33to64(s, len);
        }
        // For strings over 64 bytes we loop. Internal state consists of
        // 56 bytes: v, w, x, y, and z.
        var x = seed;
        var y = seed.mul(k1).add(113);
        var z = shiftMix(y.mul(k2).add(113)).mul(k2);
        var v = [Long.UZERO, Long.UZERO];
        var w = [Long.UZERO, Long.UZERO];
        x = x.mul(k2).add(fetch64(s, 0));
        var offset = 0;
        // Set end so that after the loop we have 1 to 64 bytes left to process.
        var end = ((len - 1) >> 6) * 64;
        var last64 = end + ((len - 1) & 63) - 63;
        do {
            x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(k1);
            y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(k1);
            x = x.xor(w[1]);
            y = y.add(v[0]).add(fetch64(s, offset + 40));
            z = rotate64(z.add(w[0]), 33).mul(k1);
            v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(k1), x.add(w[0]));
            w = weakHashLen32WithSeedsStr(s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16)));
            // Swap x and z (compiled destructuring assignment).
            _a = __read([x, z], 2), z = _a[0], x = _a[1];
            offset += 64;
        } while (offset !== end);
        var mul = k1.add(z.and(0xff).shl(1));
        // Point to the last 64 bytes of input.
        offset = last64;
        w[0] = w[0].add((len - 1) & 63);
        v[0] = v[0].add(w[0]);
        w[0] = w[0].add(v[0]);
        // Final round over the trailing (possibly overlapping) 64-byte window.
        x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(mul);
        y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(mul);
        x = x.xor(w[1].mul(9));
        y = y.add(v[0].mul(9).add(fetch64(s, offset + 40)));
        z = rotate64(z.add(w[0]), 33).mul(mul);
        v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(mul), x.add(w[0]));
        w = weakHashLen32WithSeedsStr(s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16)));
        // Swap x and z (compiled destructuring assignment).
        _b = __read([x, z], 2), z = _b[0], x = _b[1];
        return hashLen16(hashLen16(v[0], w[0], mul).add(shiftMix(y).mul(k0)).add(z), hashLen16(v[1], w[1], mul).add(x), mul);
    }
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Create typed array for scalar value. Used for storing in `DataStorage`.
+ */
+ function createScalarValue(value, dtype) {
+ if (dtype === 'string') {
+ return encodeString(value);
+ }
+ return toTypedArray([value], dtype);
+ }
+ function noConversionNeeded(a, dtype) {
+ return (a instanceof Float32Array && dtype === 'float32') ||
+ (a instanceof Int32Array && dtype === 'int32') ||
+ (a instanceof Uint8Array && dtype === 'bool');
+ }
+ function toTypedArray(a, dtype) {
+ if (dtype === 'string') {
+ throw new Error('Cannot convert a string[] to a TypedArray');
+ }
+ if (Array.isArray(a)) {
+ a = flatten(a);
+ }
+ if (env().getBool('DEBUG')) {
+ checkConversionForErrors(a, dtype);
+ }
+ if (noConversionNeeded(a, dtype)) {
+ return a;
+ }
+ if (dtype == null || dtype === 'float32' || dtype === 'complex64') {
+ return new Float32Array(a);
+ }
+ else if (dtype === 'int32') {
+ return new Int32Array(a);
+ }
+ else if (dtype === 'bool') {
+ var bool = new Uint8Array(a.length);
+ for (var i = 0; i < bool.length; ++i) {
+ if (Math.round(a[i]) !== 0) {
+ bool[i] = 1;
+ }
+ }
+ return bool;
+ }
+ else {
+ throw new Error("Unknown data type " + dtype);
+ }
+ }
+ /**
+ * Returns the current high-resolution time in milliseconds relative to an
+ * arbitrary time in the past. It works across different platforms (node.js,
+ * browsers).
+ *
+ * ```js
+ * console.log(tf.util.now());
+ * ```
+ *
+ * @doc {heading: 'Util', namespace: 'util'}
+ */
+ function now() {
+ return env().platform.now();
+ }
+ /**
+ * Returns a platform-specific implementation of
+ * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).
+ *
+ * If `fetch` is defined on the global object (`window`, `process`, etc.),
+ * `tf.util.fetch` returns that function.
+ *
+ * If not, `tf.util.fetch` returns a platform-specific solution.
+ *
+ * ```js
+ * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');
+ * // handle response
+ * ```
+ *
+ * @doc {heading: 'Util'}
+ */
+ function fetch$1(path, requestInits) {
+ return env().platform.fetch(path, requestInits);
+ }
+ /**
+ * Encodes the provided string into bytes using the provided encoding scheme.
+ *
+ * @param s The string to encode.
+ * @param encoding The encoding scheme. Defaults to utf-8.
+ *
+ * @doc {heading: 'Util'}
+ */
+ function encodeString(s, encoding) {
+ if (encoding === void 0) { encoding = 'utf-8'; }
+ encoding = encoding || 'utf-8';
+ return env().platform.encode(s, encoding);
+ }
+ /**
+ * Decodes the provided bytes into a string using the provided encoding scheme.
+ * @param bytes The bytes to decode.
+ *
+ * @param encoding The encoding scheme. Defaults to utf-8.
+ *
+ * @doc {heading: 'Util'}
+ */
+ function decodeString(bytes, encoding) {
+ if (encoding === void 0) { encoding = 'utf-8'; }
+ encoding = encoding || 'utf-8';
+ return env().platform.decode(bytes, encoding);
+ }
+
    // Namespace object re-exporting the utility helpers defined in this file as
    // `tf.util`. Built with a null prototype marker so no Object.prototype
    // members leak into the public surface.
    var util = {
        __proto__: null,
        createScalarValue: createScalarValue,
        toTypedArray: toTypedArray,
        now: now,
        fetch: fetch$1,
        encodeString: encodeString,
        decodeString: decodeString,
        shuffle: shuffle,
        shuffleCombo: shuffleCombo,
        clamp: clamp,
        nearestLargerEven: nearestLargerEven,
        swap: swap,
        sum: sum$1,
        randUniform: randUniform,
        distSquared: distSquared,
        assert: assert,
        assertShapesMatch: assertShapesMatch,
        assertNonNull: assertNonNull,
        flatten: flatten,
        sizeFromShape: sizeFromShape,
        isScalarShape: isScalarShape,
        arraysEqual: arraysEqual,
        isInt: isInt,
        tanh: tanh$1,
        sizeToSquarishShape: sizeToSquarishShape,
        createShuffledIndices: createShuffledIndices,
        rightPad: rightPad,
        repeatedTry: repeatedTry,
        inferFromImplicitShape: inferFromImplicitShape,
        parseAxisParam: parseAxisParam,
        squeezeShape: squeezeShape,
        getTypedArrayFromDType: getTypedArrayFromDType,
        getArrayFromDType: getArrayFromDType,
        checkConversionForErrors: checkConversionForErrors,
        isValidDtype: isValidDtype,
        hasEncodingLoss: hasEncodingLoss,
        isTypedArray: isTypedArray,
        bytesPerElement: bytesPerElement,
        bytesFromStringArray: bytesFromStringArray,
        isString: isString,
        isBoolean: isBoolean,
        isNumber: isNumber,
        inferDtype: inferDtype,
        isFunction: isFunction,
        nearestDivisor: nearestDivisor,
        computeStrides: computeStrides,
        toNestedArray: toNestedArray,
        makeOnesTypedArray: makeOnesTypedArray,
        makeZerosTypedArray: makeZerosTypedArray,
        makeZerosNestedTypedArray: makeZerosNestedTypedArray,
        assertNonNegativeIntegerDimensions: assertNonNegativeIntegerDimensions,
        locToIndex: locToIndex,
        indexToLoc: indexToLoc,
        isPromise: isPromise,
        hexToLong: hexToLong,
        fingerPrint64: fingerPrint64
    };
+
    // Measures kernel execution time and optionally validates kernel outputs.
    // `backendTimer` supplies backend-specific timing; `logger` formats results
    // (a default Logger is created when none is given).
    var Profiler = /** @class */ (function () {
        function Profiler(backendTimer, logger) {
            this.backendTimer = backendTimer;
            this.logger = logger;
            if (logger == null) {
                this.logger = new Logger();
            }
        }
        // Runs kernel function `f`, timing it with the backend timer when one is
        // available, otherwise by forcing synchronous reads of all outputs and
        // measuring wall-clock time. Returns a profile whose timing fields are
        // promises.
        Profiler.prototype.profileKernel = function (kernelName, inputs, f) {
            var e_1, _a;
            var outputs;
            var holdResultWrapperFn = function () {
                outputs = f();
            };
            var timer;
            var start = now();
            if (this.backendTimer.timerAvailable()) {
                timer = this.backendTimer.time(holdResultWrapperFn);
            }
            else {
                holdResultWrapperFn();
                try {
                    // Compiled for...of (TS __values helper): force each output to
                    // finish computing so the wall-clock measurement is meaningful.
                    for (var outputs_1 = __values(outputs), outputs_1_1 = outputs_1.next(); !outputs_1_1.done; outputs_1_1 = outputs_1.next()) {
                        var output = outputs_1_1.value;
                        output.dataSync();
                    }
                }
                catch (e_1_1) { e_1 = { error: e_1_1 }; }
                finally {
                    try {
                        if (outputs_1_1 && !outputs_1_1.done && (_a = outputs_1.return)) _a.call(outputs_1);
                    }
                    finally { if (e_1) throw e_1.error; }
                }
                timer = Promise.resolve({ kernelMs: now() - start });
            }
            if (env().getBool('CHECK_COMPUTATION_FOR_ERRORS')) {
                var _loop_1 = function (i) {
                    var output = outputs[i];
                    // Dangling promise here because we don't want to propagate up
                    // asynchronicity.
                    output.data().then(function (tensorVals) {
                        checkComputationForErrors(tensorVals, output.dtype, kernelName);
                    });
                };
                for (var i = 0; i < outputs.length; i++) {
                    _loop_1(i);
                }
            }
            var kernelProfile = {
                kernelName: kernelName,
                outputs: outputs,
                inputs: inputs,
                timeMs: timer.then(function (timing) { return timing.kernelMs; }),
                extraInfo: timer.then(function (timing) { return timing.getExtraProfileInfo != null ?
                    timing.getExtraProfileInfo() :
                    ''; })
            };
            return kernelProfile;
        };
        // Logs one profile entry per output tensor once its data and the timing
        // promises have all resolved.
        Profiler.prototype.logKernelProfile = function (kernelProfile) {
            var _this = this;
            var kernelName = kernelProfile.kernelName, outputs = kernelProfile.outputs, timeMs = kernelProfile.timeMs, inputs = kernelProfile.inputs, extraInfo = kernelProfile.extraInfo;
            outputs.forEach(function (result) {
                Promise.all([result.data(), timeMs, extraInfo]).then(function (valueContainer) {
                    _this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]);
                });
            });
        };
        return Profiler;
    }());
+ function checkComputationForErrors(vals, dtype, kernelName) {
+ if (dtype !== 'float32') {
+ // Only floating point computations will generate NaN values
+ return false;
+ }
+ for (var i = 0; i < vals.length; i++) {
+ var num = vals[i];
+ if (isNaN(num) || !isFinite(num)) {
+ // Throwing custom exception so behavior is testable.
+ console.warn("Found " + num + " in the result of '" + kernelName + "'");
+ return true;
+ }
+ }
+ return false;
+ }
    // Console logger for kernel profiles: prints one color-coded line per kernel
    // with its name, time, output rank/shape/size, and input shapes.
    var Logger = /** @class */ (function () {
        function Logger() {
        }
        Logger.prototype.logKernelProfile = function (name, result, vals, timeMs, inputs, extraInfo) {
            // timeMs is either a number (milliseconds) or an object carrying an
            // 'error' message from the backend timer.
            var time = typeof timeMs === 'number' ? rightPad(timeMs + "ms", 9) :
                timeMs['error'];
            var paddedName = rightPad(name, 25);
            var rank = result.rank;
            var size = result.size;
            var shape = rightPad(result.shape.toString(), 14);
            var inputShapesDescription = '';
            for (var name_1 in inputs) {
                var input = inputs[name_1];
                if (input != null) {
                    // The input might be a non-tensor (e.g HTMLImageElement), in which case
                    // we claim the output shape as input shape.
                    var inputShape = input.shape || result.shape;
                    var inputRank = inputShape.length;
                    inputShapesDescription +=
                        name_1 + ": " + inputRank + "D " + (inputRank > 0 ? inputShape : '') + " ";
                }
            }
            // %c segments pair one-to-one with the trailing style arguments.
            console.log("%c" + paddedName + "\t%c" + time + "\t%c" + rank + "D " + shape + "\t%c" + size + "\t%c" + inputShapesDescription + "\t%c" + extraInfo, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue');
        };
        return Logger;
    }());
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes a list of TapeNodes that connect x to y, filtering everything else
+ * out and preserving the order of the original tape elements.
+ *
+ * @param tape The tape elements to filter.
+ * @param xs The input Tensors.
+ * @param y The output Tensor.
+ */
+ function getFilteredNodesXToY(tape, xs, y) {
+ // Forward pass to compute all the nodes and Tensors that are transitively a
+ // function of x.
+ var tensorsFromX = {};
+ var nodesFromX = {};
+ for (var i = 0; i < xs.length; i++) {
+ tensorsFromX[xs[i].id] = true;
+ }
+ for (var i = 0; i < tape.length; i++) {
+ var node = tape[i];
+ var nodeInputs = node.inputs;
+ for (var inputName in nodeInputs) {
+ var input = nodeInputs[inputName];
+ var anyInputFromX = false;
+ for (var j = 0; j < xs.length; j++) {
+ if (tensorsFromX[input.id]) {
+ node.outputs.forEach(function (output) { return tensorsFromX[output.id] = true; });
+ anyInputFromX = true;
+ nodesFromX[node.id] = true;
+ break;
+ }
+ }
+ if (anyInputFromX) {
+ break;
+ }
+ }
+ }
+ // Backward pass to find all of the nodes and Tensors that lead to y.
+ var tensorsLeadToY = {};
+ tensorsLeadToY[y.id] = true;
+ var nodesToY = {};
+ for (var i = tape.length - 1; i >= 0; i--) {
+ var node = tape[i];
+ var nodeInputs = node.inputs;
+ // If any of the outputs lead to y, mark all of the inputs as leading to y.
+ for (var j = 0; j < node.outputs.length; j++) {
+ if (tensorsLeadToY[node.outputs[j].id]) {
+ for (var inputName in nodeInputs) {
+ tensorsLeadToY[nodeInputs[inputName].id] = true;
+ nodesToY[node.id] = true;
+ }
+ break;
+ }
+ }
+ }
+ // Return the paths that come from x and lead to y.
+ var filteredTape = [];
+ for (var i = 0; i < tape.length; i++) {
+ var node = tape[i];
+ if (nodesFromX[node.id] && nodesToY[node.id]) {
+ // Prune the inputs from the node that aren't a function of x.
+ var prunedInputs = {};
+ for (var inputName in node.inputs) {
+ var nodeInput = node.inputs[inputName];
+ if (tensorsFromX[nodeInput.id]) {
+ prunedInputs[inputName] = nodeInput;
+ }
+ }
+ // Copy the node and overwrite inputsAndArgs to the pruned version.
+ var prunedNode = Object.assign({}, node);
+ prunedNode.inputs = prunedInputs;
+ prunedNode.outputs = node.outputs;
+ filteredTape.push(prunedNode);
+ }
+ }
+ return filteredTape;
+ }
    /**
     * Backpropagate gradients through the filtered TapeNodes.
     *
     * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map
     * is mutated by this method.
     * @param filteredTape The filtered TapeNodes to backprop through.
     * @param tidy Function that scopes tensor allocations made by a gradient fn.
     * @param add Function used to accumulate two gradients for the same input.
     */
    function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) {
        var _loop_1 = function (i) {
            var node = filteredTape[i];
            var dys = [];
            node.outputs.forEach(function (o) {
                var gradTensor = tensorAccumulatedGradientMap[o.id];
                if (gradTensor != null) {
                    dys.push(gradTensor);
                }
                else {
                    // This particular output is not in the back-propagation subgraph, so it
                    // does not affect the final output, thus we put null for its dy.
                    dys.push(null);
                }
            });
            if (node.gradient == null) {
                throw new Error("Cannot compute gradient: gradient function not found " +
                    ("for " + node.kernelName + "."));
            }
            // Backprop dy through this node and accumulate gradients over the inputs.
            var inputGradients = node.gradient(dys);
            var _loop_2 = function (inputName) {
                if (!(inputName in inputGradients)) {
                    throw new Error("Cannot backprop through input " + inputName + ". " +
                        ("Available gradients found: " + Object.keys(inputGradients) + "."));
                }
                // Call the gradient function.
                var dx = tidy(function () { return inputGradients[inputName](); });
                if (dx.dtype !== 'float32') {
                    throw new Error("Error in gradient for op " + node.kernelName + ". The gradient of input " +
                        (inputName + " must have 'float32' dtype, but has '" + dx.dtype + "'"));
                }
                var x = node.inputs[inputName];
                if (!arraysEqual(dx.shape, x.shape)) {
                    throw new Error("Error in gradient for op " + node.kernelName + ". The gradient of input " +
                        ("'" + inputName + "' has shape '" + dx.shape + "', which does not match ") +
                        ("the shape of the input '" + x.shape + "'"));
                }
                if (tensorAccumulatedGradientMap[x.id] == null) {
                    tensorAccumulatedGradientMap[x.id] = dx;
                }
                else {
                    // Accumulate with the existing gradient and release the old one.
                    var curGradient = tensorAccumulatedGradientMap[x.id];
                    tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);
                    curGradient.dispose();
                }
            };
            for (var inputName in node.inputs) {
                _loop_2(inputName);
            }
        };
        // Walk the tape backward and keep a map of Tensor to its gradient.
        for (var i = filteredTape.length - 1; i >= 0; i--) {
            _loop_1(i);
        }
    }
+
    // Tensor-printing configuration used by tensorToString and its helpers.
    // Maximum number of values before we decide to show ellipsis.
    var FORMAT_LIMIT_NUM_VALS = 20;
    // Number of first and last values to show when displaying a, b,...,y, z.
    var FORMAT_NUM_FIRST_LAST_VALS = 3;
    // Number of significant digits to show.
    var FORMAT_NUM_SIG_DIGITS = 7;
    // Renders a tensor's values (and, when verbose, its dtype/rank/shape) as a
    // multi-line string for console display.
    function tensorToString(vals, shape, dtype, verbose) {
        var strides = computeStrides(shape);
        // Per-column padding so numbers line up across rows.
        var padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);
        var rank = shape.length;
        var valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);
        var lines = ['Tensor'];
        if (verbose) {
            lines.push(" dtype: " + dtype);
            lines.push(" rank: " + rank);
            lines.push(" shape: [" + shape + "]");
            lines.push(" values:");
        }
        lines.push(valsLines.map(function (l) { return ' ' + l; }).join('\n'));
        return lines.join('\n');
    }
+ function computeMaxSizePerColumn(vals, shape, dtype, strides) {
+ var n = sizeFromShape(shape);
+ var numCols = strides[strides.length - 1];
+ var padPerCol = new Array(numCols).fill(0);
+ var rank = shape.length;
+ var valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals;
+ if (rank > 1) {
+ for (var row = 0; row < n / numCols; row++) {
+ var offset = row * numCols;
+ for (var j = 0; j < numCols; j++) {
+ padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length);
+ }
+ }
+ }
+ return padPerCol;
+ }
+ function valToString(val, pad, dtype) {
+ var valStr;
+ if (Array.isArray(val)) {
+ valStr = parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS)) + " + " +
+ (parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS)) + "j");
+ }
+ else if (isString(val)) {
+ valStr = "'" + val + "'";
+ }
+ else if (dtype === 'bool') {
+ valStr = boolNumToString(val);
+ }
+ else {
+ valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();
+ }
+ return rightPad(valStr, pad);
+ }
+ function boolNumToString(v) {
+ return v === 0 ? 'false' : 'true';
+ }
    /**
     * Recursively renders one (sub-)tensor as an array of text lines, eliding
     * the middle with '...' when a dimension has more than
     * FORMAT_LIMIT_NUM_VALS entries.
     *
     * @param vals Flat value array for this sub-tensor; complex64 stores two
     *     flat entries (re, im) per logical element.
     * @param shape Shape of this sub-tensor.
     * @param dtype Data type; controls per-value formatting.
     * @param strides Strides matching `shape`.
     * @param padPerCol Per-column pad widths (from computeMaxSizePerColumn).
     * @param isLast Whether this is the final sibling at its nesting level;
     *     controls the trailing separator appended to the last line.
     */
    function subTensorToString(vals, shape, dtype, strides, padPerCol, isLast) {
        if (isLast === void 0) { isLast = true; }
        // complex64 keeps two flat values per logical element.
        var storagePerElement = dtype === 'complex64' ? 2 : 1;
        var size = shape[0];
        var rank = shape.length;
        // Rank 0: a single scalar value, no brackets.
        if (rank === 0) {
            if (dtype === 'complex64') {
                var complexTuple = createComplexTuples(vals);
                return [valToString(complexTuple[0], 0, dtype)];
            }
            if (dtype === 'bool') {
                return [boolNumToString(vals[0])];
            }
            return [vals[0].toString()];
        }
        // Rank 1: one bracketed row, possibly elided in the middle.
        if (rank === 1) {
            if (size > FORMAT_LIMIT_NUM_VALS) {
                var firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;
                var firstVals = Array.from(vals.slice(0, firstValsSize));
                var lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement));
                if (dtype === 'complex64') {
                    firstVals = createComplexTuples(firstVals);
                    lastVals = createComplexTuples(lastVals);
                }
                return [
                    '[' +
                        firstVals.map(function (x, i) { return valToString(x, padPerCol[i], dtype); })
                            .join(', ') +
                        ', ..., ' +
                        lastVals
                            .map(function (x, i) { return valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype); })
                            .join(', ') +
                        ']'
                ];
            }
            var displayVals = dtype === 'complex64' ? createComplexTuples(vals) :
                Array.from(vals);
            return [
                '[' +
                    displayVals.map(function (x, i) { return valToString(x, padPerCol[i], dtype); })
                        .join(', ') +
                    ']'
            ];
        }
        // The array is rank 2 or more: render each sub-tensor recursively and
        // frame the resulting lines with brackets and separators.
        var subshape = shape.slice(1);
        var substrides = strides.slice(1);
        var stride = strides[0] * storagePerElement;
        var lines = [];
        if (size > FORMAT_LIMIT_NUM_VALS) {
            // Elide the middle: first and last FORMAT_NUM_FIRST_LAST_VALS rows.
            for (var i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {
                var start = i * stride;
                var end = start + stride;
                lines.push.apply(lines, __spread(subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */)));
            }
            lines.push('...');
            for (var i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {
                var start = i * stride;
                var end = start + stride;
                lines.push.apply(lines, __spread(subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */)));
            }
        }
        else {
            for (var i = 0; i < size; i++) {
                var start = i * stride;
                var end = start + stride;
                lines.push.apply(lines, __spread(subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */)));
            }
        }
        // Rank-2 rows end with a comma; higher ranks separate blocks with
        // blank lines instead (see newLineSep below).
        var sep = rank === 2 ? ',' : '';
        lines[0] = '[' + lines[0] + sep;
        for (var i = 1; i < lines.length - 1; i++) {
            lines[i] = ' ' + lines[i] + sep;
        }
        var newLineSep = ',\n';
        for (var i = 2; i < rank; i++) {
            // One extra blank line per additional dimension.
            newLineSep += '\n';
        }
        lines[lines.length - 1] =
            ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);
        return lines;
    }
+ function createComplexTuples(vals) {
+ var complexTuples = [];
+ for (var i = 0; i < vals.length; i += 2) {
+ complexTuples.push([vals[i], vals[i + 1]]);
+ }
+ return complexTuples;
+ }
+
    /**
     * A mutable object, similar to `tf.Tensor`, that allows users to set values
     * at locations before converting to an immutable `tf.Tensor`.
     *
     * See `tf.buffer` for creating a tensor buffer.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    var TensorBuffer = /** @class */ (function () {
        /**
         * @param shape Buffer shape (copied defensively).
         * @param dtype Data type; 'complex64' is rejected.
         * @param values Optional backing array; when given, its length must
         *     equal the size inferred from `shape`. Otherwise a fresh array of
         *     the right dtype is allocated.
         */
        function TensorBuffer(shape, dtype, values) {
            var _this = this;
            this.dtype = dtype;
            this.shape = shape.slice();
            this.size = sizeFromShape(shape);
            if (values != null) {
                var n_1 = values.length;
                assert(n_1 === this.size, function () { return "Length of values '" + n_1 + "' does not match the size " +
                    ("inferred by the shape '" + _this.size + "'."); });
            }
            if (dtype === 'complex64') {
                throw new Error("complex64 dtype TensorBuffers are not supported. Please create " +
                    "a TensorBuffer for the real and imaginary parts separately and " +
                    "call tf.complex(real, imag).");
            }
            this.values = values || getArrayFromDType(dtype, this.size);
            this.strides = computeStrides(shape);
        }
        /**
         * Sets a value in the buffer at a given location.
         *
         * @param value The value to set.
         * @param locs The location indices.
         *
         * @doc {heading: 'Tensors', subheading: 'Creation'}
         */
        TensorBuffer.prototype.set = function (value) {
            var _this = this;
            var locs = [];
            for (var _i = 1; _i < arguments.length; _i++) {
                locs[_i - 1] = arguments[_i];
            }
            // A scalar buffer may be addressed with no coordinates.
            if (locs.length === 0) {
                locs = [0];
            }
            assert(locs.length === this.rank, function () { return "The number of provided coordinates (" + locs.length + ") must " +
                ("match the rank (" + _this.rank + ")"); });
            var index = this.locToIndex(locs);
            this.values[index] = value;
        };
        /**
         * Returns the value in the buffer at the provided location.
         *
         * @param locs The location indices.
         *
         * @doc {heading: 'Tensors', subheading: 'Creation'}
         */
        TensorBuffer.prototype.get = function () {
            var e_1, _b;
            var locs = [];
            for (var _i = 0; _i < arguments.length; _i++) {
                locs[_i] = arguments[_i];
            }
            if (locs.length === 0) {
                locs = [0];
            }
            var i = 0;
            try {
                // Bounds-check every coordinate against the shape.
                for (var locs_1 = __values(locs), locs_1_1 = locs_1.next(); !locs_1_1.done; locs_1_1 = locs_1.next()) {
                    var loc = locs_1_1.value;
                    if (loc < 0 || loc >= this.shape[i]) {
                        var msg = "Requested out of range element at " + locs + ". " +
                            (" Buffer shape=" + this.shape);
                        throw new Error(msg);
                    }
                    i++;
                }
            }
            catch (e_1_1) { e_1 = { error: e_1_1 }; }
            finally {
                // tslib iterator cleanup: close the iterator, then rethrow any
                // captured error.
                try {
                    if (locs_1_1 && !locs_1_1.done && (_b = locs_1.return)) _b.call(locs_1);
                }
                finally { if (e_1) throw e_1.error; }
            }
            // Row-major flat index: last coordinate plus stride-weighted
            // leading coordinates.
            var index = locs[locs.length - 1];
            for (var i_1 = 0; i_1 < locs.length - 1; ++i_1) {
                index += this.strides[i_1] * locs[i_1];
            }
            return this.values[index];
        };
        /** Converts an n-d coordinate list to a flat row-major index. */
        TensorBuffer.prototype.locToIndex = function (locs) {
            if (this.rank === 0) {
                return 0;
            }
            else if (this.rank === 1) {
                return locs[0];
            }
            var index = locs[locs.length - 1];
            for (var i = 0; i < locs.length - 1; ++i) {
                index += this.strides[i] * locs[i];
            }
            return index;
        };
        /** Converts a flat row-major index back to an n-d coordinate list. */
        TensorBuffer.prototype.indexToLoc = function (index) {
            if (this.rank === 0) {
                return [];
            }
            else if (this.rank === 1) {
                return [index];
            }
            var locs = new Array(this.shape.length);
            for (var i = 0; i < locs.length - 1; ++i) {
                locs[i] = Math.floor(index / this.strides[i]);
                index -= locs[i] * this.strides[i];
            }
            locs[locs.length - 1] = index;
            return locs;
        };
        Object.defineProperty(TensorBuffer.prototype, "rank", {
            // Number of dimensions (length of the shape).
            get: function () {
                return this.shape.length;
            },
            enumerable: true,
            configurable: true
        });
        /**
         * Creates an immutable `tf.Tensor` object from the buffer.
         *
         * @doc {heading: 'Tensors', subheading: 'Creation'}
         */
        TensorBuffer.prototype.toTensor = function () {
            return trackerFn().makeTensor(this.values, this.shape, this.dtype);
        };
        return TensorBuffer;
    }());
+ // For tracking tensor creation and disposal.
+ var trackerFn = null;
+ // Used by chaining methods to call into ops.
+ var opHandler$1 = null;
+ /**
+ * An external consumer can register itself as the tensor tracker. This way
+ * the Tensor class can notify the tracker for every tensor created and
+ * disposed.
+ */
+ function setTensorTracker(fn) {
+ trackerFn = fn;
+ }
+ /**
+ * An external consumer can register itself as the op handler. This way the
+ * Tensor class can have chaining methods that call into ops via the op
+ * handler.
+ */
+ function setOpHandler(handler) {
+ opHandler$1 = handler;
+ }
    /**
     * A `tf.Tensor` object represents an immutable, multidimensional array of
     * numbers that has a shape and a data type.
     *
     * For performance reasons, functions that create tensors do not necessarily
     * perform a copy of the data passed to them (e.g. if the data is passed as a
     * `Float32Array`), and changes to the data will change the tensor. This is not
     * a feature and is not supported. To avoid this behavior, use the tensor before
     * changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.
     *
     * See `tf.tensor` for details on how to create a `tf.Tensor`.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    var Tensor = /** @class */ (function () {
        // dataId refers to backend-managed storage; multiple tensors may
        // share one dataId (e.g. after clone), while id is unique per tensor.
        function Tensor(shape, dtype, dataId, id) {
            /** Whether this tensor has been globally kept. */
            this.kept = false;
            this.isDisposedInternal = false;
            this.shape = shape.slice();
            this.dtype = dtype || 'float32';
            this.size = sizeFromShape(shape);
            this.strides = computeStrides(shape);
            this.dataId = dataId;
            this.id = id;
            this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher');
        }
        Object.defineProperty(Tensor.prototype, "rank", {
            // Number of dimensions (length of the shape).
            get: function () {
                return this.shape.length;
            },
            enumerable: true,
            configurable: true
        });
        /**
         * Returns a promise of `tf.TensorBuffer` that holds the underlying data.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.buffer = function () {
            return __awaiter(this, void 0, void 0, function () {
                var vals;
                return __generator(this, function (_b) {
                    switch (_b.label) {
                        case 0: return [4 /*yield*/, this.data()];
                        case 1:
                            vals = _b.sent();
                            return [2 /*return*/, opHandler$1.buffer(this.shape, this.dtype, vals)];
                    }
                });
            });
        };
        /**
         * Returns a `tf.TensorBuffer` that holds the underlying data.
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.bufferSync = function () {
            return opHandler$1.buffer(this.shape, this.dtype, this.dataSync());
        };
        /**
         * Returns the tensor data as a nested array. The transfer of data is done
         * asynchronously.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.array = function () {
            return __awaiter(this, void 0, void 0, function () {
                var vals;
                return __generator(this, function (_b) {
                    switch (_b.label) {
                        case 0: return [4 /*yield*/, this.data()];
                        case 1:
                            vals = _b.sent();
                            return [2 /*return*/, toNestedArray(this.shape, vals, this.dtype === 'complex64')];
                    }
                });
            });
        };
        /**
         * Returns the tensor data as a nested array. The transfer of data is done
         * synchronously.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.arraySync = function () {
            return toNestedArray(this.shape, this.dataSync(), this.dtype === 'complex64');
        };
        /**
         * Asynchronously downloads the values from the `tf.Tensor`. Returns a
         * promise of `TypedArray` that resolves when the computation has finished.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.data = function () {
            return __awaiter(this, void 0, void 0, function () {
                var data, bytes;
                return __generator(this, function (_b) {
                    switch (_b.label) {
                        case 0:
                            this.throwIfDisposed();
                            data = trackerFn().read(this.dataId);
                            if (!(this.dtype === 'string')) return [3 /*break*/, 2];
                            return [4 /*yield*/, data];
                        case 1:
                            bytes = _b.sent();
                            try {
                                // String tensors are stored as byte arrays;
                                // decode each element to a utf-8 string.
                                return [2 /*return*/, bytes.map(function (b) { return decodeString(b); })];
                            }
                            catch (_a) {
                                throw new Error('Failed to decode the string bytes into utf-8. ' +
                                    'To get the original bytes, call tensor.bytes().');
                            }
                            _b.label = 2;
                        case 2: return [2 /*return*/, data];
                    }
                });
            });
        };
        /**
         * Synchronously downloads the values from the `tf.Tensor`. This blocks the
         * UI thread until the values are ready, which can cause performance issues.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.dataSync = function () {
            this.throwIfDisposed();
            var data = trackerFn().readSync(this.dataId);
            if (this.dtype === 'string') {
                try {
                    // See data() above: decode stored bytes to utf-8 strings.
                    return data.map(function (b) { return decodeString(b); });
                }
                catch (_a) {
                    throw new Error('Failed to decode the string bytes into utf-8. ' +
                        'To get the original bytes, call tensor.bytes().');
                }
            }
            return data;
        };
        /** Returns the underlying bytes of the tensor's data. */
        Tensor.prototype.bytes = function () {
            return __awaiter(this, void 0, void 0, function () {
                var data;
                return __generator(this, function (_b) {
                    switch (_b.label) {
                        case 0:
                            this.throwIfDisposed();
                            return [4 /*yield*/, trackerFn().read(this.dataId)];
                        case 1:
                            data = _b.sent();
                            if (this.dtype === 'string') {
                                // Already raw bytes per element; no decoding.
                                return [2 /*return*/, data];
                            }
                            else {
                                return [2 /*return*/, new Uint8Array(data.buffer)];
                            }
                    }
                });
            });
        };
        /**
         * Disposes `tf.Tensor` from memory.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.dispose = function () {
            if (this.isDisposed) {
                // Disposing twice is a no-op.
                return;
            }
            trackerFn().disposeTensor(this);
            this.isDisposedInternal = true;
        };
        Object.defineProperty(Tensor.prototype, "isDisposed", {
            get: function () {
                return this.isDisposedInternal;
            },
            enumerable: true,
            configurable: true
        });
        // Guard used by all data-access methods.
        Tensor.prototype.throwIfDisposed = function () {
            if (this.isDisposed) {
                throw new Error("Tensor is disposed.");
            }
        };
        /**
         * Prints the `tf.Tensor`. See `tf.print` for details.
         *
         * @param verbose Whether to print verbose information about the tensor,
         *    including dtype and size.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.print = function (verbose) {
            if (verbose === void 0) { verbose = false; }
            return opHandler$1.print(this, verbose);
        };
        /**
         * Returns a copy of the tensor. See `tf.clone` for details.
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.clone = function () {
            this.throwIfDisposed();
            return opHandler$1.clone(this);
        };
        /**
         * Returns a human-readable description of the tensor. Useful for logging.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Tensor.prototype.toString = function (verbose) {
            if (verbose === void 0) { verbose = false; }
            var vals = this.dataSync();
            return tensorToString(vals, this.shape, this.dtype, verbose);
        };
        // Casts the tensor to a new dtype via the op handler.
        Tensor.prototype.cast = function (dtype) {
            this.throwIfDisposed();
            return opHandler$1.cast(this, dtype);
        };
        // Converts this tensor to a trainable (by default) Variable.
        Tensor.prototype.variable = function (trainable, name, dtype) {
            if (trainable === void 0) { trainable = true; }
            this.throwIfDisposed();
            return trackerFn().makeVariable(this, trainable, name, dtype);
        };
        return Tensor;
    }());
+ Object.defineProperty(Tensor, Symbol.hasInstance, {
+ value: function (instance) {
+ // Implementation note: we should use properties of the object that will be
+ // defined before the constructor body has finished executing (methods).
+ // This is because when this code is transpiled by babel, babel will call
+ // classCallCheck before the constructor body is run.
+ // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.
+ return !!instance && instance.data != null && instance.dataSync != null &&
+ instance.throwIfDisposed != null;
+ }
+ });
+ function getGlobalTensorClass() {
+ // Use getGlobal so that we can augment the Tensor class across package
+ // boundaries becase the node resolution alg may result in different modules
+ // being returned for this file depending on the path they are loaded from.
+ return getGlobal('Tensor', function () {
+ return Tensor;
+ });
+ }
+ // Global side effect. Cache global reference to Tensor class
+ getGlobalTensorClass();
    /**
     * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    var Variable = /** @class */ (function (_super) {
        __extends(Variable, _super);
        // Wraps an existing tensor: shares its dataId (no copy of the values).
        function Variable(initialValue, trainable, name, tensorId) {
            var _this = _super.call(this, initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId) || this;
            _this.trainable = trainable;
            _this.name = name;
            return _this;
        }
        /**
         * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have
         * the same shape and dtype as the old `tf.Tensor`.
         *
         * @param newValue New tensor to be assigned to this variable.
         *
         * @doc {heading: 'Tensors', subheading: 'Classes'}
         */
        Variable.prototype.assign = function (newValue) {
            if (newValue.dtype !== this.dtype) {
                throw new Error("dtype of the new value (" + newValue.dtype + ") and " +
                    ("previous value (" + this.dtype + ") must match"));
            }
            if (!arraysEqual(newValue.shape, this.shape)) {
                throw new Error("shape of the new value (" + newValue.shape + ") and " +
                    ("previous value (" + this.shape + ") must match"));
            }
            // Release the old data, adopt the new tensor's dataId, and bump
            // its refcount so the engine keeps the shared data alive.
            trackerFn().disposeTensor(this);
            this.dataId = newValue.dataId;
            trackerFn().incRef(this, null /* backend */);
        };
        // Variables are tracked separately from plain tensors by the engine.
        Variable.prototype.dispose = function () {
            trackerFn().disposeVariable(this);
            this.isDisposedInternal = true;
        };
        return Variable;
    }(Tensor));
+ Object.defineProperty(Variable, Symbol.hasInstance, {
+ value: function (instance) {
+ return instance instanceof Tensor && instance.assign != null &&
+ instance.assign instanceof Function;
+ }
+ });
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ exports.Rank = void 0;
+ (function (Rank) {
+ Rank["R0"] = "R0";
+ Rank["R1"] = "R1";
+ Rank["R2"] = "R2";
+ Rank["R3"] = "R3";
+ Rank["R4"] = "R4";
+ Rank["R5"] = "R5";
+ Rank["R6"] = "R6";
+ })(exports.Rank || (exports.Rank = {}));
+ // Looks for upcasting types. Used, for example, in operations with mixed dtype
+ // inputs.
+ var UpcastInt32AndMap;
+ (function (UpcastInt32AndMap) {
+ UpcastInt32AndMap["float32"] = "float32";
+ UpcastInt32AndMap["int32"] = "int32";
+ UpcastInt32AndMap["bool"] = "int32";
+ UpcastInt32AndMap["complex64"] = "complex64";
+ })(UpcastInt32AndMap || (UpcastInt32AndMap = {}));
+ var UpcastBoolAndMap;
+ (function (UpcastBoolAndMap) {
+ UpcastBoolAndMap["float32"] = "float32";
+ UpcastBoolAndMap["int32"] = "int32";
+ UpcastBoolAndMap["bool"] = "bool";
+ UpcastBoolAndMap["complex64"] = "complex64";
+ })(UpcastBoolAndMap || (UpcastBoolAndMap = {}));
+ var UpcastFloat32AndMap;
+ (function (UpcastFloat32AndMap) {
+ UpcastFloat32AndMap["float32"] = "float32";
+ UpcastFloat32AndMap["int32"] = "float32";
+ UpcastFloat32AndMap["bool"] = "float32";
+ UpcastFloat32AndMap["complex64"] = "complex64";
+ })(UpcastFloat32AndMap || (UpcastFloat32AndMap = {}));
+ var UpcastComplex64AndMap;
+ (function (UpcastComplex64AndMap) {
+ UpcastComplex64AndMap["float32"] = "complex64";
+ UpcastComplex64AndMap["int32"] = "complex64";
+ UpcastComplex64AndMap["bool"] = "complex64";
+ UpcastComplex64AndMap["complex64"] = "complex64";
+ })(UpcastComplex64AndMap || (UpcastComplex64AndMap = {}));
+ var upcastTypeMap = {
+ 'float32': UpcastFloat32AndMap,
+ 'int32': UpcastInt32AndMap,
+ 'bool': UpcastBoolAndMap,
+ 'complex64': UpcastComplex64AndMap
+ };
+ function upcastType(typeA, typeB) {
+ if (typeA === 'string' || typeB === 'string') {
+ if (typeA === 'string' && typeB === 'string') {
+ return 'string';
+ }
+ throw new Error("Can not upcast " + typeA + " with " + typeB);
+ }
+ return upcastTypeMap[typeA][typeB];
+ }
+ /** Returns the output type after summation. */
+ function sumOutType(type) {
+ return upcastType(type, 'int32');
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function makeTypesMatch(a, b) {
+ if (a.dtype === b.dtype) {
+ return [a, b];
+ }
+ var dtype = upcastType(a.dtype, b.dtype);
+ return [a.cast(dtype), b.cast(dtype)];
+ }
+ function assertTypesMatch(a, b) {
+ assert(a.dtype === b.dtype, function () { return "The dtypes of the first(" + a.dtype + ") and" +
+ (" second(" + b.dtype + ") input must match"); });
+ }
+ function isTensorInList(tensor, tensorList) {
+ return tensorList.some(function (x) { return x.id === tensor.id; });
+ }
+ /**
+ * Extracts any `Tensor`s found within the provided object.
+ *
+ * @param container an object that may be a `Tensor` or may directly contain
+ * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it
+ * is safe to pass any object here, except that `Promise`s are not
+ * supported.
+ * @returns An array of `Tensors` found within the passed object. If the
+ * argument is simply a `Tensor', a list containing that `Tensor` is
+ * returned. If the object is not a `Tensor` or does not
+ * contain `Tensors`, an empty list is returned.
+ */
+ function getTensorsInContainer(result) {
+ var list = [];
+ var seen = new Set();
+ walkTensorContainer(result, list, seen);
+ return list;
+ }
+ function walkTensorContainer(container, list, seen) {
+ if (container == null) {
+ return;
+ }
+ if (container instanceof Tensor) {
+ list.push(container);
+ return;
+ }
+ if (!isIterable(container)) {
+ return;
+ }
+ // Iteration over keys works also for arrays.
+ var iterable = container;
+ for (var k in iterable) {
+ var val = iterable[k];
+ if (!seen.has(val)) {
+ seen.add(val);
+ walkTensorContainer(val, list, seen);
+ }
+ }
+ }
+ // tslint:disable-next-line:no-any
+ function isIterable(obj) {
+ return Array.isArray(obj) || typeof obj === 'object';
+ }
+
+ var tensor_util = {
+ __proto__: null,
+ makeTypesMatch: makeTypesMatch,
+ assertTypesMatch: assertTypesMatch,
+ isTensorInList: isTensorInList,
+ getTensorsInContainer: getTensorsInContainer
+ };
+
+ function isRegisteredKernelInvocation(kernelInvocation) {
+ return kernelInvocation.kernelName != null;
+ }
+ var EngineState = /** @class */ (function () {
+ function EngineState() {
+ // Public since optimizers will use it.
+ this.registeredVariables = {};
+ this.nextTapeNodeId = 0;
+ this.numBytes = 0;
+ this.numTensors = 0;
+ this.numStringTensors = 0;
+ this.numDataBuffers = 0;
+ // Number of nested tf.grad() statements when computing higher-order
+ // gradients. E.g. `1` for first-order gradients and `2` for second-order
+ // gradients. Used to track if the tape should be removed after a backprop.
+ this.gradientDepth = 0;
+ // Number of nested kernel calls. When kernel depth is greater than 1, we turn
+ // off the tape.
+ this.kernelDepth = 0;
+ this.scopeStack = [];
+ /**
+ * Keeps track of the number of data moves during a kernel execution. We
+ * maintain a stack since kernels can call other kernels, recursively.
+ */
+ this.numDataMovesStack = [];
+ this.nextScopeId = 0;
+ this.tensorInfo = new WeakMap();
+ this.profiling = false;
+ this.activeProfile = {
+ newBytes: 0,
+ newTensors: 0,
+ peakBytes: 0,
+ kernels: [],
+ result: null,
+ get kernelNames() {
+ return Array.from(new Set(this.kernels.map(function (k) { return k.name; })));
+ }
+ };
+ }
+ EngineState.prototype.dispose = function () {
+ for (var variableName in this.registeredVariables) {
+ this.registeredVariables[variableName].dispose();
+ }
+ };
+ return EngineState;
+ }());
    // Engine: owns backend registration, initialization and engine state.
    var Engine = /** @class */ (function () {
        function Engine(ENV) {
            this.ENV = ENV;
            // Initialized backend instances, keyed by backend name.
            this.registry = {};
            // Registered {factory, priority} entries, keyed by backend name.
            this.registryFactory = {};
            // Monotonic id used to invalidate stale async backend inits.
            this.pendingBackendInitId = 0;
            this.state = new EngineState();
        }
        // Resolves once a usable backend is initialized, trying registered
        // backends in priority order. (Transpiled async state machine:
        // labels 1-4 form the loop over the sorted backend names.)
        Engine.prototype.ready = function () {
            return __awaiter(this, void 0, void 0, function () {
                var sortedBackends, i, backendName, success;
                return __generator(this, function (_a) {
                    switch (_a.label) {
                        case 0:
                            // An init is already in flight: just wait on it.
                            if (this.pendingBackendInit != null) {
                                return [2 /*return*/, this.pendingBackendInit.then(function () { })];
                            }
                            // Already have a live backend: nothing to do.
                            if (this.backendInstance != null) {
                                return [2 /*return*/];
                            }
                            sortedBackends = this.getSortedBackends();
                            i = 0;
                            _a.label = 1;
                        case 1:
                            if (!(i < sortedBackends.length)) return [3 /*break*/, 5];
                            backendName = sortedBackends[i];
                            return [4 /*yield*/, this.initializeBackend(backendName).success];
                        case 2:
                            success = _a.sent();
                            if (!success) return [3 /*break*/, 4];
                            // First backend that initialized wins; activate it.
                            return [4 /*yield*/, this.setBackend(backendName)];
                        case 3:
                            _a.sent();
                            return [2 /*return*/];
                        case 4:
                            i++;
                            return [3 /*break*/, 1];
                        case 5: throw new Error("Could not initialize any backends, all backend initializations " +
                            "failed.");
                    }
                });
            });
        };
+ Object.defineProperty(Engine.prototype, "backend", {
+ get: function () {
+ if (this.pendingBackendInit != null) {
+ throw new Error("Backend '" + this.backendName + "' has not yet been initialized. Make " +
+ "sure to await tf.ready() or await tf.setBackend() before calling " +
+ "other methods");
+ }
+ if (this.backendInstance == null) {
+ var _a = this.initializeBackendsAndReturnBest(), name = _a.name, asyncInit = _a.asyncInit;
+ if (asyncInit) {
+ throw new Error("The highest priority backend '" + name + "' has not yet been " +
+ "initialized. Make sure to await tf.ready() or " +
+ "await tf.setBackend() before calling other methods");
+ }
+ this.setBackend(name);
+ }
+ return this.backendInstance;
+ },
+ enumerable: true,
+ configurable: true
+ });
+ Engine.prototype.backendNames = function () {
+ return Object.keys(this.registryFactory);
+ };
+ Engine.prototype.findBackend = function (backendName) {
+ if (!(backendName in this.registry)) {
+ // If the backend hasn't been initialized but we have a registry entry for
+ // it, initialize it and return it.
+ if (backendName in this.registryFactory) {
+ var asyncInit = this.initializeBackend(backendName).asyncInit;
+ if (asyncInit) {
+ // Backend is not ready yet.
+ return null;
+ }
+ }
+ else {
+ return null;
+ }
+ }
+ return this.registry[backendName];
+ };
+ Engine.prototype.findBackendFactory = function (backendName) {
+ if (!(backendName in this.registryFactory)) {
+ return null;
+ }
+ return this.registryFactory[backendName].factory;
+ };
+ Engine.prototype.registerBackend = function (backendName, factory, priority) {
+ if (priority === void 0) { priority = 1; }
+ if (backendName in this.registryFactory) {
+ warn(backendName + " backend was already registered. " +
+ "Reusing existing backend factory.");
+ return false;
+ }
+ this.registryFactory[backendName] = { factory: factory, priority: priority };
+ return true;
+ };
        /**
         * Makes `backendName` the active backend, initializing it from its
         * factory if needed. Resolves to false when initialization fails.
         * (Transpiled async state machine: labels 1-3 await the success value
         * of an asynchronous factory.)
         */
        Engine.prototype.setBackend = function (backendName) {
            return __awaiter(this, void 0, void 0, function () {
                var _a, success, asyncInit, result, _b;
                return __generator(this, function (_c) {
                    switch (_c.label) {
                        case 0:
                            if (this.registryFactory[backendName] == null) {
                                throw new Error("Backend name '" + backendName + "' not found in registry");
                            }
                            this.backendName = backendName;
                            if (!(this.registry[backendName] == null)) return [3 /*break*/, 4];
                            this.backendInstance = null;
                            _a = this.initializeBackend(backendName), success = _a.success, asyncInit = _a.asyncInit;
                            if (!asyncInit) return [3 /*break*/, 2];
                            // Async factory: await its success promise.
                            return [4 /*yield*/, success];
                        case 1:
                            _b = _c.sent();
                            return [3 /*break*/, 3];
                        case 2:
                            _b = success;
                            _c.label = 3;
                        case 3:
                            result = _b;
                            if (!result) {
                                return [2 /*return*/, false];
                            }
                            _c.label = 4;
                        case 4:
                            this.backendInstance = this.registry[backendName];
                            this.setupRegisteredKernels();
                            // Reset the profiler.
                            this.profiler = new Profiler(this.backendInstance);
                            return [2 /*return*/, true];
                    }
                });
            });
        };
+ Engine.prototype.setupRegisteredKernels = function () {
+ var _this = this;
+ var kernels = getKernelsForBackend(this.backendName);
+ kernels.forEach(function (kernel) {
+ if (kernel.setupFunc != null) {
+ kernel.setupFunc(_this.backendInstance);
+ }
+ });
+ };
+ Engine.prototype.disposeRegisteredKernels = function (backendName) {
+ var _this = this;
+ var kernels = getKernelsForBackend(backendName);
+ kernels.forEach(function (kernel) {
+ if (kernel.disposeFunc != null) {
+ kernel.disposeFunc(_this.registry[backendName]);
+ }
+ });
+ };
        /**
         * Initializes a backend by looking up the backend name in the factory
         * registry and calling the factory method. Returns {success, asyncInit}
         * where success is a boolean (or, for async factories, a promise of
         * one) representing whether the initialization of the backend
         * succeeded. Throws an error if there is no backend in the factory
         * registry.
         */
        Engine.prototype.initializeBackend = function (backendName) {
            var _this = this;
            var registryFactoryEntry = this.registryFactory[backendName];
            if (registryFactoryEntry == null) {
                throw new Error("Cannot initialize backend " + backendName + ", no registration found.");
            }
            try {
                var backend = registryFactoryEntry.factory();
                /* Test if the factory returns a promise.
                  Done in a more liberal way than
                  previous 'Promise.resolve(backend)===backend'
                  as we needed to account for custom Promise
                  implementations (e.g. Angular) */
                if (backend && !(backend instanceof KernelBackend) &&
                    typeof backend.then === 'function') {
                    // Each pending init gets a fresh id; a later init bumps the
                    // counter, which makes this promise's eventual result stale.
                    var promiseId_1 = ++this.pendingBackendInitId;
                    var success = backend
                        .then(function (backendInstance) {
                        // Outdated promise. Another backend was set in the meantime.
                        if (promiseId_1 < _this.pendingBackendInitId) {
                            return false;
                        }
                        _this.registry[backendName] = backendInstance;
                        _this.pendingBackendInit = null;
                        return true;
                    })
                        .catch(function (err) {
                        // Outdated promise. Another backend was set in the meantime.
                        if (promiseId_1 < _this.pendingBackendInitId) {
                            return false;
                        }
                        _this.pendingBackendInit = null;
                        warn("Initialization of backend " + backendName + " failed");
                        warn(err.stack || err.message);
                        return false;
                    });
                    this.pendingBackendInit = success;
                    return { success: success, asyncInit: true };
                }
                else {
                    // Synchronous factory: register the instance immediately.
                    this.registry[backendName] = backend;
                    return { success: true, asyncInit: false };
                }
            }
            catch (err) {
                // Synchronous factory failure: report and signal failure.
                warn("Initialization of backend " + backendName + " failed");
                warn(err.stack || err.message);
                return { success: false, asyncInit: false };
            }
        };
// Removes a backend (and its factory registration) from the engine.
// Throws if the backend was never registered.
Engine.prototype.removeBackend = function (backendName) {
    var isRegistered = backendName in this.registryFactory;
    if (!isRegistered) {
        throw new Error(backendName + " backend not found in registry");
    }
    var isActive = this.backendName === backendName;
    // Invalidate any in-flight async initialization of this backend.
    if (isActive && this.pendingBackendInit != null) {
        this.pendingBackendInitId++;
    }
    if (backendName in this.registry) {
        // An instance exists: release its kernels and the instance itself.
        this.disposeRegisteredKernels(backendName);
        this.registry[backendName].dispose();
        delete this.registry[backendName];
    }
    delete this.registryFactory[backendName];
    // Unset the backend if it is active.
    if (isActive) {
        this.pendingBackendInit = null;
        this.backendName = null;
        this.backendInstance = null;
    }
};
// Returns the registered backend names sorted by priority, highest first.
// Throws when no factory has been registered yet.
Engine.prototype.getSortedBackends = function () {
    var _this = this;
    var names = Object.keys(this.registryFactory);
    if (names.length === 0) {
        throw new Error('No backend found in registry.');
    }
    return names.sort(function (a, b) {
        // Descending priority order.
        return _this.registryFactory[b].priority -
            _this.registryFactory[a].priority;
    });
};
// Tries backends in priority order and returns the first one that either
// initialized synchronously or started an async initialization.
Engine.prototype.initializeBackendsAndReturnBest = function () {
    var candidates = this.getSortedBackends();
    for (var i = 0; i < candidates.length; i++) {
        var candidate = candidates[i];
        var result = this.initializeBackend(candidate);
        if (result.asyncInit || result.success) {
            return { name: candidate, asyncInit: result.asyncInit };
        }
    }
    throw new Error("Could not initialize any backends, all backend initializations " +
        "failed.");
};
// Moves the data behind `dataId` from its current backend to `backend`,
// preserving the backend-side reference count.
Engine.prototype.moveData = function (backend, dataId) {
    var info = this.state.tensorInfo.get(dataId);
    var srcBackend = info.backend;
    // Read the values out of the source backend before disposing them there.
    var values = this.readSync(dataId);
    var refCount = srcBackend.refCount(dataId);
    // Delete the tensor from the old backend and move it to the new
    // backend.
    srcBackend.disposeData(dataId, true);
    info.backend = backend;
    backend.move(dataId, values, info.shape, info.dtype, refCount);
    if (this.shouldCheckForMemLeaks()) {
        // Track the number of moves during a kernel execution to correctly
        // detect memory leaks.
        this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;
    }
};
// Runs `fn` inside a fresh scope so intermediate tensors are disposed when
// the scope ends. Accepts tidy(fn) or tidy(name, fn).
Engine.prototype.tidy = function (nameOrFn, fn) {
    var _this = this;
    var name = null;
    if (fn == null) {
        // Single-argument form: tidy(fn).
        if (typeof nameOrFn !== 'function') {
            throw new Error('Please provide a function to tidy()');
        }
        fn = nameOrFn;
    }
    else {
        // Two-argument form: tidy(name, fn).
        if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {
            throw new Error('When calling with two arguments, the first argument ' +
                'to tidy() must be a string');
        }
        if (typeof fn !== 'function') {
            throw new Error('When calling with two arguments, the 2nd argument ' +
                'to tidy() must be a function');
        }
        name = nameOrFn;
        // TODO(nsthorat,smilkov): Do operation logging and performance
        // profiling.
    }
    var result;
    var openScope = function () { return _this.startScope(name); };
    var closeScope = function () { return _this.endScope(result); };
    var body = function () {
        result = fn();
        if (result instanceof Promise) {
            console.error('Cannot return a Promise inside of tidy.');
        }
        return result;
    };
    return this.scopedRun(openScope, closeScope, body);
};
// Runs `f` bracketed by `start` and `end`; `end` runs whether `f` returns
// or throws, and the return value / exception of `f` propagates unchanged.
Engine.prototype.scopedRun = function (start, end, f) {
    start();
    try {
        return f();
    }
    finally {
        end();
    }
};
// Hands out monotonically increasing tensor ids from the class-level counter.
Engine.prototype.nextTensorId = function () {
    var id = Engine.nextTensorId;
    Engine.nextTensorId = id + 1;
    return id;
};
// Hands out monotonically increasing variable ids from the class-level counter.
Engine.prototype.nextVariableId = function () {
    var id = Engine.nextVariableId;
    Engine.nextVariableId = id + 1;
    return id;
};
/**
 * This method is called instead of the public-facing tensor.clone() when
 * saving a tensor for backwards pass. It makes sure to add the clone
 * operation to the tape regardless of being called inside a kernel
 * execution.
 */
Engine.prototype.clone = function (x) {
    // Clone via the Identity kernel so the copy goes through the backend.
    var y = ENGINE.runKernel(Identity, { x: x });
    var inputs = { x: x };
    // Gradient of the clone: cast the incoming gradient to float32.
    var grad = function (dy) { return ({
        x: function () {
            var dtype = 'float32';
            var gradInputs = { x: dy };
            var attrs = { dtype: dtype };
            return ENGINE.runKernel(Cast, gradInputs,
            // tslint:disable-next-line: no-unnecessary-type-assertion
            attrs);
        }
    }); };
    var saved = [];
    // Record the node on the tape directly, bypassing runKernelFunc's
    // isTapeOn check (see the doc comment above).
    this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});
    return y;
};
/**
 * Execute a registered kernel by name and return its output tensor(s).
 *
 * @param kernelName The name of the kernel to execute.
 * @param inputs A map of input names to tensors.
 * @param attrs A map of attribute names to their values. An attribute is a
 *     primitive (non-tensor) input to the kernel.
 */
Engine.prototype.runKernel = function (kernelName, inputs, attrs) {
    if (this.backendName == null) {
        // Backend initialization is lazy; touching the getter forces it and
        // populates this.backendName as a side effect.
        // tslint:disable-next-line: no-unused-expression
        this.backend;
    }
    // Refuse to run kernels that the active backend does not provide.
    if (getKernel(kernelName, this.backendName) == null) {
        throw new Error("Kernel '" + kernelName + "' not registered for backend '" + this.backendName + "'");
    }
    return this.runKernelFunc({ kernelName: kernelName, inputs: inputs, attrs: attrs });
};
// Memory-leak checking is enabled only when the IS_TEST flag is set.
Engine.prototype.shouldCheckForMemLeaks = function () {
    var env = this.ENV;
    return env.getBool('IS_TEST');
};
// After a kernel runs, verifies the backend did not allocate data ids beyond
// those accounted for by the kernel's outputs and any data moves.
Engine.prototype.checkKernelForMemLeak = function (kernelName, numDataIdsBefore, outInfos) {
    var numDataIdsAfter = this.backend.numDataIds();
    // Each complex64 output owns 3 data ids: one for 'real', one for
    // 'imaginary', and one for the container holding the former two. Every
    // other dtype owns exactly one.
    var numOutputDataIds = outInfos.reduce(function (count, info) {
        return count + (info.dtype === 'complex64' ? 3 : 1);
    }, 0);
    // A "data move" can happen mid-kernel, placing a new (key,value) pair in
    // the data storage. Moves have net zero effect (the data is removed from
    // the old backend), so cancel them out when detecting leaks.
    var movesStack = this.state.numDataMovesStack;
    var numMoves = movesStack[movesStack.length - 1];
    var dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;
    if (dataIdsLeaked > 0) {
        throw new Error("Backend '" + this.backendName + "' has an internal memory leak " +
            ("(" + dataIdsLeaked + " data ids) after running '" + kernelName + "'"));
    }
};
/**
 * Internal helper method to execute a kernel Func
 *
 * Use `runKernel` to execute kernels from outside of engine.
 */
Engine.prototype.runKernelFunc = function (kernelParams) {
    var _this = this;
    var outputs;
    var saved = [];
    var isTapeOn = this.isTapeOn();
    // Snapshot counters so the profiler can report per-kernel deltas.
    var startingBytecount = this.state.numBytes;
    var startingNumTensors = this.state.numTensors;
    if (this.shouldCheckForMemLeaks()) {
        this.state.numDataMovesStack.push(0);
    }
    var kernelFunc;
    if (this.backendName == null) {
        // backend has not been initialized yet (backend initialization is lazy
        // can be deferred until an op/ kernel is run).
        // The below getter has side effects that will try to initialize the
        // backend and set properties like this.backendName
        // tslint:disable-next-line: no-unused-expression
        this.backend;
    }
    var out;
    // Label used for tape nodes, profiling, and error messages: the kernel
    // name for registered kernels, otherwise the active scope's name.
    var kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ?
        kernelParams.kernelName :
        this.state.activeScope != null ? this.state.activeScope.name : '';
    // Create the kernelFunc from either a registered kernel OR passed in
    // forward/backward functions (used by custom grad). In this context a
    // kernelFunc wraps a kernel implementation with some bookkeeping.
    if (isRegisteredKernelInvocation(kernelParams)) {
        var kernelName_1 = kernelParams.kernelName, inputs_1 = kernelParams.inputs, attrs_1 = kernelParams.attrs;
        if (this.backendName == null) {
            // backend has not been initialized yet (backend initialization is lazy
            // can be deferred until an op/ kernel is run).
            // The below getter has side effects that will try to initialize the
            // backend and set properties like this.backendName
            // tslint:disable-next-line: no-unused-expression
            this.backend;
        }
        var kernel_1 = getKernel(kernelName_1, this.backendName);
        assert(kernel_1 != null, function () { return "Cannot find registered kernel '" + kernelName_1 + "' for backend '" + _this.backendName + "'"; });
        kernelFunc = function () {
            var numDataIdsBefore = _this.backend.numDataIds();
            out = kernel_1.kernelFunc({ inputs: inputs_1, attrs: attrs_1, backend: _this.backend });
            var outInfos = Array.isArray(out) ? out : [out];
            if (_this.shouldCheckForMemLeaks()) {
                _this.checkKernelForMemLeak(kernelName_1, numDataIdsBefore, outInfos);
            }
            var outTensors = outInfos.map(function (outInfo) {
                // todo (yassogba) remove this option (Tensor) when node backend
                // methods have been modularized and they all return tensorInfo.
                // TensorInfos do not have a rank attribute.
                if (outInfo.rank != null) {
                    return outInfo;
                }
                var dataId = outInfo.dataId, shape = outInfo.shape, dtype = outInfo.dtype;
                return _this.makeTensorFromDataId(dataId, shape, dtype);
            });
            // Save any required inputs and outputs.
            // Do not save unless we are recording to the tape. Otherwise it would
            // cause a mem leak since there would be no backprop for these tensors
            // (which would otherwise dispose them).
            if (isTapeOn) {
                var tensorsToSave = _this.getTensorsForGradient(kernelName_1, inputs_1, outTensors);
                saved = _this.saveTensorsForBackwardMode(tensorsToSave);
            }
            return outTensors;
        };
    }
    else {
        var forwardFunc_1 = kernelParams.forwardFunc;
        // Running a customGrad op.
        var saveFunc_1 = function (tensors) {
            // Do not save unless we are recording to the tape. Otherwise it would
            // cause a mem leak since we would never run backprop, which disposes
            // the kept tensors.
            if (!isTapeOn) {
                return;
            }
            saved = tensors.map(function (tensor) { return _this.keep(_this.clone(tensor)); });
        };
        kernelFunc = function () {
            var numDataIdsBefore = _this.backend.numDataIds();
            // Run the forward function inside a tidy so its intermediates are
            // disposed when the scope ends.
            out = _this.tidy(function () { return forwardFunc_1(_this.backend, saveFunc_1); });
            var outs = (Array.isArray(out) ? out : [out]);
            if (_this.shouldCheckForMemLeaks()) {
                // Scope name is used to print a more helpful error message if needed.
                _this.checkKernelForMemLeak(kernelOrScopeName, numDataIdsBefore, outs);
            }
            return outs;
        };
    }
    //
    // Run the kernelFunc. Optionally profiling it.
    //
    var inputs = kernelParams.inputs, attrs = kernelParams.attrs;
    var backwardsFunc = isRegisteredKernelInvocation(kernelParams) ?
        null :
        kernelParams.backwardsFunc;
    var kernelProfile;
    this.scopedRun(
    // Stop recording to a tape when running a kernel.
    function () { return _this.state.kernelDepth++; }, function () { return _this.state.kernelDepth--; }, function () {
        if (!_this.ENV.getBool('DEBUG') && !_this.state.profiling) {
            // Fast path: no debugging, no profiling.
            outputs = kernelFunc();
        }
        else {
            kernelProfile = _this.profiler.profileKernel(kernelOrScopeName, inputs, function () { return kernelFunc(); });
            if (_this.ENV.getBool('DEBUG')) {
                _this.profiler.logKernelProfile(kernelProfile);
            }
            outputs = kernelProfile.outputs;
        }
    });
    if (isTapeOn) {
        this.addTapeNode(kernelOrScopeName, inputs, outputs, backwardsFunc, saved, attrs);
    }
    if (this.state.profiling) {
        this.state.activeProfile.kernels.push({
            name: kernelOrScopeName,
            bytesAdded: this.state.numBytes - startingBytecount,
            totalBytesSnapshot: this.state.numBytes,
            tensorsAdded: this.state.numTensors - startingNumTensors,
            totalTensorsSnapshot: this.state.numTensors,
            inputShapes: Object.keys(inputs).map(function (key) { return inputs[key] != null ? inputs[key].shape : null; }),
            outputShapes: outputs.map(function (item) { return item.shape; }),
            kernelTimeMs: kernelProfile.timeMs,
            extraInfo: kernelProfile.extraInfo
        });
    }
    // Return an array iff the kernel itself produced an array.
    return (Array.isArray(out) ? outputs : outputs[0]);
};
/**
 * Saves tensors used in forward mode for use in backward mode.
 *
 * Each tensor is cloned and the clone is marked as kept so that scope
 * cleanup cannot dispose it before backprop runs.
 *
 * @param tensors the list of tensors to save.
 */
Engine.prototype.saveTensorsForBackwardMode = function (tensors) {
    var _this = this;
    return tensors.map(function (tensor) { return _this.keep(_this.clone(tensor)); });
};
/**
 * Returns a list of tensors to save for a given gradient calculation.
 *
 * @param kernelName name of kernel to look up gradient for.
 * @param inputs a map of input tensors.
 * @param outputs an array of output tensors from forward mode of kernel.
 */
Engine.prototype.getTensorsForGradient = function (kernelName, inputs, outputs) {
    var gradConfig = getGradient(kernelName);
    if (gradConfig == null) {
        // No gradient registered: return an empty list rather than throw,
        // because the kernel may be irrelevant to (pruned from) the overall
        // backprop. See 'does not error if irrelevant (pruned) ops are
        // missing grads' in gradients_test.ts for an example.
        return [];
    }
    var inputsToSave = gradConfig.inputsToSave || [];
    var outputsToSave = gradConfig.outputsToSave || [];
    // With saveAllInputs, every input is saved; otherwise only the ones
    // named in inputsToSave.
    var inputTensorsToSave;
    if (gradConfig.saveAllInputs) {
        assert(Array.isArray(inputs), function () { return 'saveAllInputs is true, expected inputs to be an array.'; });
        inputTensorsToSave = Object.keys(inputs).map(function (key) { return inputs[key]; });
    }
    else {
        inputTensorsToSave = inputsToSave.map(function (inputName) { return inputs[inputName]; });
    }
    var outputTensorsToSave = outputs.filter(function (_, i) { return outputsToSave[i]; });
    return inputTensorsToSave.concat(outputTensorsToSave);
};
/**
 * Internal method used by public APIs for tensor creation. Makes a new
 * tensor with the provided shape, dtype and values. It always
 * creates a new data id and writes the values to the underlying backend.
 */
Engine.prototype.makeTensor = function (values, shape, dtype, backend) {
    if (values == null) {
        throw new Error('Values passed to engine.makeTensor() are null');
    }
    dtype = dtype || 'float32';
    backend = backend || this.backend;
    var backendVals = values;
    // String values are encoded to bytes before being handed to the backend.
    if (dtype === 'string' && isString(values[0])) {
        backendVals = values.map(function (d) { return encodeString(d); });
    }
    var dataId = backend.write(backendVals, shape, dtype);
    var t = new Tensor(shape, dtype, dataId, this.nextTensorId());
    this.trackTensor(t, backend);
    // Count bytes for string tensors. trackTensor recorded 0 bytes for the
    // 'string' dtype, so adjust both the global count and the per-data entry
    // from the actual encoded byte length.
    if (dtype === 'string') {
        var info = this.state.tensorInfo.get(dataId);
        var newBytes = bytesFromStringArray(backendVals);
        this.state.numBytes += newBytes - info.bytes;
        info.bytes = newBytes;
    }
    return t;
};
/**
 * Internal method used by backends. Makes a new tensor that wraps an
 * existing data id. No new data id is created; only the ref count used in
 * memory tracking is incremented.
 */
Engine.prototype.makeTensorFromDataId = function (dataId, shape, dtype, backend) {
    dtype = dtype || 'float32';
    var tensor = new Tensor(shape, dtype, dataId, this.nextTensorId());
    this.trackTensor(tensor, backend);
    return tensor;
};
// Creates a (by default trainable) Variable from an initial value, casting
// when an explicit dtype disagrees, and registers it under a unique name.
Engine.prototype.makeVariable = function (initialValue, trainable, name, dtype) {
    if (trainable === void 0) { trainable = true; }
    // Auto-generate a name when none is supplied.
    name = name || this.nextVariableId().toString();
    var value = initialValue;
    if (dtype != null && dtype !== value.dtype) {
        value = value.cast(dtype);
    }
    var variable = new Variable(value, trainable, name, this.nextTensorId());
    var registered = this.state.registeredVariables;
    if (registered[variable.name] != null) {
        throw new Error("Variable with name " + variable.name + " was already registered");
    }
    registered[variable.name] = variable;
    this.incRef(variable, this.backend);
    return variable;
};
// Registers tensor `a` in engine bookkeeping (tensor/byte counters and the
// per-dataId info table) and tracks it in the active scope unless it is a
// Variable.
Engine.prototype.trackTensor = function (a, backend) {
    this.state.numTensors++;
    if (a.dtype === 'string') {
        this.state.numStringTensors++;
    }
    // Bytes for complex numbers are counted by their components. Bytes for
    // string tensors are counted when writing values.
    var bytes = 0;
    if (a.dtype !== 'complex64' && a.dtype !== 'string') {
        bytes = a.size * bytesPerElement(a.dtype);
    }
    this.state.numBytes += bytes;
    // Only the first tensor wrapping a dataId creates an info entry.
    if (!this.state.tensorInfo.has(a.dataId)) {
        this.state.numDataBuffers++;
        this.state.tensorInfo.set(a.dataId, {
            backend: backend || this.backend,
            dtype: a.dtype,
            shape: a.shape,
            bytes: bytes
        });
    }
    // Variables are not scope-tracked, so they survive scope cleanup.
    if (!(a instanceof Variable)) {
        this.track(a);
    }
};
// Track the tensor by dataId and increase the refCount for the dataId in the
// backend.
// TODO(pyu10055): This is currently used by makeVariable method, to increase
// refCount on the backend for the dataId. It can potentially be replaced with
// Identity op instead of calling backend directly.
Engine.prototype.incRef = function (a, backend) {
    this.trackTensor(a, backend);
    // NOTE(review): the ref count is bumped on the *active* backend
    // (this.backend), not the passed-in `backend` — confirm this is intended.
    this.backend.incRef(a.dataId);
};
// Drops the bookkeeping entry for a data id, but only while the given
// backend still owns it (the data may have moved backends in the meantime).
Engine.prototype.removeDataId = function (dataId, backend) {
    var info = this.state.tensorInfo.get(dataId);
    if (info != null && info.backend === backend) {
        this.state.tensorInfo.delete(dataId);
        this.state.numDataBuffers--;
    }
};
// Releases engine bookkeeping for tensor `a` and asks its backend to free
// the underlying data. No-op if the tensor is already untracked.
Engine.prototype.disposeTensor = function (a) {
    if (!this.state.tensorInfo.has(a.dataId)) {
        return;
    }
    var info = this.state.tensorInfo.get(a.dataId);
    this.state.numTensors--;
    if (a.dtype === 'string') {
        this.state.numStringTensors--;
        // String byte counts were recorded at write time; undo from info.
        this.state.numBytes -= info.bytes;
    }
    // Don't count bytes for complex numbers as they are counted by their
    // components.
    if (a.dtype !== 'complex64' && a.dtype !== 'string') {
        var bytes = a.size * bytesPerElement(a.dtype);
        this.state.numBytes -= bytes;
    }
    // Remove the reference to dataId if backend dispose the data successfully
    if (info.backend.disposeData(a.dataId)) {
        this.removeDataId(a.dataId, info.backend);
    }
    // TODO(nsthorat): Construct an error and save the stack trace for
    // debugging when in debug mode. Creating a stack trace is too expensive
    // to do unconditionally.
};
// Disposes and deregisters every registered variable.
Engine.prototype.disposeVariables = function () {
    var _this = this;
    var registered = this.state.registeredVariables;
    Object.keys(registered).forEach(function (varName) {
        _this.disposeVariable(registered[varName]);
    });
};
// Disposes a single variable and removes it from the registry.
Engine.prototype.disposeVariable = function (v) {
    this.disposeTensor(v);
    var registered = this.state.registeredVariables;
    if (registered[v.name] != null) {
        delete registered[v.name];
    }
};
// Returns the backend's memory report augmented with the engine's own
// tensor/buffer/byte counters.
Engine.prototype.memory = function () {
    var info = this.backend.memory();
    info.numTensors = this.state.numTensors;
    info.numDataBuffers = this.state.numDataBuffers;
    info.numBytes = this.state.numBytes;
    // String tensors make byte accounting approximate, so flag the report.
    if (this.state.numStringTensors > 0) {
        info.unreliable = true;
        if (info.reasons == null) {
            info.reasons = [];
        }
        info.reasons.push('Memory usage by string tensors is approximate ' +
            '(2 bytes per character)');
    }
    return info;
};
// Runs `query` with profiling enabled and resolves to the collected profile
// (kernel list, byte/tensor deltas, peak bytes). The body below is
// TypeScript-downleveled async/await state-machine code (__awaiter /
// __generator); the numeric cases correspond to the original awaits.
Engine.prototype.profile = function (query) {
    return __awaiter(this, void 0, void 0, function () {
        var startBytes, startNumTensors, _a, _b, _c, kernel, _d, _e, e_1_1;
        var e_1, _f;
        return __generator(this, function (_g) {
            switch (_g.label) {
                case 0:
                    // Turn profiling on and snapshot counters for the deltas.
                    this.state.profiling = true;
                    startBytes = this.state.numBytes;
                    startNumTensors = this.state.numTensors;
                    this.state.activeProfile.kernels = [];
                    _a = this.state.activeProfile;
                    return [4 /*yield*/, query()];
                case 1:
                    // `query` finished: record result and summary statistics.
                    _a.result = _g.sent();
                    this.state.profiling = false;
                    this.state.activeProfile.peakBytes = Math.max.apply(Math, __spread(this.state.activeProfile.kernels.map(function (d) { return d.totalBytesSnapshot; })));
                    this.state.activeProfile.newBytes = this.state.numBytes - startBytes;
                    this.state.activeProfile.newTensors =
                        this.state.numTensors - startNumTensors;
                    _g.label = 2;
                case 2:
                    // Cases 2-9 are a downleveled for..of over the kernels,
                    // awaiting each kernel's async timing/extra info.
                    _g.trys.push([2, 8, 9, 10]);
                    _b = __values(this.state.activeProfile.kernels), _c = _b.next();
                    _g.label = 3;
                case 3:
                    if (!!_c.done) return [3 /*break*/, 7];
                    kernel = _c.value;
                    _d = kernel;
                    return [4 /*yield*/, kernel.kernelTimeMs];
                case 4:
                    _d.kernelTimeMs = _g.sent();
                    _e = kernel;
                    return [4 /*yield*/, kernel.extraInfo];
                case 5:
                    _e.extraInfo = _g.sent();
                    _g.label = 6;
                case 6:
                    _c = _b.next();
                    return [3 /*break*/, 3];
                case 7: return [3 /*break*/, 10];
                case 8:
                    e_1_1 = _g.sent();
                    e_1 = { error: e_1_1 };
                    return [3 /*break*/, 10];
                case 9:
                    // Iterator cleanup (downleveled finally of the for..of).
                    try {
                        if (_c && !_c.done && (_f = _b.return)) _f.call(_b);
                    }
                    finally { if (e_1) throw e_1.error; }
                    return [7 /*endfinally*/];
                case 10: return [2 /*return*/, this.state.activeProfile];
            }
        });
    });
};
// Recording is on while inside a gradient scope but not inside a kernel.
Engine.prototype.isTapeOn = function () {
    var state = this.state;
    return state.gradientDepth > 0 && state.kernelDepth === 0;
};
// Records one executed kernel on the active tape so gradients() can later
// backprop through it.
Engine.prototype.addTapeNode = function (kernelName, inputs, outputs, gradientsFunc, saved, attrs) {
    var _this = this;
    var tapeNode = { id: this.state.nextTapeNodeId++, kernelName: kernelName, inputs: inputs, outputs: outputs, saved: saved };
    // A gradient registered in the kernel registry takes precedence over the
    // passed-in gradientsFunc.
    var gradConfig = getGradient(kernelName);
    if (gradConfig != null) {
        gradientsFunc = gradConfig.gradFunc;
    }
    if (gradientsFunc != null) {
        tapeNode.gradient = function (dys) {
            // TODO(smilkov): To optimize back-prop, pass dys that are not used in
            // the backprop graph to the user as null instead of zeros
            dys = dys.map(function (dy, i) {
                if (dy == null) {
                    // Missing upstream gradient: substitute zeros matching the
                    // corresponding output's shape and dtype.
                    var output = outputs[i];
                    var vals = makeZerosTypedArray(output.size, output.dtype);
                    return _this.makeTensor(vals, output.shape, output.dtype);
                }
                return dy;
            });
            // Grad functions of ops with single outputs expect a dy, while ops
            // with multiple outputs expect dys (array of dy).
            return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);
        };
    }
    this.state.activeTape.push(tapeNode);
};
// Marks a tensor so scope cleanup never disposes it automatically.
Engine.prototype.keep = function (result) {
    result.kept = true;
    return result;
};
// Opens a gradient-recording scope; a fresh tape is only created for the
// outermost scope.
Engine.prototype.startTape = function () {
    var depth = this.state.gradientDepth;
    if (depth === 0) {
        this.state.activeTape = [];
    }
    this.state.gradientDepth = depth + 1;
};
// Closes one gradient-recording scope.
Engine.prototype.endTape = function () {
    this.state.gradientDepth -= 1;
};
/**
 * Start a scope. Use this with endScope() to achieve the same functionality
 * as scope() without the need for a function closure.
 */
Engine.prototype.startScope = function (name) {
    var scopeInfo = {
        track: [],
        name: name ? name : 'unnamed scope',
        id: this.state.nextScopeId++
    };
    this.state.scopeStack.push(scopeInfo);
    this.state.activeScope = scopeInfo;
};
/**
 * End a scope. Use this with startScope() to achieve the same functionality
 * as scope() without the need for a function closure.
 */
Engine.prototype.endScope = function (result) {
    var _this = this;
    var tensorsToTrackInParent = getTensorsInContainer(result);
    var escapingIds = new Set(tensorsToTrackInParent.map(function (t) { return t.id; }));
    // Dispose every tensor tracked by this scope unless it escapes via the
    // result or was explicitly kept.
    this.state.activeScope.track.forEach(function (tensor) {
        if (!tensor.kept && !escapingIds.has(tensor.id)) {
            tensor.dispose();
        }
    });
    var oldScope = this.state.scopeStack.pop();
    var stack = this.state.scopeStack;
    this.state.activeScope = stack.length === 0 ? null : stack[stack.length - 1];
    // Re-track escaping result tensors in the parent scope, but only those
    // allocated in the inner scope and not globally kept.
    tensorsToTrackInParent.forEach(function (tensor) {
        if (!tensor.kept && tensor.scopeId === oldScope.id) {
            _this.track(tensor);
        }
    });
};
/**
 * Returns gradients of `f` with respect to each of the `xs`. The gradients
 * returned are of the same length as `xs`, but some might be null if `f`
 * was not a function of that `x`. It also takes optional dy to multiply the
 * gradient, which defaults to `1`.
 */
Engine.prototype.gradients = function (f, xs, dy, allowNoGradients) {
    var _this = this;
    if (allowNoGradients === void 0) { allowNoGradients = false; }
    assert(xs.length > 0, function () { return 'gradients() received an empty list of xs.'; });
    if (dy != null && dy.dtype !== 'float32') {
        throw new Error("dy must have 'float32' dtype, but has '" + dy.dtype + "'");
    }
    // Run the forward pass with the tape turned on so ops get recorded.
    var y = this.scopedRun(function () { return _this.startTape(); }, function () { return _this.endTape(); }, function () { return _this.tidy('forward', f); });
    assert(y instanceof Tensor, function () { return 'The result y returned by f() must be a tensor.'; });
    // Filter out the nodes that don't connect x => y.
    var filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);
    if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {
        throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' +
            'that the f you passed encloses all operations that lead from x ' +
            'to y.');
    }
    return this.tidy('backward', function () {
        var accumulatedGradientMap = {};
        // Seed the output gradient with dy, or ones when none was provided.
        accumulatedGradientMap[y.id] = (dy == null) ? ones$1(y.shape) : dy;
        // Backprop gradients through the filtered nodes.
        backpropagateGradients(accumulatedGradientMap, filteredTape,
        // Pass the tidy function to avoid circular dep with `tape.ts`.
        function (f) { return _this.tidy(f); },
        // Pass an add function to avoid a circular dep with `tape.ts`.
        add$1);
        var grads = xs.map(function (x) { return accumulatedGradientMap[x.id]; });
        if (_this.state.gradientDepth === 0) {
            // This means that we are not computing higher-order gradients
            // and can clean up the tape. (The loop below is a downleveled
            // for..of over each node's saved tensors.)
            _this.state.activeTape.forEach(function (node) {
                var e_2, _a;
                try {
                    for (var _b = __values(node.saved), _c = _b.next(); !_c.done; _c = _b.next()) {
                        var tensor = _c.value;
                        tensor.dispose();
                    }
                }
                catch (e_2_1) { e_2 = { error: e_2_1 }; }
                finally {
                    try {
                        if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
                    }
                    finally { if (e_2) throw e_2.error; }
                }
            });
            _this.state.activeTape = null;
        }
        return { value: y, grads: grads };
    });
};
// Wraps `f` (which returns {value, gradFunc}) into an op whose gradient is
// supplied by gradFunc rather than the kernel registry.
Engine.prototype.customGrad = function (f) {
    var _this = this;
    assert(isFunction(f), function () { return 'The f passed in customGrad(f) must be a function.'; });
    return function () {
        var inputs = [];
        for (var _i = 0; _i < arguments.length; _i++) {
            inputs[_i] = arguments[_i];
        }
        assert(inputs.every(function (t) { return t instanceof Tensor; }), function () { return 'The args passed in customGrad(f)(x1, x2,...) must all be ' +
            'tensors'; });
        // `res` is set by forwardFunc and later read by backwardsFunc.
        var res;
        var inputMap = {};
        inputs.forEach(function (input, i) {
            inputMap[i] = input;
        });
        var forwardFunc = function (_, save) {
            res = f.apply(void 0, __spread(inputs, [save]));
            assert(res.value instanceof Tensor, function () { return 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.value` is a tensor'; });
            assert(isFunction(res.gradFunc), function () { return 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.gradFunc` is a function.'; });
            return res.value;
        };
        var backwardsFunc = function (dy, saved) {
            var gradRes = res.gradFunc(dy, saved);
            var grads = Array.isArray(gradRes) ? gradRes : [gradRes];
            assert(grads.length === inputs.length, function () { return 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.gradFunc` is a function that returns ' +
                'the same number of tensors as inputs passed to f(...).'; });
            assert(grads.every(function (t) { return t instanceof Tensor; }), function () { return 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.gradFunc` is a function that returns ' +
                'a list of only tensors.'; });
            // Map positional gradients to the positional input keys.
            var gradMap = {};
            grads.forEach(function (grad, i) {
                gradMap[i] = function () { return grad; };
            });
            return gradMap;
        };
        return _this.runKernelFunc({
            forwardFunc: forwardFunc,
            backwardsFunc: backwardsFunc,
            inputs: inputMap,
        });
    };
};
// Synchronously reads the values behind a data id from whichever backend
// owns it.
Engine.prototype.readSync = function (dataId) {
    var owner = this.state.tensorInfo.get(dataId).backend;
    return owner.readSync(dataId);
};
// Asynchronously reads the values behind a data id from whichever backend
// owns it.
Engine.prototype.read = function (dataId) {
    var owner = this.state.tensorInfo.get(dataId).backend;
    return owner.read(dataId);
};
// Times `query` via the backend's timer and adds the wall-clock duration.
// The body is TypeScript-downleveled async/await state-machine code.
Engine.prototype.time = function (query) {
    return __awaiter(this, void 0, void 0, function () {
        var start, timingInfo;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    start = now();
                    return [4 /*yield*/, this.backend.time(query)];
                case 1:
                    timingInfo = _a.sent();
                    // Wall time spans the whole awaited backend call.
                    timingInfo.wallMs = now() - start;
                    return [2 /*return*/, timingInfo];
            }
        });
    });
};
/**
 * Tracks a Tensor in the current scope to be automatically cleaned up
 * when the current scope ends, and returns the value.
 *
 * @param result The Tensor to track in the current scope.
 */
Engine.prototype.track = function (result) {
    var scope = this.state.activeScope;
    if (scope != null) {
        result.scopeId = scope.id;
        scope.track.push(result);
    }
    return result;
};
// Expose the engine-state variable map as a read-only accessor.
Object.defineProperty(Engine.prototype, "registeredVariables", {
    get: function () { return this.state.registeredVariables; },
    enumerable: true,
    configurable: true
});
/**
 * Resets the engine state. Removes all backends but does not remove
 * registered backend factories.
 */
Engine.prototype.reset = function () {
    var _this = this;
    // Make any pending backend-init promise obsolete.
    this.pendingBackendInitId++;
    this.state.dispose();
    this.ENV.reset();
    this.state = new EngineState();
    Object.keys(this.registry).forEach(function (backendName) {
        _this.disposeRegisteredKernels(backendName);
        _this.registry[backendName].dispose();
        delete _this.registry[backendName];
    });
    this.backendName = null;
    this.backendInstance = null;
    this.pendingBackendInit = null;
};
+ return Engine;
+ }());
// Class-level monotonic counters backing nextTensorId()/nextVariableId().
Engine.nextTensorId = 0;
Engine.nextVariableId = 0;
// Builds a float32 tensor of the given shape filled with ones.
function ones$1(shape) {
    var size = sizeFromShape(shape);
    var values = makeOnesTypedArray(size, 'float32');
    return ENGINE.makeTensor(values, shape, 'float32');
}
// Lazily creates a single Engine stored on the global namespace, so multiple
// copies of the library share one engine instance.
function getOrMakeEngine() {
    var ns = getGlobalNamespace();
    if (ns._tfengine == null) {
        var environment = new Environment(ns);
        ns._tfengine = new Engine(environment);
    }
    setEnvironmentGlobal(ns._tfengine.ENV);
    // Tell the current tensor interface that the global engine is responsible
    // for tracking.
    setTensorTracker(function () { return ns._tfengine; });
    return ns._tfengine;
}
// The singleton engine shared by all ops in this module.
var ENGINE = getOrMakeEngine();
/**
 * An implementation of the add op for use within engine and tape.
 *
 * Duplicating Add here avoids a circular dependency between add.ts and
 * engine. It is exported to be available in tape tests.
 */
function add$1(a, b) {
    var inputs = { a: a, b: b };
    return ENGINE.runKernel(Add, inputs);
}
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Returns true when a usable global `navigator` object exists
// (browser-like or React Native environments).
// tslint:disable-next-line:no-any
function _isNavigatorDefined() {
    if (typeof navigator === 'undefined') {
        return false;
    }
    return navigator != null;
}
// Test-only override for isMobile(); `undefined` means "not mocked".
var isMobileMockValue;
// Forces isMobile() to return `value` regardless of the real user agent.
// Pass `undefined` to restore normal detection.
function mockIsMobile(value) {
    isMobileMockValue = value;
}
/**
 * Heuristically detects whether the current environment is a mobile device.
 *
 * Checks, in order: the test-only mock value, React Native
 * (`nav.product === 'ReactNative'`), the classic user-agent regexes, and —
 * only when no user-agent string is available — `userAgentData.mobile`.
 *
 * @param nav Optional navigator-like object; defaults to the global
 *     `navigator` when one exists.
 * @returns true if a mobile environment is detected.
 */
function isMobile(nav) {
    if (isMobileMockValue !== undefined) {
        return isMobileMockValue;
    }
    if (nav || _isNavigatorDefined()) {
        if (!nav) {
            nav = navigator;
        }
        if (nav.product === 'ReactNative') {
            return true;
        }
        var a = nav.userAgent || nav.vendor ||
            // tslint:disable-next-line:no-any
            (typeof window !== 'undefined' ? window.opera : '');
        // Use `navigator.userAgentData.mobile` as fallback.
        if (!a) {
            // tslint:disable-next-line:no-any
            var navAny = nav;
            return navAny.userAgentData && navAny.userAgentData.mobile;
        }
        // First regex matches the full UA string; second matches only its
        // first four characters (legacy device-prefix table).
        // tslint:disable-next-line:max-line-length
        return /(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i
            .test(a) ||
            // tslint:disable-next-line:max-line-length
            /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i
            .test(a.substr(0, 4));
    }
    return false;
}
/**
 * Returns true when running in a browser-like environment: either a window
 * with a non-null document, or a web worker (WorkerGlobalScope defined).
 */
function isBrowser() {
    var hasWindowWithDocument =
        typeof window !== 'undefined' && window.document != null;
    //@ts-ignore
    var isWebWorker = typeof WorkerGlobalScope !== 'undefined';
    return hasWindowWithDocument || isWebWorker;
}
+
// Namespace-style grouping of the device-detection utilities above.
// `__proto__: null` gives the object no prototype (dictionary-safe).
var device_util = {
    __proto__: null,
    mockIsMobile: mockIsMobile,
    isMobile: isMobile,
    isBrowser: isBrowser
};
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
var ENV = env();
/**
 * This file contains environment-related flag registrations.
 */
/** Whether to enable debug mode. */
ENV.registerFlag('DEBUG', function () { return false; }, function (debugValue) {
    if (debugValue) {
        console.warn('Debugging mode is ON. The output of every math call will ' +
            'be downloaded to CPU and checked for NaNs. ' +
            'This significantly impacts performance.');
    }
});
/** Whether we are in a browser (as versus, say, node.js) environment. */
ENV.registerFlag('IS_BROWSER', function () { return isBrowser(); });
/** Whether we are in a Node.js environment (process.versions.node exists). */
ENV.registerFlag('IS_NODE', function () { return (typeof process !== 'undefined') &&
    (typeof process.versions !== 'undefined') &&
    (typeof process.versions.node !== 'undefined'); });
/** Whether this browser is Chrome (checks both userAgent and vendor). */
ENV.registerFlag('IS_CHROME', function () { return typeof navigator !== 'undefined' && navigator != null &&
    navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&
    /Google Inc/.test(navigator.vendor); });
/**
 * True when the environment is "production" where we disable safety checks
 * to gain performance.
 */
ENV.registerFlag('PROD', function () { return false; });
/**
 * Whether to do sanity checks when inferring a shape from user-provided
 * values, used when creating a new tensor. Defaults to the DEBUG flag.
 */
ENV.registerFlag('TENSORLIKE_CHECK_SHAPE_CONSISTENCY', function () { return ENV.getBool('DEBUG'); });
/** Whether deprecation warnings are enabled. */
ENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', function () { return true; });
/** True if running unit tests. */
ENV.registerFlag('IS_TEST', function () { return false; });
/** Whether to check computation result for errors. */
ENV.registerFlag('CHECK_COMPUTATION_FOR_ERRORS', function () { return true; });
/** Whether the backend needs to wrap input to imageBitmap. */
ENV.registerFlag('WRAP_TO_IMAGEBITMAP', function () { return false; });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Infers the shape of a nested array / TypedArray by walking the first
 * element at each nesting level.
 *
 * @param val A scalar, nested array, or TypedArray.
 * @param dtype Optional dtype; when 'string', a TypedArray is treated as a
 *     single encoded string (scalar) rather than a vector of bytes.
 * @returns The inferred shape (array of dimension sizes; [] for scalars).
 */
function inferShape(val, dtype) {
    if (isTypedArray(val)) {
        return dtype === 'string' ? [] : [val.length];
    }
    if (!Array.isArray(val)) {
        // Primitive value: scalars have an empty shape.
        return [];
    }
    var shape = [];
    var cursor = val;
    while (Array.isArray(cursor) ||
        (isTypedArray(cursor) && dtype !== 'string')) {
        shape.push(cursor.length);
        cursor = cursor[0];
    }
    // Optionally verify that every row has a consistent length (guarded by
    // the TENSORLIKE_CHECK_SHAPE_CONSISTENCY flag, which defaults to DEBUG).
    if (Array.isArray(val) &&
        env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {
        deepAssertShapeConsistency(val, shape, []);
    }
    return shape;
}
/**
 * Recursively asserts that `val` is a ragged-free nested array matching
 * `shape` exactly; `indices` tracks the path for error messages.
 * Throws (via `assert`) on the first inconsistency found.
 */
function deepAssertShapeConsistency(val, shape, indices) {
    indices = indices || [];
    if (!(Array.isArray(val)) && !isTypedArray(val)) {
        // Leaf reached: the remaining expected shape must be empty.
        assert(shape.length === 0, function () { return "Element arr[" + indices.join('][') + "] is a primitive, " +
            ("but should be an array/TypedArray of " + shape[0] + " elements"); });
        return;
    }
    // Non-leaf: there must be a dimension left, and its size must match.
    assert(shape.length > 0, function () { return "Element arr[" + indices.join('][') + "] should be a primitive, " +
        ("but is an array of " + val.length + " elements"); });
    assert(val.length === shape[0], function () { return "Element arr[" + indices.join('][') + "] should have " + shape[0] + " " +
        ("elements, but has " + val.length + " elements"); });
    var subShape = shape.slice(1);
    for (var i = 0; i < val.length; ++i) {
        deepAssertShapeConsistency(val[i], subShape, indices.concat(i));
    }
}
/**
 * Validates that an argument's actual dtype satisfies the expected dtype.
 *
 * @param expectedDtype A concrete dtype, 'numeric' (anything but string),
 *     or 'string_or_numeric' (accepts everything).
 * @param actualDType The dtype actually observed.
 * @param argName Argument name, used in the error message.
 * @param functionName Op name, used in the error message.
 * @throws Error when expectedDtype is null or the dtypes are incompatible.
 */
function assertDtype(expectedDtype, actualDType, argName, functionName) {
    // 'string_or_numeric' accepts every dtype.
    if (expectedDtype === 'string_or_numeric') {
        return;
    }
    if (expectedDtype == null) {
        throw new Error("Expected dtype cannot be null.");
    }
    var incompatible;
    if (expectedDtype === 'numeric') {
        incompatible = actualDType === 'string';
    }
    else {
        incompatible = expectedDtype !== actualDType;
    }
    if (incompatible) {
        throw new Error("Argument '" + argName + "' passed to '" + functionName + "' must " +
            ("be " + expectedDtype + " tensor, but got " + actualDType + " tensor"));
    }
}
/**
 * Converts a Tensor or TensorLike (number, boolean, string, nested array,
 * TypedArray) to a Tensor, validating its dtype along the way.
 *
 * @param x The input value.
 * @param argName Argument name for error messages.
 * @param functionName Op name for error messages.
 * @param parseAsDtype Expected dtype category; defaults to 'numeric'.
 * @returns `x` itself when it is already a Tensor, otherwise a new Tensor.
 * @throws Error on dtype mismatch or non-TensorLike input.
 */
function convertToTensor(x, argName, functionName, parseAsDtype) {
    if (parseAsDtype === void 0) { parseAsDtype = 'numeric'; }
    if (x instanceof Tensor) {
        assertDtype(parseAsDtype, x.dtype, argName, functionName);
        return x;
    }
    var inferredDtype = inferDtype(x);
    // If the user expects a bool/int/float, use that info to update the
    // inferredDtype when it is not a string.
    if (inferredDtype !== 'string' &&
        ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {
        inferredDtype = parseAsDtype;
    }
    assertDtype(parseAsDtype, inferredDtype, argName, functionName);
    // Reject values that are neither primitives nor (typed) arrays.
    if ((x == null) ||
        (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&
            typeof x !== 'boolean' && typeof x !== 'string')) {
        var type = x == null ? 'null' : x.constructor.name;
        throw new Error("Argument '" + argName + "' passed to '" + functionName + "' must be a " +
            ("Tensor or TensorLike, but got '" + type + "'"));
    }
    var inferredShape = inferShape(x, inferredDtype);
    // Wrap bare primitives so the flattening code below sees an array.
    if (!isTypedArray(x) && !Array.isArray(x)) {
        x = [x];
    }
    var skipTypedArray = true;
    var values = inferredDtype !== 'string' ?
        toTypedArray(x, inferredDtype) :
        flatten(x, [], skipTypedArray);
    return ENGINE.makeTensor(values, inferredShape, inferredDtype);
}
/**
 * Converts an array of Tensors or TensorLikes to an array of Tensors by
 * applying convertToTensor element-wise.
 *
 * @param arg The input array.
 * @param argName Argument name for error messages.
 * @param functionName Op name for error messages.
 * @param parseAsDtype Expected dtype category; defaults to 'numeric'.
 * @throws Error when `arg` is not an array.
 */
function convertToTensorArray(arg, argName, functionName, parseAsDtype) {
    if (parseAsDtype === void 0) { parseAsDtype = 'numeric'; }
    if (!Array.isArray(arg)) {
        throw new Error("Argument " + argName + " passed to " + functionName + " must be a " +
            '`Tensor[]` or `TensorLike[]`');
    }
    return arg.map(function (item, index) {
        return convertToTensor(item, argName + "[" + index + "]", functionName, parseAsDtype);
    });
}
+
+ var OP_SCOPE_SUFFIX = '__op';
+ /**
+ * Used for wrapping functions that perform math operations on
+ * Tensors. The function will be wrapped in a named scope that cleans all
+ * memory usage after the function is done.
+ */
/**
 * Wraps a math function in a named engine scope that cleans up all tensor
 * memory allocated inside once the function returns.
 *
 * @param f An object with exactly one key (the op name, conventionally
 *     ending in '_') mapping to the implementation function.
 * @returns The wrapped function; its `name` is the op name plus '__op'.
 * @throws Error when `f` does not have exactly one key.
 */
function op(f) {
    var keys = Object.keys(f);
    if (keys.length !== 1) {
        throw new Error("Please provide an object with a single key " +
            "(operation name) mapping to a function. Got an object with " +
            (keys.length + " keys."));
    }
    var opName = keys[0];
    var fn = f[opName];
    // Strip the underscore from the end of the function name.
    if (opName.endsWith('_')) {
        opName = opName.substring(0, opName.length - 1);
    }
    // add an __op suffix to distinguish ops from kernels in tf.profile
    opName = opName + OP_SCOPE_SUFFIX;
    // tslint:disable-next-line:no-any
    var f2 = function () {
        var args = [];
        for (var _i = 0; _i < arguments.length; _i++) {
            args[_i] = arguments[_i];
        }
        ENGINE.startScope(opName);
        try {
            var result = fn.apply(void 0, __spread(args));
            // Scope cleanup is synchronous, so an async result would be
            // disposed before it resolves; warn rather than throw.
            if (isPromise(result)) {
                console.error('Cannot return a Promise inside of tidy.');
            }
            ENGINE.endScope(result);
            return result;
        }
        catch (ex) {
            // End the scope with no retained result so intermediate tensors
            // are still released, then propagate the error.
            ENGINE.endScope(null);
            throw ex;
        }
    };
    Object.defineProperty(f2, 'name', { value: opName, configurable: true });
    // tslint:disable-next-line:no-any
    return f2;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Converts two real numbers to a complex number.
+ *
+ * Given a tensor `real` representing the real part of a complex number, and a
+ * tensor `imag` representing the imaginary part of a complex number, this
+ * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],
+ * where r represents the real part and i represents the imag part.
+ *
+ * The input tensors real and imag must have the same shape.
+ *
+ * ```js
+ * const real = tf.tensor1d([2.25, 3.25]);
+ * const imag = tf.tensor1d([4.75, 5.75]);
+ * const complex = tf.complex(real, imag);
+ *
+ * complex.print();
+ * ```
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// See the JSDoc above: converts two real tensors (same shape) into a single
// complex64 tensor with interleaved [r0, i0, r1, i1, ...] storage.
function complex_(real, imag) {
    var $real = convertToTensor(real, 'real', 'complex');
    var $imag = convertToTensor(imag, 'imag', 'complex');
    // Real and imaginary parts must be elementwise-aligned.
    assertShapesMatch($real.shape, $imag.shape, "real and imag shapes, " + $real.shape + " and " + $imag.shape + ", " +
        "must match in call to tf.complex().");
    var inputs = { real: $real, imag: $imag };
    return ENGINE.runKernel(Complex, inputs);
}
// Public op wrapper: runs complex_ inside a memory-tracked scope.
var complex = op({ complex_: complex_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * This is shared code across all tensor creation methods.
 *
 * Validates `values` and the user-provided `shape` against the
 * `inferredShape` derived from `values`, then builds the tensor via the
 * engine.
 *
 * @param values Flat or nested values (number/boolean/string or TypedArray).
 * @param shape Optional explicit shape; falls back to `inferredShape`.
 * @param inferredShape Shape inferred from the nesting of `values`.
 * @param dtype Optional dtype; inferred from `values` when omitted.
 * @throws Error for complex64 (use tf.complex), non-TensorLike values, or a
 *     shape whose size disagrees with the values.
 */
function makeTensor(values, shape, inferredShape, dtype) {
    if (dtype == null) {
        dtype = inferDtype(values);
    }
    if (dtype === 'complex64') {
        throw new Error("Cannot construct a complex64 tensor directly. " +
            "Please use tf.complex(real, imag).");
    }
    if (!isTypedArray(values) && !Array.isArray(values) &&
        typeof values !== 'number' && typeof values !== 'boolean' &&
        typeof values !== 'string') {
        throw new Error('values passed to tensor(values) must be a number/boolean/string or ' +
            'an array of numbers/booleans/strings, or a TypedArray');
    }
    if (shape != null) {
        assertNonNegativeIntegerDimensions(shape);
        // Total element count must agree between the explicit and inferred
        // shapes even if the nesting differs (flat values are allowed).
        var providedSize_1 = sizeFromShape(shape);
        var inferredSize_1 = sizeFromShape(inferredShape);
        assert(providedSize_1 === inferredSize_1, function () { return "Based on the provided shape, [" + shape + "], the tensor should have " +
            (providedSize_1 + " values but has " + inferredSize_1); });
        for (var i = 0; i < inferredShape.length; ++i) {
            var inferred = inferredShape[i];
            // The last inferred dim may legitimately hold the product of the
            // remaining provided dims (flat trailing data).
            var flatDimsDontMatch = i === inferredShape.length - 1 ?
                inferred !== sizeFromShape(shape.slice(i)) :
                true;
            assert(inferredShape[i] === shape[i] || !flatDimsDontMatch, function () { return "Error creating a new Tensor. Inferred shape " +
                ("(" + inferredShape + ") does not match the provided ") +
                ("shape (" + shape + "). "); });
        }
    }
    if (!isTypedArray(values) && !Array.isArray(values)) {
        values = [values];
    }
    shape = shape || inferredShape;
    values = dtype !== 'string' ?
        toTypedArray(values, dtype) :
        flatten(values, [], true);
    return ENGINE.makeTensor(values, shape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * ```js
+ * // Pass an array of values to create a vector.
+ * tf.tensor([1, 2, 3, 4]).print();
+ * ```
+ *
+ * ```js
+ * // Pass a nested array of values to make a matrix or a higher
+ * // dimensional tensor.
+ * tf.tensor([[1, 2], [3, 4]]).print();
+ * ```
+ *
+ * ```js
+ * // Pass a flat array and specify a shape yourself.
+ * tf.tensor([1, 2, 3, 4], [2, 2]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`. If the values are strings,
+ * they will be encoded as utf-8 and kept as `Uint8Array[]`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Creates a `tf.Tensor` with the provided values, shape and dtype (see the
 * JSDoc above for usage examples).
 */
function tensor(values, shape, dtype) {
    // Infer the shape from the nesting of `values`, then defer to the shared
    // creation helper which validates it against the explicit `shape`.
    return makeTensor(values, shape, inferShape(values, dtype), dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/* Type definitions for exporting and importing of models. */
/**
 * A map from Tensor dtype to number of bytes per element of the Tensor.
 * Used for computing offsets when encoding/decoding weight buffers.
 */
var DTYPE_VALUE_SIZE_MAP = {
    'float32': 4,
    'float16': 2,
    'int32': 4,
    'uint16': 2,
    'uint8': 1,
    'bool': 1,
    'complex64': 8
};
+
/**
 * Number of bytes reserved for the length of the string. (32bit integer).
 * Each encoded string is prefixed with its byte length in this many bytes.
 */
var NUM_BYTES_STRING_LENGTH = 4;
+ /**
+ * Encode a map from names to weight values as an ArrayBuffer, along with an
+ * `Array` of `WeightsManifestEntry` as specification of the encoded weights.
+ *
+ * This function does not perform sharding.
+ *
+ * This function is the reverse of `decodeWeights`.
+ *
+ * @param tensors A map ("dict") from names to tensors.
+ * @param group Group to which the weights belong (optional).
+ * @returns A `Promise` of
+ * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s
+ * concatenated.
+ * - An `Array` of `WeightManifestEntry`s, carrying information including
+ * tensor names, `dtype`s and shapes.
+ * @throws Error: on unsupported tensor `dtype`.
+ */
// Transpiled async function (see JSDoc above): encodes named tensors into a
// single flat ArrayBuffer plus a list of WeightsManifestEntry specs.
function encodeWeights(tensors, group) {
    return __awaiter(this, void 0, void 0, function () {
        var specs, dataPromises, names, _loop_1, i, tensorValues;
        var _this = this;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    specs = [];
                    dataPromises = [];
                    // `tensors` may be an array of {name, tensor} entries or a
                    // plain name->tensor map.
                    names = Array.isArray(tensors) ?
                        tensors.map(function (tensor) { return tensor.name; }) :
                        Object.keys(tensors);
                    _loop_1 = function (i) {
                        var name = names[i];
                        var t = Array.isArray(tensors) ? tensors[i].tensor : tensors[name];
                        if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&
                            t.dtype !== 'string' && t.dtype !== 'complex64') {
                            throw new Error("Unsupported dtype in weight '" + name + "': " + t.dtype);
                        }
                        var spec = { name: name, shape: t.shape, dtype: t.dtype };
                        if (t.dtype === 'string') {
                            // Strings are encoded as [4-byte little-endian length,
                            // utf-8 bytes] per element.
                            var utf8bytes = new Promise(function (resolve) { return __awaiter(_this, void 0, void 0, function () {
                                var vals, totalNumBytes, bytes, offset, i_1, val, bytesOfLength;
                                return __generator(this, function (_a) {
                                    switch (_a.label) {
                                        case 0: return [4 /*yield*/, t.bytes()];
                                        case 1:
                                            vals = _a.sent();
                                            totalNumBytes = vals.reduce(function (p, c) { return p + c.length; }, 0) +
                                                NUM_BYTES_STRING_LENGTH * vals.length;
                                            bytes = new Uint8Array(totalNumBytes);
                                            offset = 0;
                                            for (i_1 = 0; i_1 < vals.length; i_1++) {
                                                val = vals[i_1];
                                                bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer);
                                                bytes.set(bytesOfLength, offset);
                                                offset += NUM_BYTES_STRING_LENGTH;
                                                bytes.set(val, offset);
                                                offset += val.length;
                                            }
                                            resolve(bytes);
                                            return [2 /*return*/];
                                    }
                                });
                            }); });
                            dataPromises.push(utf8bytes);
                        }
                        else {
                            dataPromises.push(t.data());
                        }
                        if (group != null) {
                            spec.group = group;
                        }
                        specs.push(spec);
                    };
                    for (i = 0; i < names.length; ++i) {
                        _loop_1(i);
                    }
                    return [4 /*yield*/, Promise.all(dataPromises)];
                case 1:
                    tensorValues = _a.sent();
                    return [2 /*return*/, { data: concatenateTypedArrays(tensorValues), specs: specs }];
            }
        });
    });
}
+ /**
+ * Decode flat ArrayBuffer as weights.
+ *
+ * This function does not handle sharding.
+ *
+ * This function is the reverse of `encodeWeights`.
+ *
+ * @param buffer A flat ArrayBuffer carrying the binary values of the tensors
+ * concatenated in the order specified in `specs`.
+ * @param specs Specifications of the names, dtypes and shapes of the tensors
+ * whose value are encoded by `buffer`.
+ * @return A map from tensor name to tensor value, with the names corresponding
+ * to names in `specs`.
+ * @throws Error, if any of the tensors has unsupported dtype.
+ */
// See JSDoc above: decodes a flat ArrayBuffer into named tensors, walking
// `specs` in order and advancing `offset` by each weight's byte size.
function decodeWeights(buffer, specs) {
    var e_1, _a;
    // TODO(adarob, cais): Support quantization.
    var out = {};
    var float16Decode;
    var offset = 0;
    try {
        for (var specs_1 = __values(specs), specs_1_1 = specs_1.next(); !specs_1_1.done; specs_1_1 = specs_1.next()) {
            var spec = specs_1_1.value;
            var name = spec.name;
            var dtype = spec.dtype;
            var shape = spec.shape;
            var size = sizeFromShape(shape);
            var values = void 0;
            if ('quantization' in spec) {
                // Quantized weight: validate metadata for the quantization
                // dtype, then dequantize into the target dtype.
                var quantization = spec.quantization;
                if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {
                    if (!('min' in quantization && 'scale' in quantization)) {
                        throw new Error("Weight " + spec.name + " with quantization " + quantization.dtype + " " +
                            "doesn't have corresponding metadata min and scale.");
                    }
                }
                else if (quantization.dtype === 'float16') {
                    if (dtype !== 'float32') {
                        throw new Error("Weight " + spec.name + " is quantized with " + quantization.dtype + " " +
                            ("which only supports weights of type float32 not " + dtype + "."));
                    }
                }
                else {
                    throw new Error("Weight " + spec.name + " has unknown " +
                        ("quantization dtype " + quantization.dtype + ". ") +
                        "Supported quantization dtypes are: " +
                        "'uint8', 'uint16', and 'float16'.");
                }
                var quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];
                var byteBuffer = buffer.slice(offset, offset + size * quantizationSizeFactor);
                var quantizedArray = (quantization.dtype === 'uint8') ?
                    new Uint8Array(byteBuffer) :
                    new Uint16Array(byteBuffer);
                if (dtype === 'float32') {
                    if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {
                        // Affine dequantization: v * scale + min.
                        values = new Float32Array(quantizedArray.length);
                        for (var i = 0; i < quantizedArray.length; i++) {
                            var v = quantizedArray[i];
                            values[i] = v * quantization.scale + quantization.min;
                        }
                    }
                    else if (quantization.dtype === 'float16') {
                        // Lazily build the float16 -> float32 lookup decoder.
                        if (float16Decode === undefined) {
                            float16Decode = getFloat16Decoder();
                        }
                        values = float16Decode(quantizedArray);
                    }
                    else {
                        throw new Error("Unsupported quantization type " + quantization.dtype + " " +
                            "for weight type float32.");
                    }
                }
                else if (dtype === 'int32') {
                    if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {
                        throw new Error("Unsupported quantization type " + quantization.dtype + " " +
                            "for weight type int32.");
                    }
                    values = new Int32Array(quantizedArray.length);
                    for (var i = 0; i < quantizedArray.length; i++) {
                        var v = quantizedArray[i];
                        values[i] = Math.round(v * quantization.scale + quantization.min);
                    }
                }
                else {
                    throw new Error("Unsupported dtype in weight '" + name + "': " + dtype);
                }
                offset += size * quantizationSizeFactor;
            }
            else if (dtype === 'string') {
                // Strings are stored as [4-byte length, utf-8 bytes] per
                // element (see encodeWeights).
                var size_1 = sizeFromShape(spec.shape);
                values = [];
                for (var i = 0; i < size_1; i++) {
                    var byteLength = new Uint32Array(buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];
                    offset += NUM_BYTES_STRING_LENGTH;
                    var bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));
                    values.push(bytes);
                    offset += byteLength;
                }
            }
            else {
                // Unquantized numeric weight: reinterpret the bytes directly.
                var dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];
                var byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);
                if (dtype === 'float32') {
                    values = new Float32Array(byteBuffer);
                }
                else if (dtype === 'int32') {
                    values = new Int32Array(byteBuffer);
                }
                else if (dtype === 'bool') {
                    values = new Uint8Array(byteBuffer);
                }
                else if (dtype === 'complex64') {
                    // Interleaved [r0, i0, r1, i1, ...]: split into real and
                    // imaginary tensors, then recombine via complex().
                    values = new Float32Array(byteBuffer);
                    var real = new Float32Array(values.length / 2);
                    var image = new Float32Array(values.length / 2);
                    for (var i = 0; i < real.length; i++) {
                        real[i] = values[i * 2];
                        image[i] = values[i * 2 + 1];
                    }
                    var realTensor = tensor(real, shape, 'float32');
                    var imageTensor = tensor(image, shape, 'float32');
                    out[name] = complex(realTensor, imageTensor);
                    realTensor.dispose();
                    imageTensor.dispose();
                }
                else {
                    throw new Error("Unsupported dtype in weight '" + name + "': " + dtype);
                }
                offset += size * dtypeFactor;
            }
            // complex64 was already inserted above via complex().
            if (dtype !== 'complex64') {
                out[name] = tensor(values, shape, dtype);
            }
        }
    }
    catch (e_1_1) { e_1 = { error: e_1_1 }; }
    finally {
        // Transpiled for-of cleanup: close the iterator, then rethrow any
        // captured error.
        try {
            if (specs_1_1 && !specs_1_1.done && (_a = specs_1.return)) _a.call(specs_1);
        }
        finally { if (e_1) throw e_1.error; }
    }
    return out;
}
/**
 * Concatenate TypedArrays into an ArrayBuffer.
 *
 * Only Float32Array, Int32Array and Uint8Array inputs are supported.
 *
 * @param xs Array of TypedArrays to concatenate.
 * @returns An ArrayBuffer holding the bytes of all inputs in order.
 * @throws Error if `xs` is null/undefined or contains an unsupported
 *     TypedArray subtype.
 */
function concatenateTypedArrays(xs) {
    // TODO(adarob, cais): Support quantization.
    // Fix: the original only caught `xs === null`; an `undefined` input fell
    // through and crashed with a TypeError on `xs.forEach` instead of the
    // intended error message.
    if (xs == null) {
        throw new Error("Invalid input value: " + JSON.stringify(xs));
    }
    var totalByteLength = 0;
    // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer`
    // can have a different byte length from that of the `TypedArray` itself,
    // for example, when the `TypedArray` is created from an offset in an
    // `ArrayBuffer`. `normalizedXs` holds `TypedArray`s whose `buffer`s match
    // the `TypedArray` in byte length. If an element of `xs` does not show
    // this property, a new `TypedArray` that satisfies this property will be
    // constructed and pushed into `normalizedXs`.
    var normalizedXs = [];
    xs.forEach(function (x) {
        // Validate the subtype before doing any work for this element.
        if (!(x instanceof Float32Array || x instanceof Int32Array ||
            x instanceof Uint8Array)) {
            throw new Error("Unsupported TypedArray subtype: " + x.constructor.name);
        }
        totalByteLength += x.byteLength;
        // tslint:disable-next-line:no-any
        normalizedXs.push(x.byteLength === x.buffer.byteLength ? x :
            new x.constructor(x));
    });
    var y = new Uint8Array(totalByteLength);
    var offset = 0;
    normalizedXs.forEach(function (x) {
        y.set(new Uint8Array(x.buffer), offset);
        offset += x.byteLength;
    });
    return y.buffer;
}
// Use Buffer on Node.js instead of Blob/atob/btoa: true when `Buffer`
// exists and at least one of the browser primitives is missing.
var useNodeBuffer = typeof Buffer !== 'undefined' &&
    (typeof Blob === 'undefined' || typeof atob === 'undefined' ||
        typeof btoa === 'undefined');
/**
 * Calculate the byte length of a JavaScript string.
 *
 * Note that a JavaScript string can contain wide characters, therefore the
 * length of the string is not necessarily equal to the byte length.
 *
 * @param str Input string.
 * @returns Byte length.
 */
function stringByteLength(str) {
    return useNodeBuffer ? Buffer.byteLength(str) : new Blob([str]).size;
}
/**
 * Encode an ArrayBuffer as a base64 encoded string.
 *
 * @param buffer `ArrayBuffer` to be converted.
 * @returns A string that base64-encodes `buffer`.
 */
function arrayBufferToBase64String(buffer) {
    if (useNodeBuffer) {
        return Buffer.from(buffer).toString('base64');
    }
    // Browser path: build a binary string byte-by-byte, then btoa it.
    var bytes = new Uint8Array(buffer);
    var binary = '';
    for (var i = 0; i < bytes.length; ++i) {
        binary += String.fromCharCode(bytes[i]);
    }
    return btoa(binary);
}
/**
 * Decode a base64 string as an ArrayBuffer.
 *
 * @param str Base64 string.
 * @returns Decoded `ArrayBuffer`.
 */
function base64StringToArrayBuffer(str) {
    if (useNodeBuffer) {
        var buf = Buffer.from(str, 'base64');
        // Slice out just this Buffer's window of the (possibly pooled)
        // underlying ArrayBuffer.
        return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
    }
    // Browser path: atob yields a binary string; copy its char codes.
    var decoded = atob(str);
    var bytes = new Uint8Array(decoded.length);
    for (var i = 0; i < decoded.length; ++i) {
        bytes[i] = decoded.charCodeAt(i);
    }
    return bytes.buffer;
}
/**
 * Concatenate a number of ArrayBuffers into one.
 *
 * @param buffers A number of array buffers to concatenate.
 * @returns Result of concatenating `buffers` in order.
 */
function concatenateArrayBuffers(buffers) {
    // Fast path: a single buffer is returned as-is, without copying.
    if (buffers.length === 1) {
        return buffers[0];
    }
    var totalByteLength = buffers.reduce(
        function (acc, b) { return acc + b.byteLength; }, 0);
    var joined = new Uint8Array(totalByteLength);
    var offset = 0;
    buffers.forEach(function (b) {
        joined.set(new Uint8Array(b), offset);
        offset += b.byteLength;
    });
    return joined.buffer;
}
/**
 * Get the basename of a path.
 *
 * Behaves in a way analogous to Linux's basename command.
 *
 * @param path
 */
function basename(path) {
    var SEPARATOR = '/';
    var trimmed = path.trim();
    // Drop any trailing separators before splitting.
    while (trimmed.endsWith(SEPARATOR)) {
        trimmed = trimmed.slice(0, -1);
    }
    var segments = trimmed.split(SEPARATOR);
    return segments[segments.length - 1];
}
/**
 * Create `ModelJSON` from `ModelArtifacts`.
 *
 * @param artifacts Model artifacts, describing the model and its weights.
 * @param manifest Weight manifest, describing where the weights of the
 *   `ModelArtifacts` are stored, and some metadata about them.
 * @returns Object representing the `model.json` file describing the model
 *   artifacts and weights
 */
function getModelJSONForModelArtifacts(artifacts, manifest) {
    var result = {
        modelTopology: artifacts.modelTopology,
        format: artifacts.format,
        generatedBy: artifacts.generatedBy,
        convertedBy: artifacts.convertedBy,
        weightsManifest: manifest
    };
    // Optional fields are only included when present on the artifacts.
    ['signature', 'userDefinedMetadata', 'modelInitializer', 'trainingConfig']
        .forEach(function (key) {
            if (artifacts[key] != null) {
                result[key] = artifacts[key];
            }
        });
    return result;
}
/**
 * Create `ModelArtifacts` from a JSON file.
 *
 * @param modelJSON Object containing the parsed JSON of `model.json`
 * @param loadWeights Function that takes the JSON file's weights manifest,
 *     reads weights from the listed path(s), and returns a Promise of the
 *     weight manifest entries along with the weights data.
 * @returns A Promise of the `ModelArtifacts`, as described by the JSON file.
 */
function getModelArtifactsForJSON(modelJSON, loadWeights) {
    return __awaiter(this, void 0, void 0, function () {
        var modelArtifacts, _a, weightSpecs, weightData;
        return __generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    modelArtifacts = {
                        modelTopology: modelJSON.modelTopology,
                        format: modelJSON.format,
                        generatedBy: modelJSON.generatedBy,
                        convertedBy: modelJSON.convertedBy
                    };
                    if (modelJSON.trainingConfig != null) {
                        modelArtifacts.trainingConfig = modelJSON.trainingConfig;
                    }
                    // Only await the weights loader when a manifest exists.
                    if (!(modelJSON.weightsManifest != null)) return [3 /*break*/, 2];
                    return [4 /*yield*/, loadWeights(modelJSON.weightsManifest)];
                case 1:
                    // loadWeights resolves to a [weightSpecs, weightData] pair.
                    _a = __read.apply(void 0, [_b.sent(), 2]), weightSpecs = _a[0], weightData = _a[1];
                    modelArtifacts.weightSpecs = weightSpecs;
                    modelArtifacts.weightData = weightData;
                    _b.label = 2;
                case 2:
                    // Copy optional metadata fields only when present.
                    if (modelJSON.signature != null) {
                        modelArtifacts.signature = modelJSON.signature;
                    }
                    if (modelJSON.userDefinedMetadata != null) {
                        modelArtifacts.userDefinedMetadata = modelJSON.userDefinedMetadata;
                    }
                    if (modelJSON.modelInitializer != null) {
                        modelArtifacts.modelInitializer = modelJSON.modelInitializer;
                    }
                    return [2 /*return*/, modelArtifacts];
            }
        });
    });
}
/**
 * Populate ModelArtifactsInfo fields for a model with JSON topology.
 *
 * @param modelArtifacts The artifacts whose sizes are to be summarized.
 * @returns A ModelArtifactsInfo object (date saved, topology type and the
 *     byte sizes of topology, weight specs and weight data).
 * @throws Error if the topology is a binary ArrayBuffer rather than JSON.
 */
function getModelArtifactsInfoForJSON(modelArtifacts) {
    if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
        throw new Error('Expected JSON model topology, received ArrayBuffer.');
    }
    // Size of a JSON-serializable value in bytes; 0 when absent.
    var jsonBytes = function (value) {
        return value == null ? 0 : stringByteLength(JSON.stringify(value));
    };
    return {
        dateSaved: new Date(),
        modelTopologyType: 'JSON',
        modelTopologyBytes: jsonBytes(modelArtifacts.modelTopology),
        weightSpecsBytes: jsonBytes(modelArtifacts.weightSpecs),
        weightDataBytes: modelArtifacts.weightData == null ?
            0 :
            modelArtifacts.weightData.byteLength,
    };
}
/**
 * Computes the mantissa lookup table for casting Float16 to Float32.
 * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
 *
 * @returns Uint32Array of 2048 mantissa lookup values.
 */
function computeFloat16MantisaTable() {
    // Normalizes a subnormal half-float mantissa into float32 bit layout:
    // shift until the implicit leading bit appears, adjusting the exponent
    // for every shift, then clear that bit and rebias.
    var convertMantissa = function (i) {
        var m = i << 13;
        var e = 0;
        while ((m & 0x00800000) === 0) {
            e -= 0x00800000;
            m <<= 1;
        }
        m &= ~0x00800000;
        e += 0x38800000;
        return m | e;
    };
    var table = new Uint32Array(2048);
    // Entry 0 stays 0 (typed arrays are zero-initialized). Entries below
    // 1024 are subnormals; the upper half maps normalized mantissas.
    for (var i = 1; i < 2048; ++i) {
        table[i] = i < 1024 ? convertMantissa(i) :
            0x38000000 + ((i - 1024) << 13);
    }
    return table;
}
/**
 * Computes the exponent lookup table for casting Float16 to Float32.
 * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
 *
 * @returns Uint32Array of 64 exponent lookup values.
 */
function computeFloat16ExponentTable() {
    var table = new Uint32Array(64);
    // Regular exponents: indices 1-30 are positive, 33-62 negative
    // (sign bit 0x80000000 set). Index 0 stays 0 (zero-initialized).
    for (var i = 1; i < 63; ++i) {
        if (i === 31 || i === 32) {
            continue;
        }
        table[i] = i < 32 ? (i << 23) : 0x80000000 + ((i - 32) << 23);
    }
    // Special entries: Inf/NaN exponents and negative zero.
    table[31] = 0x47800000;
    table[32] = 0x80000000;
    table[63] = 0xc7800000;
    return table;
}
/**
 * Computes the offset lookup table for casting Float16 to Float32.
 * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
 *
 * @returns Uint32Array of 64 offset values.
 */
function computeFloat16OffsetTable() {
    var table = new Uint32Array(64);
    table.fill(1024);
    // Entries 0 and 32 (positive and negative zero/subnormal exponents)
    // index into the subnormal half of the mantissa table.
    table[0] = 0;
    table[32] = 0;
    return table;
}
/**
 * Retrieve a Float16 decoder which will decode a ByteArray of Float16
 * values to a Float32Array.
 *
 * @returns Function (buffer: Uint16Array) => Float32Array which decodes
 *     the Uint16Array of Float16 bytes to a Float32Array.
 */
function getFloat16Decoder() {
    // Algorithm is based off of
    // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
    // The three lookup tables are built once and captured by the closure.
    var mantisaTable = computeFloat16MantisaTable();
    var exponentTable = computeFloat16ExponentTable();
    var offsetTable = computeFloat16OffsetTable();
    return function (quantizedArray) {
        var result = new Float32Array(quantizedArray.length);
        // Write the assembled float32 bit patterns through a Uint32 view
        // over the same underlying buffer.
        var asUint32 = new Uint32Array(result.buffer);
        for (var i = 0; i < quantizedArray.length; ++i) {
            var half = quantizedArray[i];
            var exponentIndex = half >> 10;
            asUint32[i] =
                mantisaTable[offsetTable[exponentIndex] + (half & 0x3ff)] +
                exponentTable[exponentIndex];
        }
        return result;
    };
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
var IORouterRegistry = /** @class */ (function () {
    function IORouterRegistry() {
        // Routers are consulted in registration order.
        this.saveRouters = [];
        this.loadRouters = [];
    }
    // Lazily-created process-wide singleton.
    IORouterRegistry.getInstance = function () {
        if (IORouterRegistry.instance == null) {
            IORouterRegistry.instance = new IORouterRegistry();
        }
        return IORouterRegistry.instance;
    };
    /**
     * Register a save-handler router.
     *
     * @param saveRouter A function that maps a URL-like string onto an
     *     instance of `IOHandler` with the `save` method defined or `null`.
     */
    IORouterRegistry.registerSaveRouter = function (saveRouter) {
        IORouterRegistry.getInstance().saveRouters.push(saveRouter);
    };
    /**
     * Register a load-handler router.
     *
     * @param loadRouter A function that maps a URL-like string onto an
     *     instance of `IOHandler` with the `load` method defined or `null`.
     */
    IORouterRegistry.registerLoadRouter = function (loadRouter) {
        IORouterRegistry.getInstance().loadRouters.push(loadRouter);
    };
    /**
     * Look up IOHandlers for saving, given a URL-like string.
     *
     * @param url
     * @returns All handlers whose router matched `url`; empty if none did.
     */
    IORouterRegistry.getSaveHandlers = function (url) {
        return IORouterRegistry.getHandlers(url, 'save');
    };
    /**
     * Look up IOHandlers for loading, given a URL-like string.
     *
     * @param url
     * @param loadOptions Optional, custom load options.
     * @returns All valid handlers for `url`, given the currently registered
     *     handler routers.
     */
    IORouterRegistry.getLoadHandlers = function (url, loadOptions) {
        return IORouterRegistry.getHandlers(url, 'load', loadOptions);
    };
    // Shared implementation: runs every registered router of the requested
    // kind over `url` and collects the non-null handlers.
    IORouterRegistry.getHandlers = function (url, handlerType, loadOptions) {
        var instance = IORouterRegistry.getInstance();
        var routers = handlerType === 'load' ? instance.loadRouters :
            instance.saveRouters;
        var validHandlers = [];
        for (var i = 0; i < routers.length; ++i) {
            var handler = routers[i](url, loadOptions);
            if (handler !== null) {
                validHandlers.push(handler);
            }
        }
        return validHandlers;
    };
    return IORouterRegistry;
}());
// Free-function conveniences delegating to the IORouterRegistry singleton.
var registerSaveRouter = function (saveRouter) {
    return IORouterRegistry.registerSaveRouter(saveRouter);
};
var registerLoadRouter = function (loadRouter) {
    return IORouterRegistry.registerLoadRouter(loadRouter);
};
var getSaveHandlers = function (url) {
    return IORouterRegistry.getSaveHandlers(url);
};
var getLoadHandlers = function (url, loadOptions) {
    return IORouterRegistry.getLoadHandlers(url, loadOptions);
};
+
// Name and schema version of the IndexedDB database used by the
// IndexedDB-backed IOHandler below.
var DATABASE_NAME = 'tensorflowjs';
var DATABASE_VERSION = 1;
// Model data and ModelArtifactsInfo (metadata) are stored in two separate
// stores for efficient access of the list of stored models and their metadata.
// 1. The object store for model data: topology, weights and weight manifests.
var MODEL_STORE_NAME = 'models_store';
// 2. The object store for ModelArtifactsInfo, including meta-information such
//    as the type of topology (JSON vs binary), byte size of the topology, byte
//    size of the weights, etc.
var INFO_STORE_NAME = 'model_info_store';
/**
 * Returns the environment's IndexedDB factory, trying the standard and
 * legacy vendor-prefixed globals in turn.
 *
 * @returns The IDBFactory object.
 * @throws Error if not running in a browser, or if no IndexedDB
 *     implementation is available.
 */
function getIndexedDBFactory() {
    if (!env().getBool('IS_BROWSER')) {
        // TODO(cais): Add more info about what IOHandler subtypes are available.
        // Maybe point to a doc page on the web and/or automatically determine
        // the available IOHandlers and print them in the error message.
        // Fix: the two concatenated fragments previously produced
        // "...environmentis not a web browser." (missing space).
        throw new Error('Failed to obtain IndexedDB factory because the current environment ' +
            'is not a web browser.');
    }
    // tslint:disable-next-line:no-any
    var theWindow = typeof window === 'undefined' ? self : window;
    var factory = theWindow.indexedDB || theWindow.mozIndexedDB ||
        theWindow.webkitIndexedDB || theWindow.msIndexedDB ||
        theWindow.shimIndexedDB;
    if (factory == null) {
        throw new Error('The current browser does not appear to support IndexedDB.');
    }
    return factory;
}
/**
 * onupgradeneeded callback: creates the model and model-info object
 * stores, both keyed by the model path.
 *
 * @param openRequest The IDBOpenDBRequest whose `result` is the database.
 */
function setUpDatabase(openRequest) {
    var db = openRequest.result;
    [MODEL_STORE_NAME, INFO_STORE_NAME].forEach(function (storeName) {
        db.createObjectStore(storeName, { keyPath: 'modelPath' });
    });
}
/**
 * IOHandler subclass: Browser IndexedDB.
 *
 * See the doc string of `browserIndexedDB` for more details.
 */
var BrowserIndexedDB = /** @class */ (function () {
    /**
     * @param modelPath A unique, non-empty string identifying the model.
     * @throws Error if IndexedDB is unavailable or `modelPath` is empty.
     */
    function BrowserIndexedDB(modelPath) {
        this.indexedDB = getIndexedDBFactory();
        if (modelPath == null || !modelPath) {
            throw new Error('For IndexedDB, modelPath must not be null, undefined or empty.');
        }
        this.modelPath = modelPath;
    }
    /**
     * Save model artifacts under `this.modelPath`.
     *
     * @param modelArtifacts The artifacts to persist. Binary (ArrayBuffer)
     *     topologies are not supported yet.
     * @returns A Promise of `SaveResult`.
     */
    BrowserIndexedDB.prototype.save = function (modelArtifacts) {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                // TODO(cais): Support saving GraphDef models.
                if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
                    // Fix: this message previously said "BrowserLocalStorage.save()",
                    // a copy-paste from the local-storage handler.
                    throw new Error('BrowserIndexedDB.save() does not support saving model topology ' +
                        'in binary formats yet.');
                }
                return [2 /*return*/, this.databaseAction(this.modelPath, modelArtifacts)];
            });
        });
    };
    /** Load the model artifacts stored under `this.modelPath`. */
    BrowserIndexedDB.prototype.load = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                return [2 /*return*/, this.databaseAction(this.modelPath)];
            });
        });
    };
    /**
     * Perform database action to put model artifacts into or read model artifacts
     * from IndexedDB object store.
     *
     * Whether the action is put or get depends on whether `modelArtifacts` is
     * specified. If it is specified, the action will be put; otherwise the action
     * will be get.
     *
     * @param modelPath A unique string path for the model.
     * @param modelArtifacts If specified, it will be the model artifacts to be
     *   stored in IndexedDB.
     * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`
     *   of `ModelArtifacts`, if the action is get.
     */
    BrowserIndexedDB.prototype.databaseAction = function (modelPath, modelArtifacts) {
        var _this = this;
        return new Promise(function (resolve, reject) {
            var openRequest = _this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
            openRequest.onupgradeneeded = function () { return setUpDatabase(openRequest); };
            openRequest.onsuccess = function () {
                var db = openRequest.result;
                if (modelArtifacts == null) {
                    // Read model out from object store.
                    var modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');
                    var modelStore = modelTx.objectStore(MODEL_STORE_NAME);
                    var getRequest_1 = modelStore.get(_this.modelPath);
                    getRequest_1.onsuccess = function () {
                        if (getRequest_1.result == null) {
                            db.close();
                            return reject(new Error("Cannot find model with path '" + _this.modelPath + "' " +
                                "in IndexedDB."));
                        }
                        else {
                            resolve(getRequest_1.result.modelArtifacts);
                        }
                    };
                    getRequest_1.onerror = function (error) {
                        db.close();
                        return reject(getRequest_1.error);
                    };
                    // The transaction's completion (not the get request's
                    // success) closes the database in the happy path.
                    modelTx.oncomplete = function () { return db.close(); };
                }
                else {
                    // Put model into object store.
                    var modelArtifactsInfo_1 = getModelArtifactsInfoForJSON(modelArtifacts);
                    // First, put ModelArtifactsInfo into info store.
                    var infoTx_1 = db.transaction(INFO_STORE_NAME, 'readwrite');
                    var infoStore_1 = infoTx_1.objectStore(INFO_STORE_NAME);
                    var putInfoRequest_1 = infoStore_1.put({ modelPath: _this.modelPath, modelArtifactsInfo: modelArtifactsInfo_1 });
                    var modelTx_1;
                    putInfoRequest_1.onsuccess = function () {
                        // Second, put model data into model store.
                        modelTx_1 = db.transaction(MODEL_STORE_NAME, 'readwrite');
                        var modelStore = modelTx_1.objectStore(MODEL_STORE_NAME);
                        var putModelRequest = modelStore.put({
                            modelPath: _this.modelPath,
                            modelArtifacts: modelArtifacts,
                            modelArtifactsInfo: modelArtifactsInfo_1
                        });
                        putModelRequest.onsuccess = function () { return resolve({ modelArtifactsInfo: modelArtifactsInfo_1 }); };
                        putModelRequest.onerror = function (error) {
                            // If the put-model request fails, roll back the info entry as
                            // well.
                            infoStore_1 = infoTx_1.objectStore(INFO_STORE_NAME);
                            var deleteInfoRequest = infoStore_1.delete(_this.modelPath);
                            deleteInfoRequest.onsuccess = function () {
                                db.close();
                                return reject(putModelRequest.error);
                            };
                            deleteInfoRequest.onerror = function (error) {
                                db.close();
                                return reject(putModelRequest.error);
                            };
                        };
                    };
                    putInfoRequest_1.onerror = function (error) {
                        db.close();
                        return reject(putInfoRequest_1.error);
                    };
                    infoTx_1.oncomplete = function () {
                        // Defer closing until the second (model) transaction,
                        // when one was started, has also completed.
                        if (modelTx_1 == null) {
                            db.close();
                        }
                        else {
                            modelTx_1.oncomplete = function () { return db.close(); };
                        }
                    };
                }
            };
            openRequest.onerror = function (error) { return reject(openRequest.error); };
        });
    };
    return BrowserIndexedDB;
}());
BrowserIndexedDB.URL_SCHEME = 'indexeddb://';
// Routes URLs of the form 'indexeddb://<modelPath>' to the IndexedDB
// IOHandler; returns null outside browsers or for non-matching URLs.
var indexedDBRouter = function (url) {
    if (!env().getBool('IS_BROWSER')) {
        return null;
    }
    if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {
        return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));
    }
    return null;
};
IORouterRegistry.registerSaveRouter(indexedDBRouter);
IORouterRegistry.registerLoadRouter(indexedDBRouter);
/**
 * Creates a browser IndexedDB IOHandler for saving and loading models.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(
 *     tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
 *
 * const saveResult = await model.save('indexeddb://MyModel');
 * console.log(saveResult);
 * ```
 *
 * @param modelPath A unique identifier for the model to be saved. Must be a
 *     non-empty string.
 * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`),
 *     which can be used with, e.g., `tf.Model.save`.
 */
function browserIndexedDB(modelPath) {
    var handler = new BrowserIndexedDB(modelPath);
    return handler;
}
// Strips a leading 'indexeddb://' scheme from `key`, if present.
function maybeStripScheme$1(key) {
    var scheme = BrowserIndexedDB.URL_SCHEME;
    return key.startsWith(scheme) ? key.slice(scheme.length) : key;
}
/**
 * ModelStoreManager for the IndexedDB medium: lists stored models'
 * metadata and removes models (info entry plus model-data entry).
 */
var BrowserIndexedDBManager = /** @class */ (function () {
    function BrowserIndexedDBManager() {
        this.indexedDB = getIndexedDBFactory();
    }
    /**
     * Lists all stored models.
     *
     * @returns A Promise of a map from model path to ModelArtifactsInfo,
     *     read from the info object store.
     */
    BrowserIndexedDBManager.prototype.listModels = function () {
        return __awaiter(this, void 0, void 0, function () {
            var _this = this;
            return __generator(this, function (_a) {
                return [2 /*return*/, new Promise(function (resolve, reject) {
                        var openRequest = _this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
                        openRequest.onupgradeneeded = function () { return setUpDatabase(openRequest); };
                        openRequest.onsuccess = function () {
                            var db = openRequest.result;
                            var tx = db.transaction(INFO_STORE_NAME, 'readonly');
                            var store = tx.objectStore(INFO_STORE_NAME);
                            // tslint:disable:max-line-length
                            // Need to cast `store` as `any` here because TypeScript's DOM
                            // library does not have the `getAll()` method even though the
                            // method is supported in the latest version of most mainstream
                            // browsers:
                            // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll
                            // tslint:enable:max-line-length
                            // tslint:disable-next-line:no-any
                            var getAllInfoRequest = store.getAll();
                            getAllInfoRequest.onsuccess = function () {
                                var e_1, _a;
                                var out = {};
                                try {
                                    for (var _b = __values(getAllInfoRequest.result), _c = _b.next(); !_c.done; _c = _b.next()) {
                                        var item = _c.value;
                                        out[item.modelPath] = item.modelArtifactsInfo;
                                    }
                                }
                                catch (e_1_1) { e_1 = { error: e_1_1 }; }
                                finally {
                                    try {
                                        if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
                                    }
                                    finally { if (e_1) throw e_1.error; }
                                }
                                resolve(out);
                            };
                            getAllInfoRequest.onerror = function (error) {
                                db.close();
                                return reject(getAllInfoRequest.error);
                            };
                            // The transaction's completion closes the database.
                            tx.oncomplete = function () { return db.close(); };
                        };
                        openRequest.onerror = function (error) { return reject(openRequest.error); };
                    })];
            });
        });
    };
    /**
     * Removes the model stored at `path`.
     *
     * Deletes the info-store entry first, then the model-store entry.
     *
     * @param path Model path; a leading 'indexeddb://' scheme is stripped.
     * @returns A Promise of the removed model's ModelArtifactsInfo.
     */
    BrowserIndexedDBManager.prototype.removeModel = function (path) {
        return __awaiter(this, void 0, void 0, function () {
            var _this = this;
            return __generator(this, function (_a) {
                path = maybeStripScheme$1(path);
                return [2 /*return*/, new Promise(function (resolve, reject) {
                        var openRequest = _this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
                        openRequest.onupgradeneeded = function () { return setUpDatabase(openRequest); };
                        openRequest.onsuccess = function () {
                            var db = openRequest.result;
                            var infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');
                            var infoStore = infoTx.objectStore(INFO_STORE_NAME);
                            var getInfoRequest = infoStore.get(path);
                            var modelTx;
                            getInfoRequest.onsuccess = function () {
                                if (getInfoRequest.result == null) {
                                    db.close();
                                    return reject(new Error("Cannot find model with path '" + path + "' " +
                                        "in IndexedDB."));
                                }
                                else {
                                    // First, delete the entry in the info store.
                                    var deleteInfoRequest = infoStore.delete(path);
                                    var deleteModelData_1 = function () {
                                        // Second, delete the entry in the model store.
                                        modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');
                                        var modelStore = modelTx.objectStore(MODEL_STORE_NAME);
                                        var deleteModelRequest = modelStore.delete(path);
                                        deleteModelRequest.onsuccess = function () { return resolve(getInfoRequest.result.modelArtifactsInfo); };
                                        // Fix: previously rejected with `getInfoRequest.error`,
                                        // which is null here (that request succeeded), hiding
                                        // the actual failure cause.
                                        deleteModelRequest.onerror = function (error) { return reject(deleteModelRequest.error); };
                                    };
                                    // Proceed with deleting model data regardless of whether deletion
                                    // of info data succeeds or not.
                                    deleteInfoRequest.onsuccess = deleteModelData_1;
                                    deleteInfoRequest.onerror = function (error) {
                                        deleteModelData_1();
                                        db.close();
                                        // Fix: reject with the failing request's own error rather
                                        // than `getInfoRequest.error` (null at this point).
                                        return reject(deleteInfoRequest.error);
                                    };
                                }
                            };
                            getInfoRequest.onerror = function (error) {
                                db.close();
                                return reject(getInfoRequest.error);
                            };
                            infoTx.oncomplete = function () {
                                // Defer closing until the model-store transaction,
                                // when one was started, has also completed.
                                if (modelTx == null) {
                                    db.close();
                                }
                                else {
                                    modelTx.oncomplete = function () { return db.close(); };
                                }
                            };
                        };
                        openRequest.onerror = function (error) { return reject(openRequest.error); };
                    })];
            });
        });
    };
    return BrowserIndexedDBManager;
}());
+
// Local-storage keys take the form:
//   tensorflowjs_models/<modelPath>/<suffix>
// where <suffix> selects which artifact the entry holds (see getModelKeys).
var PATH_SEPARATOR = '/';
var PATH_PREFIX = 'tensorflowjs_models';
var INFO_SUFFIX = 'info';
var MODEL_TOPOLOGY_SUFFIX = 'model_topology';
var WEIGHT_SPECS_SUFFIX = 'weight_specs';
var WEIGHT_DATA_SUFFIX = 'weight_data';
var MODEL_METADATA_SUFFIX = 'model_metadata';
/**
 * Maps a model path to the five local-storage keys holding its artifacts.
 *
 * @param path The model path (scheme already stripped).
 */
function getModelKeys(path) {
    var keyFor = function (suffix) {
        return [PATH_PREFIX, path, suffix].join(PATH_SEPARATOR);
    };
    return {
        info: keyFor(INFO_SUFFIX),
        topology: keyFor(MODEL_TOPOLOGY_SUFFIX),
        weightSpecs: keyFor(WEIGHT_SPECS_SUFFIX),
        weightData: keyFor(WEIGHT_DATA_SUFFIX),
        modelMetadata: keyFor(MODEL_METADATA_SUFFIX)
    };
}
/**
 * Removes every local-storage entry listed in `keys`.
 *
 * @param keys Object whose values are local-storage keys to remove.
 */
function removeItems(keys) {
    var allKeys = Object.values(keys);
    for (var i = 0; i < allKeys.length; ++i) {
        window.localStorage.removeItem(allKeys[i]);
    }
}
/**
 * Get model path from a local-storage key.
 *
 * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'
 *
 * @param key A local-storage key of the form prefix/<modelPath>/suffix.
 * @throws Error if the key has fewer than three '/'-separated segments.
 */
function getModelPathFromKey(key) {
    var segments = key.split(PATH_SEPARATOR);
    if (segments.length < 3) {
        throw new Error("Invalid key format: " + key);
    }
    // Everything between the prefix and the trailing suffix.
    return segments.slice(1, -1).join(PATH_SEPARATOR);
}
// Strips a leading 'localstorage://' scheme from `key`, if present.
function maybeStripScheme(key) {
    var scheme = BrowserLocalStorage.URL_SCHEME;
    return key.startsWith(scheme) ? key.slice(scheme.length) : key;
}
/**
 * IOHandler subclass: Browser Local Storage.
 *
 * See the doc string to `browserLocalStorage` for more details.
 */
var BrowserLocalStorage = /** @class */ (function () {
    // Validates the environment, then precomputes the five localStorage
    // keys (info/topology/weightSpecs/weightData/modelMetadata) for
    // `modelPath` via getModelKeys.
    function BrowserLocalStorage(modelPath) {
        if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||
            typeof window.localStorage === 'undefined') {
            // TODO(cais): Add more info about what IOHandler subtypes are
            // available.
            // Maybe point to a doc page on the web and/or automatically determine
            // the available IOHandlers and print them in the error message.
            throw new Error('The current environment does not support local storage.');
        }
        this.LS = window.localStorage;
        if (modelPath == null || !modelPath) {
            throw new Error('For local storage, modelPath must not be null, undefined or empty.');
        }
        this.modelPath = modelPath;
        this.keys = getModelKeys(this.modelPath);
    }
    /**
     * Save model artifacts to browser local storage.
     *
     * See the documentation to `browserLocalStorage` for details on the saved
     * artifacts.
     *
     * @param modelArtifacts The model artifacts to be stored.
     * @returns An instance of SaveResult.
     */
    BrowserLocalStorage.prototype.save = function (modelArtifacts) {
        return __awaiter(this, void 0, void 0, function () {
            var topology, weightSpecs, modelArtifactsInfo, metadata;
            return __generator(this, function (_a) {
                if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
                    throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +
                        'in binary formats yet.');
                }
                else {
                    topology = JSON.stringify(modelArtifacts.modelTopology);
                    weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);
                    modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);
                    try {
                        // Write all five entries; binary weight data is stored
                        // as a base64 string because localStorage only holds
                        // strings.
                        this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));
                        this.LS.setItem(this.keys.topology, topology);
                        this.LS.setItem(this.keys.weightSpecs, weightSpecs);
                        this.LS.setItem(this.keys.weightData, arrayBufferToBase64String(modelArtifacts.weightData));
                        // Optional fields are normalized to `undefined` so that
                        // JSON.stringify drops absent ones from the metadata
                        // entry.
                        metadata = {
                            format: modelArtifacts.format,
                            generatedBy: modelArtifacts.generatedBy,
                            convertedBy: modelArtifacts.convertedBy,
                            signature: modelArtifacts.signature != null ?
                                modelArtifacts.signature :
                                undefined,
                            userDefinedMetadata: modelArtifacts.userDefinedMetadata != null ?
                                modelArtifacts.userDefinedMetadata :
                                undefined,
                            modelInitializer: modelArtifacts.modelInitializer != null ?
                                modelArtifacts.modelInitializer :
                                undefined,
                            trainingConfig: modelArtifacts.trainingConfig != null ?
                                modelArtifacts.trainingConfig :
                                undefined
                        };
                        this.LS.setItem(this.keys.modelMetadata, JSON.stringify(metadata));
                        return [2 /*return*/, { modelArtifactsInfo: modelArtifactsInfo }];
                    }
                    catch (err) {
                        // If saving failed, clean up all items saved so far.
                        removeItems(this.keys);
                        throw new Error("Failed to save model '" + this.modelPath + "' to local storage: " +
                            "size quota being exceeded is a possible cause of this failure: " +
                            ("modelTopologyBytes=" + modelArtifactsInfo.modelTopologyBytes + ", ") +
                            ("weightSpecsBytes=" + modelArtifactsInfo.weightSpecsBytes + ", ") +
                            ("weightDataBytes=" + modelArtifactsInfo.weightDataBytes + "."));
                    }
                }
                return [2 /*return*/];
            });
        });
    };
    /**
     * Load a model from local storage.
     *
     * See the documentation to `browserLocalStorage` for details on the saved
     * artifacts.
     *
     * @returns The loaded model (if loading succeeds).
     */
    BrowserLocalStorage.prototype.load = function () {
        return __awaiter(this, void 0, void 0, function () {
            var info, out, topology, weightSpecs, metadataString, metadata, weightDataBase64;
            return __generator(this, function (_a) {
                // The info entry is required and must describe a JSON topology.
                info = JSON.parse(this.LS.getItem(this.keys.info));
                if (info == null) {
                    throw new Error("In local storage, there is no model with name '" + this.modelPath + "'");
                }
                if (info.modelTopologyType !== 'JSON') {
                    throw new Error('BrowserLocalStorage does not support loading non-JSON model ' +
                        'topology yet.');
                }
                out = {};
                // Topology and weight specs are required; their absence is an
                // error even when the info entry exists.
                topology = JSON.parse(this.LS.getItem(this.keys.topology));
                if (topology == null) {
                    throw new Error("In local storage, the topology of model '" + this.modelPath + "' " +
                        "is missing.");
                }
                out.modelTopology = topology;
                weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));
                if (weightSpecs == null) {
                    throw new Error("In local storage, the weight specs of model '" + this.modelPath + "' " +
                        "are missing.");
                }
                out.weightSpecs = weightSpecs;
                // The metadata entry is optional (older saves may lack it);
                // copy only the optional fields that are present.
                metadataString = this.LS.getItem(this.keys.modelMetadata);
                if (metadataString != null) {
                    metadata = JSON.parse(metadataString);
                    out.format = metadata.format;
                    out.generatedBy = metadata.generatedBy;
                    out.convertedBy = metadata.convertedBy;
                    if (metadata.signature != null) {
                        out.signature = metadata.signature;
                    }
                    if (metadata.userDefinedMetadata != null) {
                        out.userDefinedMetadata = metadata.userDefinedMetadata;
                    }
                    if (metadata.modelInitializer != null) {
                        out.modelInitializer = metadata.modelInitializer;
                    }
                    if (metadata.trainingConfig != null) {
                        out.trainingConfig = metadata.trainingConfig;
                    }
                }
                // Weight data was stored base64-encoded; decode it back into
                // an ArrayBuffer.
                weightDataBase64 = this.LS.getItem(this.keys.weightData);
                if (weightDataBase64 == null) {
                    throw new Error("In local storage, the binary weight values of model " +
                        ("'" + this.modelPath + "' are missing."));
                }
                out.weightData = base64StringToArrayBuffer(weightDataBase64);
                return [2 /*return*/, out];
            });
        });
    };
    return BrowserLocalStorage;
}());
BrowserLocalStorage.URL_SCHEME = 'localstorage://';
// Routes URLs of the form 'localstorage://<modelPath>' to the local-storage
// IOHandler; returns null outside browsers or for non-matching URLs.
var localStorageRouter = function (url) {
    if (!env().getBool('IS_BROWSER')) {
        return null;
    }
    if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {
        return browserLocalStorage(url.slice(BrowserLocalStorage.URL_SCHEME.length));
    }
    return null;
};
IORouterRegistry.registerSaveRouter(localStorageRouter);
IORouterRegistry.registerLoadRouter(localStorageRouter);
/**
 * Factory function for local storage IOHandler.
 *
 * This `IOHandler` supports both `save` and `load`.
 *
 * For each model's saved artifacts, these items are saved to local storage
 * (note the keys are rooted at PATH_PREFIX, i.e. 'tensorflowjs_models'):
 * - `${PATH_PREFIX}/${modelPath}/info`: Contains meta-info about the
 *   model, such as date saved, type of the topology, size in bytes, etc.
 * - `${PATH_PREFIX}/${modelPath}/model_topology`: Model topology. For
 *   Keras-style models, this is a stringified JSON.
 * - `${PATH_PREFIX}/${modelPath}/weight_specs`: Weight specs of the
 *   model, can be used to decode the saved binary weight values (see
 *   item below).
 * - `${PATH_PREFIX}/${modelPath}/weight_data`: Concatenated binary
 *   weight values, stored as a base64-encoded string.
 *
 * Saving may throw an `Error` if the total size of the artifacts exceed the
 * browser-specific quota.
 *
 * @param modelPath A unique identifier for the model to be saved. Must be a
 *     non-empty string.
 * @returns An instance of `IOHandler`, which can be used with, e.g.,
 *     `tf.Model.save`.
 */
function browserLocalStorage(modelPath) {
    var handler = new BrowserLocalStorage(modelPath);
    return handler;
}
// ModelStoreManager for the local-storage medium: lists stored models'
// metadata and removes models by deleting all of their localStorage keys.
var BrowserLocalStorageManager = /** @class */ (function () {
    // NOTE(review): if `window` is undefined the second assertion passes
    // but the `window.localStorage` access below would throw — this code
    // assumes IS_BROWSER implies `window` exists; confirm against env().
    function BrowserLocalStorageManager() {
        assert(env().getBool('IS_BROWSER'), function () { return 'Current environment is not a web browser'; });
        assert(typeof window === 'undefined' ||
            typeof window.localStorage !== 'undefined', function () { return 'Current browser does not appear to support localStorage'; });
        this.LS = window.localStorage;
    }
    /**
     * Lists all models saved under the local-storage scheme by scanning
     * every localStorage key of the form
     * 'tensorflowjs_models/<modelPath>/info' and parsing its stored
     * ModelArtifactsInfo.
     *
     * @returns A Promise of a map from model path to ModelArtifactsInfo.
     */
    BrowserLocalStorageManager.prototype.listModels = function () {
        return __awaiter(this, void 0, void 0, function () {
            var out, prefix, suffix, i, key, modelPath;
            return __generator(this, function (_a) {
                out = {};
                prefix = PATH_PREFIX + PATH_SEPARATOR;
                suffix = PATH_SEPARATOR + INFO_SUFFIX;
                for (i = 0; i < this.LS.length; ++i) {
                    key = this.LS.key(i);
                    if (key.startsWith(prefix) && key.endsWith(suffix)) {
                        modelPath = getModelPathFromKey(key);
                        out[modelPath] = JSON.parse(this.LS.getItem(key));
                    }
                }
                return [2 /*return*/, out];
            });
        });
    };
    /**
     * Removes a model by deleting all of its localStorage entries.
     *
     * @param path Model path; a leading 'localstorage://' scheme is stripped.
     * @returns A Promise of the removed model's ModelArtifactsInfo.
     * @throws Error if no model exists at `path`.
     */
    BrowserLocalStorageManager.prototype.removeModel = function (path) {
        return __awaiter(this, void 0, void 0, function () {
            var keys, info;
            return __generator(this, function (_a) {
                path = maybeStripScheme(path);
                keys = getModelKeys(path);
                if (this.LS.getItem(keys.info) == null) {
                    throw new Error("Cannot find model at path '" + path + "'");
                }
                info = JSON.parse(this.LS.getItem(keys.info));
                removeItems(keys);
                return [2 /*return*/, info];
            });
        });
    };
    return BrowserLocalStorageManager;
}());
+
var URL_SCHEME_SUFFIX = '://';
// Singleton registry mapping a URL scheme (without '://') to the
// ModelStoreManager responsible for that storage medium.
var ModelStoreManagerRegistry = /** @class */ (function () {
    function ModelStoreManagerRegistry() {
        this.managers = {};
    }
    ModelStoreManagerRegistry.getInstance = function () {
        if (ModelStoreManagerRegistry.instance == null) {
            ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();
        }
        return ModelStoreManagerRegistry.instance;
    };
    /**
     * Register a model-store manager for a URL scheme.
     *
     * @param scheme The scheme, with or without a trailing '://'.
     * @param manager The ModelStoreManager to associate with the scheme.
     */
    ModelStoreManagerRegistry.registerManager = function (scheme, manager) {
        assert(scheme != null, function () { return 'scheme must not be undefined or null.'; });
        var normalized = scheme;
        if (normalized.endsWith(URL_SCHEME_SUFFIX)) {
            normalized = normalized.slice(0, normalized.indexOf(URL_SCHEME_SUFFIX));
        }
        assert(normalized.length > 0, function () { return 'scheme must not be an empty string.'; });
        var registry = ModelStoreManagerRegistry.getInstance();
        assert(registry.managers[normalized] == null, function () { return "A model store manager is already registered for scheme '" + normalized + "'."; });
        registry.managers[normalized] = manager;
    };
    /**
     * Look up the manager for a scheme.
     *
     * @param scheme Scheme string without the '://' suffix.
     * @throws Error if no manager is registered for `scheme`.
     */
    ModelStoreManagerRegistry.getManager = function (scheme) {
        var manager = this.getInstance().managers[scheme];
        if (manager == null) {
            throw new Error("Cannot find model manager for scheme '" + scheme + "'");
        }
        return manager;
    };
    /** Returns all registered schemes. */
    ModelStoreManagerRegistry.getSchemes = function () {
        return Object.keys(this.getInstance().managers);
    };
    return ModelStoreManagerRegistry;
}());
/**
 * Helper method for parsing a URL string into a scheme and a path.
 *
 * @param url E.g., 'localstorage://my-model'
 * @returns A dictionary with two fields: scheme and path.
 *     Scheme: e.g., 'localstorage' in the example above.
 *     Path: e.g., 'my-model' in the example above.
 * @throws Error if `url` contains no '://' separator.
 */
function parseURL(url) {
    if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {
        throw new Error("The url string provided does not contain a scheme. " +
            "Supported schemes are: " +
            ("" + ModelStoreManagerRegistry.getSchemes().join(',')));
    }
    var parts = url.split(URL_SCHEME_SUFFIX);
    return {
        scheme: parts[0],
        path: parts[1],
    };
}
/**
 * Copies the model at `sourceURL` to `destURL`, optionally deleting the
 * source afterwards (which turns the copy into a move).
 *
 * Transpiled async state machine: case 0 validates and resolves handlers,
 * cases 1-2 load (and, same-medium, pre-delete), cases 3-5 save (and,
 * cross-medium, post-delete), case 6 returns.
 *
 * @param sourceURL URL the model is loaded from.
 * @param destURL URL the model is saved to; must differ from `sourceURL`.
 * @param deleteSource When true, remove the source model. For a same-medium
 *     move the source is removed *before* saving (frees quota for the save);
 *     cross-medium, it is removed only *after* the save succeeds.
 * @returns Promise of the saved model's `ModelArtifactsInfo`.
 */
function cloneModelInternal(sourceURL, destURL, deleteSource) {
    if (deleteSource === void 0) { deleteSource = false; }
    return __awaiter(this, void 0, void 0, function () {
        var loadHandlers, loadHandler, saveHandlers, saveHandler, sourceScheme, sourcePath, sameMedium, modelArtifacts, saveResult;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    assert(sourceURL !== destURL, function () { return "Old path and new path are the same: '" + sourceURL + "'"; });
                    loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);
                    assert(loadHandlers.length > 0, function () { return "Copying failed because no load handler is found for source URL " + sourceURL + "."; });
                    assert(loadHandlers.length < 2, function () { return "Copying failed because more than one (" + loadHandlers.length + ") " +
                        ("load handlers for source URL " + sourceURL + "."); });
                    loadHandler = loadHandlers[0];
                    saveHandlers = IORouterRegistry.getSaveHandlers(destURL);
                    assert(saveHandlers.length > 0, function () { return "Copying failed because no save handler is found for destination " +
                        ("URL " + destURL + "."); });
                    // Fixed: the message previously reported loadHandlers.length.
                    assert(saveHandlers.length < 2, function () { return "Copying failed because more than one (" + saveHandlers.length + ") " +
                        ("save handlers for destination URL " + destURL + "."); });
                    saveHandler = saveHandlers[0];
                    sourceScheme = parseURL(sourceURL).scheme;
                    sourcePath = parseURL(sourceURL).path;
                    // Fixed: compare against the *destination* scheme. The original
                    // compared the source scheme with itself, so `sameMedium` was
                    // always true and a cross-medium move deleted the source before
                    // the save instead of after it.
                    sameMedium = sourceScheme === parseURL(destURL).scheme;
                    return [4 /*yield*/, loadHandler.load()];
                case 1:
                    modelArtifacts = _a.sent();
                    // Same-medium move: delete the source before saving so the
                    // storage medium has room for the copy.
                    if (!(deleteSource && sameMedium)) return [3 /*break*/, 3];
                    return [4 /*yield*/, ModelStoreManagerRegistry.getManager(sourceScheme)
                            .removeModel(sourcePath)];
                case 2:
                    _a.sent();
                    _a.label = 3;
                case 3: return [4 /*yield*/, saveHandler.save(modelArtifacts)];
                case 4:
                    saveResult = _a.sent();
                    // Cross-medium move: only delete the source once the save has
                    // succeeded.
                    if (!(deleteSource && !sameMedium)) return [3 /*break*/, 6];
                    return [4 /*yield*/, ModelStoreManagerRegistry.getManager(sourceScheme)
                            .removeModel(sourcePath)];
                case 5:
                    _a.sent();
                    _a.label = 6;
                case 6: return [2 /*return*/, saveResult.modelArtifactsInfo];
            }
        });
    });
}
+ /**
+ * List all models stored in registered storage mediums.
+ *
+ * For a web browser environment, the registered mediums are Local Storage and
+ * IndexedDB.
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Delete the model.
+ * await tf.io.removeModel('localstorage://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ * ```
+ *
+ * @returns A `Promise` of a dictionary mapping URLs of existing models to
+ * their model artifacts info. URLs include medium-specific schemes, e.g.,
+ * 'indexeddb://my/model/1'. Model artifacts info include type of the
+ * model's topology, byte sizes of the topology, weights, etc.
+ *
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
/**
 * Lists models across every registered storage medium.
 *
 * Transpiled async state machine: case 0 initializes, cases 1-4 implement the
 * for...of loop over schemes (iterator cleanup lives in case 7), case 8
 * returns the accumulated map.
 *
 * @returns Promise of a dict mapping fully-qualified model URLs
 *     (scheme + '://' + path) to each manager's reported artifacts info.
 */
function listModels() {
    return __awaiter(this, void 0, void 0, function () {
        var schemes, out, schemes_1, schemes_1_1, scheme, schemeOut, path, url, e_1_1;
        var e_1, _a;
        return __generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    schemes = ModelStoreManagerRegistry.getSchemes();
                    out = {};
                    _b.label = 1;
                case 1:
                    // try-region covering the loop: catch is case 6, finally case 7.
                    _b.trys.push([1, 6, 7, 8]);
                    schemes_1 = __values(schemes), schemes_1_1 = schemes_1.next();
                    _b.label = 2;
                case 2:
                    // Loop condition: exit once the scheme iterator is exhausted.
                    if (!!schemes_1_1.done) return [3 /*break*/, 5];
                    scheme = schemes_1_1.value;
                    return [4 /*yield*/, ModelStoreManagerRegistry.getManager(scheme).listModels()];
                case 3:
                    schemeOut = _b.sent();
                    // Qualify each manager-local path with its scheme to form the
                    // full URL used as the result key.
                    for (path in schemeOut) {
                        url = scheme + URL_SCHEME_SUFFIX + path;
                        out[url] = schemeOut[path];
                    }
                    _b.label = 4;
                case 4:
                    schemes_1_1 = schemes_1.next();
                    return [3 /*break*/, 2];
                case 5: return [3 /*break*/, 8];
                case 6:
                    // Stash a thrown error so the finally handler can rethrow it.
                    e_1_1 = _b.sent();
                    e_1 = { error: e_1_1 };
                    return [3 /*break*/, 8];
                case 7:
                    try {
                        // Close the iterator if the loop exited early (for...of contract).
                        if (schemes_1_1 && !schemes_1_1.done && (_a = schemes_1.return)) _a.call(schemes_1);
                    }
                    finally { if (e_1) throw e_1.error; }
                    return [7 /*endfinally*/];
                case 8: return [2 /*return*/, out];
            }
        });
    });
}
+ /**
 * Remove a model specified by URL from a registered storage medium.
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Delete the model.
+ * await tf.io.removeModel('localstorage://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ * ```
+ *
+ * @param url A URL to a stored model, with a scheme prefix, e.g.,
+ * 'localstorage://my-model-1', 'indexeddb://my/model/2'.
+ * @returns ModelArtifactsInfo of the deleted model (if and only if deletion
+ * is successful).
+ * @throws Error if deletion fails, e.g., if no model exists at `path`.
+ *
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
/**
 * Deletes the model stored at `url` via its scheme's registered manager.
 * Resolves with the removed model's `ModelArtifactsInfo`.
 */
function removeModel(url) {
    return __awaiter(this, void 0, void 0, function () {
        var parsed, manager;
        return __generator(this, function (_a) {
            parsed = parseURL(url);
            manager = ModelStoreManagerRegistry.getManager(parsed.scheme);
            return [2 /*return*/, manager.removeModel(parsed.path)];
        });
    });
}
+ /**
+ * Copy a model from one URL to another.
+ *
+ * This function supports:
+ *
+ * 1. Copying within a storage medium, e.g.,
+ * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`
+ * 2. Copying between two storage mediums, e.g.,
+ * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Copy the model, from Local Storage to IndexedDB.
+ * await tf.io.copyModel(
+ * 'localstorage://demo/management/model1',
+ * 'indexeddb://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Remove both models.
+ * await tf.io.removeModel('localstorage://demo/management/model1');
+ * await tf.io.removeModel('indexeddb://demo/management/model1');
+ * ```
+ *
+ * @param sourceURL Source URL of copying.
+ * @param destURL Destination URL of copying.
+ * @returns ModelArtifactsInfo of the copied model (if and only if copying
+ * is successful).
+ * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or
+ * if `oldPath` and `newPath` are identical.
+ *
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
/** Copies a model between URLs: a clone that keeps the source. */
function copyModel(sourceURL, destURL) {
    return __awaiter(this, void 0, void 0, function () {
        return __generator(this, function (_a) {
            return [2 /*return*/, cloneModelInternal(sourceURL, destURL, /* deleteSource= */ false)];
        });
    });
}
+ /**
+ * Move a model from one URL to another.
+ *
+ * This function supports:
+ *
+ * 1. Moving within a storage medium, e.g.,
+ * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`
+ * 2. Moving between two storage mediums, e.g.,
+ * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`
+ *
+ * ```js
+ * // First create and save a model.
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * await model.save('localstorage://demo/management/model1');
+ *
+ * // Then list existing models.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Move the model, from Local Storage to IndexedDB.
+ * await tf.io.moveModel(
+ * 'localstorage://demo/management/model1',
+ * 'indexeddb://demo/management/model1');
+ *
+ * // List models again.
+ * console.log(JSON.stringify(await tf.io.listModels()));
+ *
+ * // Remove the moved model.
+ * await tf.io.removeModel('indexeddb://demo/management/model1');
+ * ```
+ *
+ * @param sourceURL Source URL of moving.
+ * @param destURL Destination URL of moving.
 * @returns ModelArtifactsInfo of the moved model (if and only if the move
 * is successful).
+ * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or
+ * if `oldPath` and `newPath` are identical.
+ *
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Management',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
/** Moves a model between URLs: a clone that deletes the source afterwards. */
function moveModel(sourceURL, destURL) {
    return __awaiter(this, void 0, void 0, function () {
        return __generator(this, function (_a) {
            return [2 /*return*/, cloneModelInternal(sourceURL, destURL, /* deleteSource= */ true)];
        });
    });
}
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
var PlatformBrowser = /** @class */ (function () {
    /**
     * Platform implementation backed by the standard browser globals
     * `fetch`, `performance`, `TextEncoder` and `TextDecoder`.
     */
    function PlatformBrowser() {
    }
    // Delegate straight to the environment's global fetch.
    PlatformBrowser.prototype.fetch = function (path, init) {
        return fetch(path, init);
    };
    // High-resolution timestamp in milliseconds.
    PlatformBrowser.prototype.now = function () {
        return performance.now();
    };
    /**
     * Encodes `text` to bytes. The browser's built-in encoder supports only
     * UTF-8, so every other encoding name is rejected.
     */
    PlatformBrowser.prototype.encode = function (text, encoding) {
        var isUtf8 = encoding === 'utf-8' || encoding === 'utf8';
        if (!isUtf8) {
            throw new Error("Browser's encoder only supports utf-8, but got " + encoding);
        }
        // Lazily create and cache a single TextEncoder instance.
        if (this.textEncoder == null) {
            this.textEncoder = new TextEncoder();
        }
        return this.textEncoder.encode(text);
    };
    // Decoding accepts any encoding the host's TextDecoder understands.
    PlatformBrowser.prototype.decode = function (bytes, encoding) {
        return new TextDecoder(encoding).decode(bytes);
    };
    return PlatformBrowser;
}());
// When running in a browser, install the browser platform and register the
// storage-backed model store managers.
if (env().get('IS_BROWSER')) {
    env().setPlatform('browser', new PlatformBrowser());
    // Register LocalStorage IOHandler
    try {
        ModelStoreManagerRegistry.registerManager(BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager());
    }
    catch (err) {
        // Swallowed on purpose: registration is best-effort. registerManager
        // throws on a duplicate scheme; the manager constructor presumably
        // throws when local storage is unavailable — confirm against upstream.
    }
    // Register IndexedDB IOHandler
    try {
        ModelStoreManagerRegistry.registerManager(BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());
    }
    catch (err) {
        // Best-effort, as above: an unavailable IndexedDB must not break startup.
    }
}
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// We are wrapping this within an object so it can be stubbed by Jasmine.
var getNodeFetch = {
    // tslint:disable-next-line:no-require-imports
    importFetch: function () { return require('node-fetch'); }
};
// Lazily-resolved `node-fetch` function; populated on the first fetch() call
// that finds no fetch on the global environment.
var systemFetch;
var PlatformNode = /** @class */ (function () {
    /** Platform implementation for Node.js, built on `util` and `process`. */
    function PlatformNode() {
        // tslint:disable-next-line:no-require-imports
        this.util = require('util');
        // According to the spec, the built-in encoder can do only UTF-8 encoding.
        // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder
        this.textEncoder = new this.util.TextEncoder();
    }
    // Prefer a fetch injected on the global environment; otherwise fall back
    // to (and cache) the `node-fetch` package. The indirection through
    // `getNodeFetch.importFetch()` must be kept so tests can stub it.
    PlatformNode.prototype.fetch = function (path, requestInits) {
        if (env().global.fetch != null) {
            return env().global.fetch(path, requestInits);
        }
        if (systemFetch == null) {
            systemFetch = getNodeFetch.importFetch();
        }
        return systemFetch(path, requestInits);
    };
    // Millisecond timestamp derived from process.hrtime()'s [seconds, nanos].
    PlatformNode.prototype.now = function () {
        var time = process.hrtime();
        return time[0] * 1000 + time[1] / 1000000;
    };
    // Only UTF-8 is supported, mirroring the browser platform's encoder.
    PlatformNode.prototype.encode = function (text, encoding) {
        if (encoding !== 'utf-8' && encoding !== 'utf8') {
            throw new Error("Node built-in encoder only supports utf-8, but got " + encoding);
        }
        return this.textEncoder.encode(text);
    };
    PlatformNode.prototype.decode = function (bytes, encoding) {
        // NOTE(review): empty input is short-circuited before reaching
        // TextDecoder — presumably guarding older Node behavior; confirm.
        if (bytes.length === 0) {
            return '';
        }
        return new this.util.TextDecoder(encoding).decode(bytes);
    };
    return PlatformNode;
}());
// Install the Node platform when running under Node.js.
if (env().get('IS_NODE')) {
    env().setPlatform('node', new PlatformNode());
}
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.
+ *
+ * The values are stored in CPU as `TypedArray`. Fill the buffer using
+ * `buffer.set()`, or by modifying directly `buffer.values`.
+ *
+ * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with
+ * those values.
+ *
+ * ```js
+ * // Create a buffer and set values at particular indices.
+ * const buffer = tf.buffer([2, 2]);
+ * buffer.set(3, 0, 0);
+ * buffer.set(5, 1, 0);
+ *
+ * // Convert the buffer back to a tensor.
+ * buffer.toTensor().print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param dtype The dtype of the buffer. Defaults to 'float32'.
+ * @param values The values of the buffer as `TypedArray`. Defaults to
+ * zeros.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Creates an empty `tf.TensorBuffer` with the given shape, dtype and
 * (optional) backing values.
 */
function buffer(shape, dtype, values) {
    // Any falsy dtype (undefined, null, '') falls back to 'float32', matching
    // the original's default-parameter + `||` combination.
    var resolvedDtype = dtype || 'float32';
    assertNonNegativeIntegerDimensions(shape);
    return new TensorBuffer(shape, resolvedDtype, values);
}
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Casts a `tf.Tensor` to a new dtype.
+ *
+ * ```js
+ * const x = tf.tensor1d([1.5, 2.5, 3]);
+ * tf.cast(x, 'int32').print();
+ * ```
+ * @param x The input tensor to be casted.
+ * @param dtype The dtype to cast the input tensor to.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
/**
 * Casts tensor-like `x` to `dtype`. String tensors may only be "cast" to
 * string; numeric tensors may not be cast to string.
 */
function cast_(x, dtype) {
    var $x = convertToTensor(x, 'x', 'cast');
    // Sanity checks.
    if (!isValidDtype(dtype)) {
        throw new Error("Failed to cast to unknown dtype " + dtype);
    }
    // Exactly one side being 'string' (XOR) is the illegal combination.
    var toString = dtype === 'string';
    var fromString = $x.dtype === 'string';
    if (toString !== fromString) {
        throw new Error('Only strings can be casted to strings');
    }
    return ENGINE.runKernel(Cast, { x: $x }, { dtype: dtype });
}
var cast = op({ cast_: cast_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new tensor with the same values and shape as the specified
+ * tensor.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2]);
+ *
+ * x.clone().print();
+ * ```
+ *
+ * @param x The tensor to clone.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/** Returns a new tensor with the same values and shape as `x`. */
function clone_(x) {
    var $x = convertToTensor(x, 'x', 'clone', 'string_or_numeric');
    // The backing kernel is named `Identity` to match the Python op name.
    return ENGINE.runKernel(Identity, { x: $x });
}
var clone = op({ clone_: clone_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Prints information about the `tf.Tensor` including its data.
+ *
+ * ```js
+ * const verbose = true;
+ * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);
+ * ```
+ * @param x The tensor to be printed.
+ * @param verbose Whether to print verbose information about the ` Tensor`,
+ * including dtype and size.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Logs a human-readable representation of `x` to the console.
 * `verbose` (default false) is forwarded to `x.toString`.
 */
function print(x, verbose) {
    var showDetails = verbose === undefined ? false : verbose;
    console.log(x.toString(showDetails));
}
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Ensure the global engine singleton exists before op handlers are wired up.
getOrMakeEngine();
// Minimal op set handed back via setOpHandler. NOTE(review): presumably this
// indirection exists to avoid a circular dependency between the Tensor class
// and the op implementations — confirm against upstream.
var opHandler = {
    buffer: buffer,
    cast: cast,
    clone: clone,
    print: print
};
setOpHandler(opHandler);
+
// Default artifact file names used by the browser download handler.
var DEFAULT_FILE_NAME_PREFIX = 'model';
var DEFAULT_JSON_EXTENSION_NAME = '.json';
var DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';
/**
 * Runs `f` on a later macrotask turn and resolves with its return value.
 * Used below to space out download clicks, since Firefox only saves the
 * last of several downloads started synchronously.
 */
function defer(f) {
    var nextTurn = new Promise(function (resolve) { setTimeout(resolve); });
    return nextTurn.then(f);
}
var BrowserDownloads = /** @class */ (function () {
    /**
     * IOHandler that saves model artifacts by triggering two browser file
     * downloads: a JSON file (topology + weights manifest) and a binary
     * weights file.
     *
     * @param fileNamePrefix Prefix for both file names. A leading
     *     'downloads://' scheme is stripped; an empty/null prefix falls back
     *     to 'model'. Only usable in a browser environment.
     */
    function BrowserDownloads(fileNamePrefix) {
        if (!env().getBool('IS_BROWSER')) {
            // TODO(cais): Provide info on what IOHandlers are available under the
            // current environment.
            throw new Error('browserDownloads() cannot proceed because the current environment ' +
                'is not a browser.');
        }
        if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {
            fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);
        }
        if (fileNamePrefix == null || fileNamePrefix.length === 0) {
            fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;
        }
        this.modelJsonFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;
        this.weightDataFileName =
            fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;
    }
    /**
     * Saves `modelArtifacts` by downloading the model JSON and, when weight
     * data is present, the binary weights file. Transpiled async state
     * machine: each `case` corresponds to an `await` point.
     */
    BrowserDownloads.prototype.save = function (modelArtifacts) {
        return __awaiter(this, void 0, void 0, function () {
            var weightsURL, weightsManifest, modelJSON, modelJsonURL, jsonAnchor_1, weightDataAnchor_1;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        if (typeof (document) === 'undefined') {
                            throw new Error('Browser downloads are not supported in ' +
                                'this environment since `document` is not present');
                        }
                        // NOTE(review): this object URL is created even when
                        // modelArtifacts.weightData is null and is never revoked —
                        // verify against upstream whether that leak is intentional.
                        weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], { type: 'application/octet-stream' }));
                        // Binary topology (e.g. GraphDef) is unsupported here.
                        if (!(modelArtifacts.modelTopology instanceof ArrayBuffer)) return [3 /*break*/, 1];
                        throw new Error('BrowserDownloads.save() does not support saving model topology ' +
                            'in binary formats yet.');
                    case 1:
                        // Manifest points at the sibling weights file by relative path.
                        weightsManifest = [{
                                paths: ['./' + this.weightDataFileName],
                                weights: modelArtifacts.weightSpecs
                            }];
                        modelJSON = getModelJSONForModelArtifacts(modelArtifacts, weightsManifest);
                        modelJsonURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelJSON)], { type: 'application/json' }));
                        // Reuse a previously-created anchor element when available.
                        jsonAnchor_1 = this.modelJsonAnchor == null ?
                            document.createElement('a') :
                            this.modelJsonAnchor;
                        jsonAnchor_1.download = this.modelJsonFileName;
                        jsonAnchor_1.href = modelJsonURL;
                        // Trigger downloads by evoking a click event on the download anchors.
                        // When multiple downloads are started synchronously, Firefox will only
                        // save the last one.
                        return [4 /*yield*/, defer(function () { return jsonAnchor_1.dispatchEvent(new MouseEvent('click')); })];
                    case 2:
                        // Trigger downloads by evoking a click event on the download anchors.
                        // When multiple downloads are started synchronously, Firefox will only
                        // save the last one.
                        _a.sent();
                        // The weights download only happens when weight data exists.
                        if (!(modelArtifacts.weightData != null)) return [3 /*break*/, 4];
                        weightDataAnchor_1 = this.weightDataAnchor == null ?
                            document.createElement('a') :
                            this.weightDataAnchor;
                        weightDataAnchor_1.download = this.weightDataFileName;
                        weightDataAnchor_1.href = weightsURL;
                        return [4 /*yield*/, defer(function () { return weightDataAnchor_1.dispatchEvent(new MouseEvent('click')); })];
                    case 3:
                        _a.sent();
                        _a.label = 4;
                    case 4: return [2 /*return*/, { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts) }];
                }
            });
        });
    };
    return BrowserDownloads;
}());
// URL scheme under which this handler is routed (see browserDownloadsRouter).
BrowserDownloads.URL_SCHEME = 'downloads://';
var BrowserFiles = /** @class */ (function () {
    /**
     * IOHandler that loads model artifacts from user-selected `File`s. The
     * first file is the model JSON; any remaining files are binary weight
     * shards whose names must match the JSON's weights manifest.
     */
    function BrowserFiles(files) {
        if (files == null || files.length < 1) {
            throw new Error("When calling browserFiles, at least 1 file is required, " +
                ("but received " + files));
        }
        // files[0] is the model JSON; the rest are weight files.
        this.jsonFile = files[0];
        this.weightsFiles = files.slice(1);
    }
    /**
     * Reads and validates the model JSON, then resolves with the model
     * artifacts (including weights when weight files were supplied).
     * Rejects when the JSON lacks modelTopology/weightsManifest or the file
     * cannot be read.
     */
    BrowserFiles.prototype.load = function () {
        return __awaiter(this, void 0, void 0, function () {
            var _this = this;
            return __generator(this, function (_a) {
                return [2 /*return*/, new Promise(function (resolve, reject) {
                        var jsonReader = new FileReader();
                        jsonReader.onload = function (event) {
                            // tslint:disable-next-line:no-any
                            var modelJSON = JSON.parse(event.target.result);
                            var modelTopology = modelJSON.modelTopology;
                            if (modelTopology == null) {
                                reject(new Error("modelTopology field is missing from file " + _this.jsonFile.name));
                                return;
                            }
                            var weightsManifest = modelJSON.weightsManifest;
                            if (weightsManifest == null) {
                                reject(new Error("weightManifest field is missing from file " + _this.jsonFile.name));
                                return;
                            }
                            // Topology-only load: no weight files were provided.
                            if (_this.weightsFiles.length === 0) {
                                resolve({ modelTopology: modelTopology });
                                return;
                            }
                            // Delegate weight loading to loadWeights via the shared helper.
                            var modelArtifactsPromise = getModelArtifactsForJSON(modelJSON, function (weightsManifest) { return _this.loadWeights(weightsManifest); });
                            resolve(modelArtifactsPromise);
                        };
                        jsonReader.onerror = function (error) { return reject("Failed to read model topology and weights manifest JSON " +
                            ("from file '" + _this.jsonFile.name + "'. BrowserFiles supports loading ") +
                            "Keras-style tf.Model artifacts only."); };
                        jsonReader.readAsText(_this.jsonFile);
                    })];
            });
        });
    };
    /**
     * Loads all weight groups listed in `weightsManifest`.
     *
     * @returns Promise of [flattened weight specs, concatenated weight data].
     */
    BrowserFiles.prototype.loadWeights = function (weightsManifest) {
        var e_1, _a;
        var _this = this;
        var weightSpecs = [];
        var paths = [];
        try {
            // Flatten the manifest: gather every group's specs and paths
            // (transpiled for...of loop).
            for (var weightsManifest_1 = __values(weightsManifest), weightsManifest_1_1 = weightsManifest_1.next(); !weightsManifest_1_1.done; weightsManifest_1_1 = weightsManifest_1.next()) {
                var entry = weightsManifest_1_1.value;
                weightSpecs.push.apply(weightSpecs, __spread(entry.weights));
                paths.push.apply(paths, __spread(entry.paths));
            }
        }
        catch (e_1_1) { e_1 = { error: e_1_1 }; }
        finally {
            try {
                // Close the iterator if the loop exited early.
                if (weightsManifest_1_1 && !weightsManifest_1_1.done && (_a = weightsManifest_1.return)) _a.call(weightsManifest_1);
            }
            finally { if (e_1) throw e_1.error; }
        }
        // Maps each manifest path to its matching user-provided File; throws
        // on any manifest/file mismatch.
        var pathToFile = this.checkManifestAndWeightFiles(weightsManifest);
        var promises = paths.map(function (path) { return _this.loadWeightsFile(path, pathToFile[path]); });
        return Promise.all(promises).then(function (buffers) { return [weightSpecs, concatenateArrayBuffers(buffers)]; });
    };
    // Reads a single weight file into an ArrayBuffer.
    BrowserFiles.prototype.loadWeightsFile = function (path, file) {
        return new Promise(function (resolve, reject) {
            var weightFileReader = new FileReader();
            weightFileReader.onload = function (event) {
                // tslint:disable-next-line:no-any
                var weightData = event.target.result;
                resolve(weightData);
            };
            // NOTE(review): message appears to be missing the word 'read'
            // ("Failed to read weights data ..."); preserved as-is since it is
            // runtime-visible text.
            weightFileReader.onerror = function (error) { return reject("Failed to weights data from file of path '" + path + "'."); };
            weightFileReader.readAsArrayBuffer(file);
        });
    };
    /**
     * Check the compatibility between weights manifest and weight files.
     * Returns a map from each manifest path to the matching `File`; throws on
     * duplicate basenames, missing files, or a count mismatch.
     */
    BrowserFiles.prototype.checkManifestAndWeightFiles = function (manifest) {
        var e_2, _a;
        var _this = this;
        var basenames = [];
        // Match on basenames: browsers expose only file names, not full paths.
        var fileNames = this.weightsFiles.map(function (file) { return basename(file.name); });
        var pathToFile = {};
        try {
            for (var manifest_1 = __values(manifest), manifest_1_1 = manifest_1.next(); !manifest_1_1.done; manifest_1_1 = manifest_1.next()) {
                var group = manifest_1_1.value;
                group.paths.forEach(function (path) {
                    var pathBasename = basename(path);
                    if (basenames.indexOf(pathBasename) !== -1) {
                        throw new Error("Duplicate file basename found in weights manifest: " +
                            ("'" + pathBasename + "'"));
                    }
                    basenames.push(pathBasename);
                    if (fileNames.indexOf(pathBasename) === -1) {
                        throw new Error("Weight file with basename '" + pathBasename + "' is not provided.");
                    }
                    else {
                        pathToFile[path] = _this.weightsFiles[fileNames.indexOf(pathBasename)];
                    }
                });
            }
        }
        catch (e_2_1) { e_2 = { error: e_2_1 }; }
        finally {
            try {
                // Close the iterator if the loop exited early.
                if (manifest_1_1 && !manifest_1_1.done && (_a = manifest_1.return)) _a.call(manifest_1);
            }
            finally { if (e_2) throw e_2.error; }
        }
        // Every provided weight file must be referenced by the manifest.
        if (basenames.length !== this.weightsFiles.length) {
            throw new Error("Mismatch in the number of files in weights manifest " +
                ("(" + basenames.length + ") and the number of weight files provided ") +
                ("(" + this.weightsFiles.length + ")."));
        }
        return pathToFile;
    };
    return BrowserFiles;
}());
/**
 * Save-route resolver: returns a `BrowserDownloads` handler for string URLs
 * with the 'downloads://' scheme (browser only), otherwise null.
 */
var browserDownloadsRouter = function (url) {
    // Downloads are only meaningful inside a browser.
    if (!env().getBool('IS_BROWSER')) {
        return null;
    }
    var isDownloadsURL = !Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME);
    return isDownloadsURL ?
        browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length)) :
        null;
};
IORouterRegistry.registerSaveRouter(browserDownloadsRouter);
+ /**
+ * Creates an IOHandler that triggers file downloads from the browser.
+ *
+ * The returned `IOHandler` instance can be used as model exporting methods such
+ * as `tf.Model.save` and supports only saving.
+ *
+ * ```js
+ * const model = tf.sequential();
+ * model.add(tf.layers.dense(
+ * {units: 1, inputShape: [10], activation: 'sigmoid'}));
+ * const saveResult = await model.save('downloads://mymodel');
+ * // This will trigger downloading of two files:
+ * // 'mymodel.json' and 'mymodel.weights.bin'.
+ * console.log(saveResult);
+ * ```
+ *
+ * @param fileNamePrefix Prefix name of the files to be downloaded. For use with
+ * `tf.Model`, `fileNamePrefix` should follow either of the following two
+ * formats:
+ * 1. `null` or `undefined`, in which case the default file
+ * names will be used:
+ * - 'model.json' for the JSON file containing the model topology and
+ * weights manifest.
+ * - 'model.weights.bin' for the binary file containing the binary weight
+ * values.
+ * 2. A single string or an Array of a single string, as the file name prefix.
+ * For example, if `'foo'` is provided, the downloaded JSON
+ * file and binary weights file will be named 'foo.json' and
+ * 'foo.weights.bin', respectively.
+ * @param config Additional configuration for triggering downloads.
+ * @returns An instance of `BrowserDownloads` `IOHandler`.
+ *
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Loading',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
/** Creates a `BrowserDownloads` IOHandler; the default file prefix is 'model'. */
function browserDownloads(fileNamePrefix) {
    var prefix = fileNamePrefix === undefined ? 'model' : fileNamePrefix;
    return new BrowserDownloads(prefix);
}
+ /**
+ * Creates an IOHandler that loads model artifacts from user-selected files.
+ *
+ * This method can be used for loading from files such as user-selected files
+ * in the browser.
+ * When used in conjunction with `tf.loadLayersModel`, an instance of
+ * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
+ *
+ * ```js
+ * // Note: This code snippet won't run properly without the actual file input
+ * // elements in the HTML DOM.
+ *
+ * // Suppose there are two HTML file input (`<input type="file" ...>`)
+ * // elements.
+ * const uploadJSONInput = document.getElementById('upload-json');
+ * const uploadWeightsInput = document.getElementById('upload-weights');
+ * const model = await tf.loadLayersModel(tf.io.browserFiles(
+ * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));
+ * ```
+ *
+ * @param files `File`s to load from. Currently, this function supports only
+ * loading from files that contain Keras-style models (i.e., `tf.Model`s), for
+ * which an `Array` of `File`s is expected (in that order):
+ * - A JSON file containing the model topology and weight manifest.
+ * - Optionally, One or more binary files containing the binary weights.
+ * These files must have names that match the paths in the `weightsManifest`
+ * contained by the aforementioned JSON file, or errors will be thrown
+ * during loading. These weights files have the same format as the ones
+ * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`
+ * Python PIP package. If no weights files are provided, only the model
+ * topology will be loaded from the JSON file above.
+ * @returns An instance of `Files` `IOHandler`.
+ *
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Loading',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ function browserFiles(files) {
+ return new BrowserFiles(files);
+ }
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Monitor Promise.all progress, fire onProgress callback function.
+ *
+ * @param promises Promise list going to be monitored
+ * @param onProgress Callback function. Fired when a promise resolved.
+ * @param startFraction Optional fraction start. Default to 0.
+ * @param endFraction Optional fraction end. Default to 1.
+ */
+ function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {
+ checkPromises(promises);
+ startFraction = startFraction == null ? 0 : startFraction;
+ endFraction = endFraction == null ? 1 : endFraction;
+ checkFraction(startFraction, endFraction);
+ var resolvedPromise = 0;
+ var registerMonitor = function (promise) {
+ promise.then(function (value) {
+ var fraction = startFraction +
+ ++resolvedPromise / promises.length * (endFraction - startFraction);
+ // pass fraction as parameter to callback function.
+ onProgress(fraction);
+ return value;
+ });
+ return promise;
+ };
+ function checkPromises(promises) {
+ assert(promises != null && Array.isArray(promises) && promises.length > 0, function () { return 'promises must be a none empty array'; });
+ }
+ function checkFraction(startFraction, endFraction) {
+ assert(startFraction >= 0 && startFraction <= 1, function () { return "Progress fraction must be in range [0, 1], but " +
+ ("got startFraction " + startFraction); });
+ assert(endFraction >= 0 && endFraction <= 1, function () { return "Progress fraction must be in range [0, 1], but " +
+ ("got endFraction " + endFraction); });
+ assert(endFraction >= startFraction, function () { return "startFraction must be no more than endFraction, but " +
+ ("got startFraction " + startFraction + " and endFraction ") +
+ ("" + endFraction); });
+ }
+ return Promise.all(promises.map(registerMonitor));
+ }
+
+ /**
+ * Reads binary weights data from a number of URLs.
+ *
+ * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.
+ * @param requestOptions RequestInit (options) for the HTTP requests.
+ * @param fetchFunc Optional overriding value for the `window.fetch` function.
+ * @param onProgress Optional, progress callback function, fired periodically
+ * before the load is completed.
+ * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same
+ * length as `fetchURLs`.
+ */
    // Transpiled async function: fetches each URL as a binary resource and
    // returns the response bodies as ArrayBuffers, one per URL, in order.
    // Progress (when loadOptions.onProgress is given) is split 50/50 between
    // the fetch phase and the body-read phase.
    function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {
        return __awaiter(this, void 0, void 0, function () {
            var fetchFunc, requests, fetchStartFraction, fetchEndFraction, responses, _a, bufferPromises, bufferStartFraction, bufferEndFraction, buffers, _b;
            return __generator(this, function (_c) {
                switch (_c.label) {
                    case 0:
                        if (loadOptions == null) {
                            loadOptions = {};
                        }
                        // Prefer a caller-supplied fetch implementation; otherwise
                        // fall back to the current platform's fetch.
                        fetchFunc = loadOptions.fetchFunc == null ? env().platform.fetch :
                            loadOptions.fetchFunc;
                        // Issue all HTTP requests in parallel, flagged as binary.
                        requests = fetchURLs.map(function (fetchURL) { return fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true }); });
                        // First half of the progress range covers the fetches.
                        fetchStartFraction = 0;
                        fetchEndFraction = 0.5;
                        if (!(loadOptions.onProgress == null)) return [3 /*break*/, 2];
                        return [4 /*yield*/, Promise.all(requests)];
                    case 1:
                        _a = _c.sent();
                        return [3 /*break*/, 4];
                    case 2: return [4 /*yield*/, monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction)];
                    case 3:
                        _a = _c.sent();
                        _c.label = 4;
                    case 4:
                        responses = _a;
                        // Second half of the progress range covers reading the
                        // response bodies into ArrayBuffers.
                        bufferPromises = responses.map(function (response) { return response.arrayBuffer(); });
                        bufferStartFraction = 0.5;
                        bufferEndFraction = 1;
                        if (!(loadOptions.onProgress == null)) return [3 /*break*/, 6];
                        return [4 /*yield*/, Promise.all(bufferPromises)];
                    case 5:
                        _b = _c.sent();
                        return [3 /*break*/, 8];
                    case 6: return [4 /*yield*/, monitorPromisesProgress(bufferPromises, loadOptions.onProgress, bufferStartFraction, bufferEndFraction)];
                    case 7:
                        _b = _c.sent();
                        _c.label = 8;
                    case 8:
                        buffers = _b;
                        return [2 /*return*/, buffers];
                }
            });
        });
    }
+ /**
+ * Reads a weights manifest JSON configuration, fetches the weights and
+ * returns them as `Tensor`s.
+ *
+ * @param manifest The weights manifest JSON.
+ * @param filePathPrefix The path prefix for filenames given in the manifest.
+ * Defaults to the empty string.
+ * @param weightNames The names of the weights to be fetched.
+ */
    // Loads weights described by a manifest over HTTP and returns them as a
    // name -> Tensor map (via weightsLoaderFactory). `requestInit` is forwarded
    // to the underlying fetch calls.
    function loadWeights(manifest, filePathPrefix, weightNames, requestInit) {
        if (filePathPrefix === void 0) { filePathPrefix = ''; }
        return __awaiter(this, void 0, void 0, function () {
            var fetchWeights, loadWeights;
            return __generator(this, function (_a) {
                // Bind the HTTP fetcher, then delegate to the generic loader.
                fetchWeights = function (fetchUrls) { return loadWeightsAsArrayBuffer(fetchUrls, { requestInit: requestInit }); };
                loadWeights = weightsLoaderFactory(fetchWeights);
                return [2 /*return*/, loadWeights(manifest, filePathPrefix, weightNames)];
            });
        });
    }
+ /**
+ * Creates a function, which reads a weights manifest JSON configuration,
+ * fetches the weight files using the specified function and returns them as
+ * `Tensor`s.
+ *
+ * ```js
+ * // example for creating a nodejs weight loader, which reads the weight files
+ * // from disk using fs.readFileSync
+ *
+ * import * as fs from 'fs'
+ *
+ * const fetchWeightsFromDisk = (filePaths: string[]) =>
+ * filePaths.map(filePath => fs.readFileSync(filePath).buffer)
+ *
+ * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)
+ *
+ * const manifest = JSON.parse(
+ * fs.readFileSync('./my_model-weights_manifest').toString()
+ * )
+ * const weightMap = await loadWeights(manifest, './')
+ * ```
+ * @param fetchWeightsFunction The function used for fetching the weight files.
+ * @returns Weight loading function.
+ */
    // Builds a weight-loading function around an arbitrary fetch routine.
    // The returned function reads a weights manifest, fetches only the weight
    // groups that contain requested names (or all groups when weightNames is
    // null), reassembles each group's binary data, and decodes it into a
    // name -> Tensor map.
    function weightsLoaderFactory(fetchWeightsFunction) {
        var _this = this;
        return function (manifest, filePathPrefix, weightNames) {
            if (filePathPrefix === void 0) { filePathPrefix = ''; }
            return __awaiter(_this, void 0, void 0, function () {
                var groupIndicesToFetchMap, groupWeightsToFetch, weightsFound, allManifestWeightNames, weightsNotFound, groupIndicesToFetch, fetchUrls, buffers, weightsTensorMap, bufferIndexOffset;
                return __generator(this, function (_a) {
                    switch (_a.label) {
                        case 0:
                            // Phase 1: scan the manifest to decide which groups
                            // must be fetched and at what byte offset each
                            // requested weight lives within its group.
                            groupIndicesToFetchMap = manifest.map(function () { return false; });
                            groupWeightsToFetch = {};
                            weightsFound = weightNames != null ? weightNames.map(function () { return false; }) : [];
                            allManifestWeightNames = [];
                            manifest.forEach(function (manifestGroupConfig, groupIndex) {
                                var groupOffset = 0;
                                manifestGroupConfig.weights.forEach(function (weightsEntry) {
                                    // Quantized entries store their on-disk dtype
                                    // under `quantization.dtype`.
                                    var rawDtype = ('quantization' in weightsEntry) ?
                                        weightsEntry.quantization.dtype :
                                        weightsEntry.dtype;
                                    var weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *
                                        sizeFromShape(weightsEntry.shape);
                                    var enqueueWeightsForFetchingFn = function () {
                                        groupIndicesToFetchMap[groupIndex] = true;
                                        if (groupWeightsToFetch[groupIndex] == null) {
                                            groupWeightsToFetch[groupIndex] = [];
                                        }
                                        groupWeightsToFetch[groupIndex].push({
                                            manifestEntry: weightsEntry,
                                            groupOffset: groupOffset,
                                            sizeBytes: weightsBytes
                                        });
                                    };
                                    if (weightNames != null) {
                                        // Only enqueue entries whose name was requested.
                                        weightNames.forEach(function (weightName, weightIndex) {
                                            if (weightName === weightsEntry.name) {
                                                enqueueWeightsForFetchingFn();
                                                weightsFound[weightIndex] = true;
                                            }
                                        });
                                    }
                                    else {
                                        enqueueWeightsForFetchingFn();
                                    }
                                    allManifestWeightNames.push(weightsEntry.name);
                                    groupOffset += weightsBytes;
                                });
                            });
                            // Fail loudly if any requested name is absent from
                            // the manifest.
                            if (!weightsFound.every(function (found) { return found; })) {
                                weightsNotFound = weightNames.filter(function (_, i) { return !weightsFound[i]; });
                                throw new Error("Could not find weights in manifest with names: " +
                                    (weightsNotFound.join(', ') + ". \n") +
                                    "Manifest JSON has weights with names: " +
                                    (allManifestWeightNames.join(', ') + "."));
                            }
                            groupIndicesToFetch = groupIndicesToFetchMap.reduce(function (accumulator, shouldFetch, i) {
                                if (shouldFetch) {
                                    accumulator.push(i);
                                }
                                return accumulator;
                            }, []);
                            // Phase 2: build the list of URLs for every path of
                            // every group that needs fetching.
                            fetchUrls = [];
                            groupIndicesToFetch.forEach(function (i) {
                                manifest[i].paths.forEach(function (filepath) {
                                    var fetchUrl = filePathPrefix +
                                        (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;
                                    fetchUrls.push(fetchUrl);
                                });
                            });
                            return [4 /*yield*/, fetchWeightsFunction(fetchUrls)];
                        case 1:
                            buffers = _a.sent();
                            // Phase 3: per group, concatenate its shard buffers
                            // and slice out + decode each requested weight.
                            weightsTensorMap = {};
                            bufferIndexOffset = 0;
                            groupIndicesToFetch.forEach(function (i) {
                                var numBuffers = manifest[i].paths.length;
                                var groupBytes = 0;
                                for (var i_1 = 0; i_1 < numBuffers; i_1++) {
                                    groupBytes += buffers[bufferIndexOffset + i_1].byteLength;
                                }
                                // Create a buffer for the whole group.
                                var groupBuffer = new ArrayBuffer(groupBytes);
                                var groupByteBuffer = new Uint8Array(groupBuffer);
                                var groupBufferOffset = 0;
                                for (var i_2 = 0; i_2 < numBuffers; i_2++) {
                                    var buffer = new Uint8Array(buffers[bufferIndexOffset + i_2]);
                                    groupByteBuffer.set(buffer, groupBufferOffset);
                                    groupBufferOffset += buffer.byteLength;
                                }
                                var weightsEntries = groupWeightsToFetch[i];
                                weightsEntries.forEach(function (weightsEntry) {
                                    var byteBuffer = groupBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes);
                                    var nameToTensorMap = decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);
                                    for (var name in nameToTensorMap) {
                                        weightsTensorMap[name] = nameToTensorMap[name];
                                    }
                                });
                                bufferIndexOffset += numBuffers;
                            });
                            return [2 /*return*/, weightsTensorMap];
                    }
                });
            });
        };
    }
+
    // MIME types used when packaging model artifacts into multipart/form-data.
    var OCTET_STREAM_MIME_TYPE = 'application/octet-stream';
    var JSON_TYPE = 'application/json';
    // IOHandler that saves model artifacts to, and loads them from, an HTTP(S)
    // endpoint. Saving posts multipart/form-data ('model.json' +
    // 'model.weights.bin'); loading fetches model JSON then weight shards.
    var HTTPRequest = /** @class */ (function () {
        function HTTPRequest(path, loadOptions) {
            this.DEFAULT_METHOD = 'POST';
            if (loadOptions == null) {
                loadOptions = {};
            }
            this.weightPathPrefix = loadOptions.weightPathPrefix;
            this.onProgress = loadOptions.onProgress;
            this.weightUrlConverter = loadOptions.weightUrlConverter;
            if (loadOptions.fetchFunc != null) {
                // A custom fetch must be callable; its exact behavior is the
                // caller's responsibility.
                assert(typeof loadOptions.fetchFunc === 'function', function () { return 'Must pass a function that matches the signature of ' +
                    '`fetch` (see ' +
                    'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)'; });
                this.fetch = loadOptions.fetchFunc;
            }
            else {
                this.fetch = env().platform.fetch;
            }
            assert(path != null && path.length > 0, function () { return 'URL path for http must not be null, undefined or ' +
                'empty.'; });
            // An array path means [topologyUrl, weightsUrl].
            if (Array.isArray(path)) {
                assert(path.length === 2, function () { return 'URL paths for http must have a length of 2, ' +
                    ("(actual length is " + path.length + ")."); });
            }
            this.path = path;
            // The body is built by save(); a pre-existing one would be clobbered.
            if (loadOptions.requestInit != null &&
                loadOptions.requestInit.body != null) {
                throw new Error('requestInit is expected to have no pre-existing body, but has one.');
            }
            this.requestInit = loadOptions.requestInit || {};
        }
        // Sends the model topology + weights to the server as form data.
        HTTPRequest.prototype.save = function (modelArtifacts) {
            return __awaiter(this, void 0, void 0, function () {
                var init, weightsManifest, modelTopologyAndWeightManifest, response;
                return __generator(this, function (_a) {
                    switch (_a.label) {
                        case 0:
                            if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
                                throw new Error('BrowserHTTPRequest.save() does not support saving model topology ' +
                                    'in binary formats yet.');
                            }
                            init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);
                            init.body = new FormData();
                            // Single-group manifest pointing at the one weights blob.
                            weightsManifest = [{
                                    paths: ['./model.weights.bin'],
                                    weights: modelArtifacts.weightSpecs,
                                }];
                            modelTopologyAndWeightManifest = getModelJSONForModelArtifacts(modelArtifacts, weightsManifest);
                            init.body.append('model.json', new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), 'model.json');
                            if (modelArtifacts.weightData != null) {
                                init.body.append('model.weights.bin', new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }), 'model.weights.bin');
                            }
                            return [4 /*yield*/, this.fetch(this.path, init)];
                        case 1:
                            response = _a.sent();
                            if (response.ok) {
                                return [2 /*return*/, {
                                        modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts),
                                        responses: [response],
                                    }];
                            }
                            else {
                                throw new Error("BrowserHTTPRequest.save() failed due to HTTP response status " +
                                    (response.status + "."));
                            }
                    }
                });
            });
        };
        /**
         * Load model artifacts via HTTP request(s).
         *
         * See the documentation to `tf.io.http` for details on the saved
         * artifacts.
         *
         * @returns The loaded model artifacts (if loading succeeds).
         */
        HTTPRequest.prototype.load = function () {
            return __awaiter(this, void 0, void 0, function () {
                var modelConfigRequest, modelJSON, message, modelTopology, weightsManifest;
                var _this = this;
                return __generator(this, function (_a) {
                    switch (_a.label) {
                        case 0: return [4 /*yield*/, this.fetch(this.path, this.requestInit)];
                        case 1:
                            modelConfigRequest = _a.sent();
                            if (!modelConfigRequest.ok) {
                                throw new Error("Request to " + this.path + " failed with status code " +
                                    (modelConfigRequest.status + ". Please verify this URL points to ") +
                                    "the model JSON of the model to load.");
                            }
                            _a.label = 2;
                        case 2:
                            // try/catch around JSON parsing (transpiled form).
                            _a.trys.push([2, 4, , 5]);
                            return [4 /*yield*/, modelConfigRequest.json()];
                        case 3:
                            modelJSON = _a.sent();
                            return [3 /*break*/, 5];
                        case 4:
                            _a.sent();
                            message = "Failed to parse model JSON of response from " + this.path + ".";
                            // TODO(nsthorat): Remove this after some time when we're comfortable that
                            // .pb files are mostly gone.
                            if (this.path.endsWith('.pb')) {
                                message += ' Your path contains a .pb file extension. ' +
                                    'Support for .pb models have been removed in TensorFlow.js 1.0 ' +
                                    'in favor of .json models. You can re-convert your Python ' +
                                    'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +
                                    'or you can convert your.pb models with the \'pb2json\'' +
                                    'NPM script in the tensorflow/tfjs-converter repository.';
                            }
                            else {
                                message += ' Please make sure the server is serving valid ' +
                                    'JSON for this request.';
                            }
                            throw new Error(message);
                        case 5:
                            modelTopology = modelJSON.modelTopology;
                            weightsManifest = modelJSON.weightsManifest;
                            // At least one of topology/weights must be present.
                            if (modelTopology == null && weightsManifest == null) {
                                throw new Error("The JSON from HTTP path " + this.path + " contains neither model " +
                                    "topology or manifest for weights.");
                            }
                            return [2 /*return*/, getModelArtifactsForJSON(modelJSON, function (weightsManifest) { return _this.loadWeights(weightsManifest); })];
                    }
                });
            });
        };
        // Fetches all weight shards listed in the manifest and returns
        // [weightSpecs, concatenated ArrayBuffer]. Shard URLs are either
        // prefix+path+suffix or produced by weightUrlConverter when set.
        HTTPRequest.prototype.loadWeights = function (weightsManifest) {
            return __awaiter(this, void 0, void 0, function () {
                var weightPath, _a, prefix, suffix, pathPrefix, weightSpecs, weightsManifest_1, weightsManifest_1_1, entry, fetchURLs, urlPromises, weightsManifest_2, weightsManifest_2_1, weightsGroup, _b, _c, path, _d, _e, _f, buffers;
                var e_2, _g, e_3, _h, e_4, _j;
                return __generator(this, function (_k) {
                    switch (_k.label) {
                        case 0:
                            weightPath = Array.isArray(this.path) ? this.path[1] : this.path;
                            _a = __read(parseUrl(weightPath), 2), prefix = _a[0], suffix = _a[1];
                            pathPrefix = this.weightPathPrefix || prefix;
                            // Flatten the per-group weight specs into one array.
                            weightSpecs = [];
                            try {
                                for (weightsManifest_1 = __values(weightsManifest), weightsManifest_1_1 = weightsManifest_1.next(); !weightsManifest_1_1.done; weightsManifest_1_1 = weightsManifest_1.next()) {
                                    entry = weightsManifest_1_1.value;
                                    weightSpecs.push.apply(weightSpecs, __spread(entry.weights));
                                }
                            }
                            catch (e_2_1) { e_2 = { error: e_2_1 }; }
                            finally {
                                try {
                                    if (weightsManifest_1_1 && !weightsManifest_1_1.done && (_g = weightsManifest_1.return)) _g.call(weightsManifest_1);
                                }
                                finally { if (e_2) throw e_2.error; }
                            }
                            fetchURLs = [];
                            urlPromises = [];
                            try {
                                for (weightsManifest_2 = __values(weightsManifest), weightsManifest_2_1 = weightsManifest_2.next(); !weightsManifest_2_1.done; weightsManifest_2_1 = weightsManifest_2.next()) {
                                    weightsGroup = weightsManifest_2_1.value;
                                    try {
                                        for (_b = (e_4 = void 0, __values(weightsGroup.paths)), _c = _b.next(); !_c.done; _c = _b.next()) {
                                            path = _c.value;
                                            if (this.weightUrlConverter != null) {
                                                // Converter may be async; collect promises.
                                                urlPromises.push(this.weightUrlConverter(path));
                                            }
                                            else {
                                                fetchURLs.push(pathPrefix + path + suffix);
                                            }
                                        }
                                    }
                                    catch (e_4_1) { e_4 = { error: e_4_1 }; }
                                    finally {
                                        try {
                                            if (_c && !_c.done && (_j = _b.return)) _j.call(_b);
                                        }
                                        finally { if (e_4) throw e_4.error; }
                                    }
                                }
                            }
                            catch (e_3_1) { e_3 = { error: e_3_1 }; }
                            finally {
                                try {
                                    if (weightsManifest_2_1 && !weightsManifest_2_1.done && (_h = weightsManifest_2.return)) _h.call(weightsManifest_2);
                                }
                                finally { if (e_3) throw e_3.error; }
                            }
                            if (!this.weightUrlConverter) return [3 /*break*/, 2];
                            // Await the converted URLs and append them.
                            _e = (_d = fetchURLs.push).apply;
                            _f = [fetchURLs];
                            return [4 /*yield*/, Promise.all(urlPromises)];
                        case 1:
                            _e.apply(_d, _f.concat([__spread.apply(void 0, [_k.sent()])]));
                            _k.label = 2;
                        case 2: return [4 /*yield*/, loadWeightsAsArrayBuffer(fetchURLs, {
                                requestInit: this.requestInit,
                                fetchFunc: this.fetch,
                                onProgress: this.onProgress
                            })];
                        case 3:
                            buffers = _k.sent();
                            return [2 /*return*/, [weightSpecs, concatenateArrayBuffers(buffers)]];
                    }
                });
            });
        };
        return HTTPRequest;
    }());
    // Matches absolute http:// and https:// URLs.
    HTTPRequest.URL_SCHEME_REGEX = /^https?:\/\//;
+ /**
+ * Extract the prefix and suffix of the url, where the prefix is the path before
+ * the last file, and suffix is the search params after the last file.
+ * ```
+ * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'
+ * [prefix, suffix] = parseUrl(url)
+ * // prefix = 'http://tfhub.dev/model/1/'
+ * // suffix = '?tfjs-format=file'
+ * ```
+ * @param url the model url to be parsed.
+ */
+ function parseUrl(url) {
+ var lastSlash = url.lastIndexOf('/');
+ var lastSearchParam = url.lastIndexOf('?');
+ var prefix = url.substring(0, lastSlash);
+ var suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : '';
+ return [prefix + '/', suffix];
+ }
+ function isHTTPScheme(url) {
+ return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;
+ }
+ var httpRouter = function (url, loadOptions) {
+ if (typeof fetch === 'undefined' &&
+ (loadOptions == null || loadOptions.fetchFunc == null)) {
+ // `http` uses `fetch` or `node-fetch`, if one wants to use it in
+ // an environment that is not the browser or node they have to setup a
+ // global fetch polyfill.
+ return null;
+ }
+ else {
+ var isHTTP = true;
+ if (Array.isArray(url)) {
+ isHTTP = url.every(function (urlItem) { return isHTTPScheme(urlItem); });
+ }
+ else {
+ isHTTP = isHTTPScheme(url);
+ }
+ if (isHTTP) {
+ return http(url, loadOptions);
+ }
+ }
+ return null;
+ };
+ IORouterRegistry.registerSaveRouter(httpRouter);
+ IORouterRegistry.registerLoadRouter(httpRouter);
+ /**
+ * Creates an IOHandler subtype that sends model artifacts to HTTP server.
+ *
+ * An HTTP request of the `multipart/form-data` mime type will be sent to the
+ * `path` URL. The form data includes artifacts that represent the topology
+ * and/or weights of the model. In the case of Keras-style `tf.Model`, two
+ * blobs (files) exist in form-data:
+ * - A JSON file consisting of `modelTopology` and `weightsManifest`.
+ * - A binary weights file consisting of the concatenated weight values.
+ * These files are in the same format as the one generated by
+ * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).
+ *
+ * The following code snippet exemplifies the client-side code that uses this
+ * function:
+ *
+ * ```js
+ * const model = tf.sequential();
+ * model.add(
+ * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
+ *
+ * const saveResult = await model.save(tf.io.http(
+ * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));
+ * console.log(saveResult);
+ * ```
+ *
+ * If the default `POST` method is to be used, without any custom parameters
+ * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:
+ *
+ * ```js
+ * const saveResult = await model.save('http://model-server:5000/upload');
+ * ```
+ *
+ * The following GitHub Gist
+ * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864
+ * implements a server based on [flask](https://github.com/pallets/flask) that
 * can receive the request. Upon receiving the model artifacts via the request,
 * this particular server reconstitutes instances of [Keras
+ * Models](https://keras.io/models/model/) in memory.
+ *
+ *
+ * @param path A URL path to the model.
+ * Can be an absolute HTTP path (e.g.,
+ * 'http://localhost:8000/model-upload)') or a relative path (e.g.,
+ * './model-upload').
+ * @param requestInit Request configurations to be used when sending
+ * HTTP request to server using `fetch`. It can contain fields such as
+ * `method`, `credentials`, `headers`, `mode`, etc. See
+ * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request
+ * for more information. `requestInit` must not have a body, because the
+ * body will be set by TensorFlow.js. File blobs representing the model
+ * topology (filename: 'model.json') and the weights of the model (filename:
+ * 'model.weights.bin') will be appended to the body. If `requestInit` has a
+ * `body`, an Error will be thrown.
+ * @param loadOptions Optional configuration for the loading. It includes the
+ * following fields:
+ * - weightPathPrefix Optional, this specifies the path prefix for weight
+ * files, by default this is calculated from the path param.
+ * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,
+ * the `fetch` from node-fetch can be used here.
+ * - onProgress Optional, progress callback function, fired periodically
+ * before the load is completed.
+ * @returns An instance of `IOHandler`.
+ *
+ * @doc {
+ * heading: 'Models',
+ * subheading: 'Loading',
+ * namespace: 'io',
+ * ignoreCI: true
+ * }
+ */
+ function http(path, loadOptions) {
+ return new HTTPRequest(path, loadOptions);
+ }
+ /**
+ * Deprecated. Use `tf.io.http`.
+ * @param path
+ * @param loadOptions
+ */
+ function browserHTTPRequest(path, loadOptions) {
+ return http(path, loadOptions);
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // IOHandler that "loads" by returning pre-supplied, in-memory model
    // artifacts unchanged. load() is async to satisfy the IOHandler contract.
    var PassthroughLoader = /** @class */ (function () {
        function PassthroughLoader(modelArtifacts) {
            this.modelArtifacts = modelArtifacts;
        }
        PassthroughLoader.prototype.load = function () {
            return __awaiter(this, void 0, void 0, function () {
                return __generator(this, function (_a) {
                    return [2 /*return*/, this.modelArtifacts];
                });
            });
        };
        return PassthroughLoader;
    }());
    // IOHandler that "saves" by forwarding the artifacts to a user callback and
    // returning whatever the callback yields.
    var PassthroughSaver = /** @class */ (function () {
        function PassthroughSaver(saveHandler) {
            this.saveHandler = saveHandler;
        }
        PassthroughSaver.prototype.save = function (modelArtifacts) {
            return __awaiter(this, void 0, void 0, function () {
                return __generator(this, function (_a) {
                    return [2 /*return*/, this.saveHandler(modelArtifacts)];
                });
            });
        };
        return PassthroughSaver;
    }());
+ /**
+ * Creates an IOHandler that loads model artifacts from memory.
+ *
+ * When used in conjunction with `tf.loadLayersModel`, an instance of
+ * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
+ *
+ * ```js
+ * const model = await tf.loadLayersModel(tf.io.fromMemory(
+ * modelTopology, weightSpecs, weightData));
+ * ```
+ *
+ * @param modelArtifacts a object containing model topology (i.e., parsed from
+ * the JSON format).
+ * @param weightSpecs An array of `WeightsManifestEntry` objects describing the
+ * names, shapes, types, and quantization of the weight data.
+ * @param weightData A single `ArrayBuffer` containing the weight data,
+ * concatenated in the order described by the weightSpecs.
+ * @param trainingConfig Model training configuration. Optional.
+ *
+ * @returns A passthrough `IOHandler` that simply loads the provided data.
+ */
+ function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {
+ if (arguments.length === 1) {
+ var isModelArtifacts = modelArtifacts.modelTopology != null ||
+ modelArtifacts.weightSpecs != null;
+ if (isModelArtifacts) {
+ return new PassthroughLoader(modelArtifacts);
+ }
+ else {
+ // Legacy support: with only modelTopology.
+ // TODO(cais): Remove this deprecated API.
+ console.warn('Please call tf.io.fromMemory() with only one argument. ' +
+ 'The argument should be of type ModelArtifacts. ' +
+ 'The multi-argument signature of tf.io.fromMemory() has been ' +
+ 'deprecated and will be removed in a future release.');
+ return new PassthroughLoader({ modelTopology: modelArtifacts });
+ }
+ }
+ else {
+ // Legacy support.
+ // TODO(cais): Remove this deprecated API.
+ console.warn('Please call tf.io.fromMemory() with only one argument. ' +
+ 'The argument should be of type ModelArtifacts. ' +
+ 'The multi-argument signature of tf.io.fromMemory() has been ' +
+ 'deprecated and will be removed in a future release.');
+ return new PassthroughLoader({
+ modelTopology: modelArtifacts,
+ weightSpecs: weightSpecs,
+ weightData: weightData,
+ trainingConfig: trainingConfig
+ });
+ }
+ }
+ /**
+ * Creates an IOHandler that passes saved model artifacts to a callback.
+ *
+ * ```js
+ * function handleSave(artifacts) {
+ * // ... do something with the artifacts ...
+ * return {modelArtifactsInfo: {...}, ...};
+ * }
+ *
+ * const saveResult = model.save(tf.io.withSaveHandler(handleSave));
+ * ```
+ *
+ * @param saveHandler A function that accepts a `ModelArtifacts` and returns a
+ * `SaveResult`.
+ */
+ function withSaveHandler(saveHandler) {
+ return new PassthroughSaver(saveHandler);
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
    // Public tf.io namespace object: aggregates the IO handler factories,
    // weight codecs, router registration hooks, and model-management helpers
    // defined above. Null prototype avoids inherited Object.prototype keys.
    var io = {
        __proto__: null,
        browserFiles: browserFiles,
        browserHTTPRequest: browserHTTPRequest,
        concatenateArrayBuffers: concatenateArrayBuffers,
        decodeWeights: decodeWeights,
        encodeWeights: encodeWeights,
        fromMemory: fromMemory,
        getLoadHandlers: getLoadHandlers,
        getModelArtifactsForJSON: getModelArtifactsForJSON,
        getModelArtifactsInfoForJSON: getModelArtifactsInfoForJSON,
        getSaveHandlers: getSaveHandlers,
        http: http,
        isHTTPScheme: isHTTPScheme,
        loadWeights: loadWeights,
        registerLoadRouter: registerLoadRouter,
        registerSaveRouter: registerSaveRouter,
        weightsLoaderFactory: weightsLoaderFactory,
        withSaveHandler: withSaveHandler,
        copyModel: copyModel,
        listModels: listModels,
        moveModel: moveModel,
        removeModel: removeModel
    };
+
+ /**
+ * Computes the dot product of two matrices, A * B. These must be matrices.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2], [1, 2]);
+ * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * a.matMul(b).print(); // or tf.matMul(a, b)
+ * ```
+ * @param a First matrix in dot product operation.
+ * @param b Second matrix in dot product operation.
+ * @param transposeA If true, `a` is transposed before multiplication.
+ * @param transposeB If true, `b` is transposed before multiplication.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
+ function matMul_(a, b, transposeA, transposeB) {
+ var _a;
+ if (transposeA === void 0) { transposeA = false; }
+ if (transposeB === void 0) { transposeB = false; }
+ var $a = convertToTensor(a, 'a', 'matMul');
+ var $b = convertToTensor(b, 'b', 'matMul');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ var attrs = { transposeA: transposeA, transposeB: transposeB };
+ return ENGINE.runKernel(BatchMatMul, inputs, attrs);
+ }
+ var matMul$1 = op({ matMul_: matMul_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take
+ * value `onValue` (defaults to 1), while all other locations take value
+ * `offValue` (defaults to 0). If `indices` is rank `R`, the output has rank
+ * `R+1` with the last axis of size `depth`.
+ *
+ * ```js
+ * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();
+ * ```
+ *
+ * @param indices `tf.Tensor` of indices with dtype `int32`.
+ * @param depth The depth of the one hot dimension.
+ * @param onValue A number used to fill in the output when the index matches
+ * the location.
+ * @param offValue A number used to fill in the output when the index does
+ * not match the location.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function oneHot_(indices, depth, onValue, offValue) {
+ if (onValue === void 0) { onValue = 1; }
+ if (offValue === void 0) { offValue = 0; }
+ if (depth < 2) {
+ throw new Error("Error in oneHot: depth must be >=2, but it is " + depth);
+ }
+ var $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');
+ var inputs = { indices: $indices };
+ var attrs = { depth: depth, onValue: onValue, offValue: offValue };
+ return ENGINE.runKernel(OneHot, inputs, attrs);
+ }
+ var oneHot = op({ oneHot_: oneHot_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Transposes the `tf.Tensor`, permuting its dimensions according to `perm`.
 *
 * Output dimension `i` corresponds to input dimension `perm[i]`. When `perm`
 * is omitted it defaults to `[n-1 ... 0]` (full reversal), which on a 2-D
 * tensor is the ordinary matrix transpose.
 *
 * ```js
 * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);
 *
 * a.transpose().print(); // or tf.transpose(a)
 * ```
 *
 * @param x The tensor to transpose.
 * @param perm The permutation of the dimensions of a.
 *
 * @doc {heading: 'Operations', subheading: 'Matrices'}
 */
function transpose_(x, perm) {
    var $x = convertToTensor(x, 'x', 'transpose');
    if (perm == null) {
        // Default permutation reverses the axes: [rank-1, ..., 1, 0].
        perm = [];
        for (var i = $x.rank - 1; i >= 0; i--) {
            perm.push(i);
        }
    }
    assert($x.rank === perm.length, function () {
        return "Error in transpose: rank of input " + $x.rank + " " +
            "must match length of perm " + perm + ".";
    });
    for (var j = 0; j < perm.length; j++) {
        var axis = perm[j];
        assert(axis >= 0 && axis < $x.rank, function () {
            return "All entries in 'perm' must be between 0 and " + ($x.rank - 1) +
                " but got " + perm;
        });
    }
    // Scalars and vectors are unchanged by any permutation.
    if ($x.rank <= 1) {
        return $x.clone();
    }
    return ENGINE.runKernel(Transpose, { x: $x }, { perm: perm });
}
var transpose = op({ transpose_: transpose_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the confusion matrix from true labels and predicted labels.
 *
 * ```js
 * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');
 * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');
 * const numClasses = 3;
 * const out = tf.math.confusionMatrix(labels, predictions, numClasses);
 * out.print();
 * // Expected output matrix:
 * // [[2, 0, 0],
 * //  [0, 1, 1],
 * //  [0, 0, 1]]
 * ```
 *
 * @param labels Target labels as 0-based class integers, shape `[numExamples]`.
 * @param predictions Predicted classes, same shape and convention as `labels`.
 * @param numClasses Total number of classes; must exceed every element of
 *     `labels` and `predictions`.
 * @returns An int32 2D tensor where entry `[r][c]` counts how often actual
 *     class `r` was predicted as class `c`.
 *
 * @doc {heading: 'Operations', subheading: 'Evaluation'}
 */
function confusionMatrix_(labels, predictions, numClasses) {
    var labelsT = convertToTensor(labels, 'labels', 'confusionMatrix');
    var predsT = convertToTensor(predictions, 'predictions', 'confusionMatrix');
    assert(numClasses == null || numClasses > 0 && Number.isInteger(numClasses), function () {
        return "If provided, numClasses must be a positive integer, " +
            "but got " + numClasses;
    });
    assert(labelsT.rank === 1, function () {
        return "Expected the rank of labels to be 1, but got " + labelsT.rank;
    });
    assert(predsT.rank === 1, function () {
        return "Expected the rank of predictions to be 1, " +
            "but got " + predsT.rank;
    });
    assert(labelsT.shape[0] === predsT.shape[0], function () {
        return "Mismatch in the number of examples: " +
            labelsT.shape[0] + " vs. " + predsT.shape[0] + ". " +
            "Labels and predictions should have the same number of elements.";
    });
    assert(numClasses > 0 && Number.isInteger(numClasses), function () {
        return "numClasses is required to be a positive integer, but got " +
            "" + numClasses;
    });
    // TODO(cais): In the future, if oneHot supports tensors inputs for
    // `numClasses`, `confusionMatrix` can make `numClasses` optional.
    var labelsOneHot = oneHot(cast(labelsT, 'int32'), numClasses);
    var predsOneHot = oneHot(cast(predsT, 'int32'), numClasses);
    // labelsOneHot^T x predsOneHot accumulates one count per (actual, predicted) pair.
    var product = matMul$1(transpose(labelsOneHot), predsOneHot);
    return cast(product, 'int32');
}
var confusionMatrix = op({ confusionMatrix_: confusionMatrix_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
// Namespace object re-exported as `tf.math`. The `__proto__: null` entry gives
// it no prototype, so property lookups never hit inherited Object members.
var math = {
    __proto__: null,
    confusionMatrix: confusionMatrix
};
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns the dimensions in the input shape that are broadcasted to
 * produce the provided output shape.
 *
 * The returned dimensions are 0-indexed and sorted. An example:
 *   inShape  = [4, 1, 3]
 *   outShape = [5, 4, 3, 3]
 *   result   = [1]  (the 2nd input dimension is broadcast 1 => 3).
 */
function getBroadcastDims(inShape, outShape) {
    var collected = [];
    var inRank = inShape.length;
    // Walk the shapes from their trailing (aligned) ends.
    for (var offset = 0; offset < inRank; offset++) {
        var inAxis = inRank - 1 - offset;
        var inSize = inShape[inAxis] || 1;
        var outSize = outShape[outShape.length - 1 - offset] || 1;
        if (inSize === 1 && outSize > 1) {
            collected.push(inAxis);
        }
    }
    // Axes were gathered back-to-front; reverse to ascending order.
    return collected.reverse();
}
/**
 * Returns the axes in the output space that should be reduced to produce
 * the input space (axes the input lacks, plus size-1 axes that were
 * broadcast up).
 */
function getReductionAxes(inShape, outShape) {
    var axes = [];
    for (var offset = 0; offset < outShape.length; offset++) {
        var outAxis = outShape.length - 1 - offset;
        var inSize = inShape[inShape.length - 1 - offset];
        // `inSize == null` means the input has no dimension aligned here.
        if (inSize == null || (inSize === 1 && outShape[outAxis] > 1)) {
            axes.push(outAxis);
        }
    }
    return axes.reverse();
}
/**
 * Returns the broadcast shape of `shapeA` and `shapeB`, aligning the shapes
 * from their trailing ends. Throws when a pair of dimensions is incompatible
 * (neither equal nor 1).
 */
function assertAndGetBroadcastShape(shapeA, shapeB) {
    var rank = Math.max(shapeA.length, shapeB.length);
    var shape = new Array(rank);
    for (var offset = 0; offset < rank; offset++) {
        // Missing leading dimensions behave as size 1.
        var dimA = shapeA[shapeA.length - 1 - offset];
        if (dimA == null) {
            dimA = 1;
        }
        var dimB = shapeB[shapeB.length - 1 - offset];
        if (dimB == null) {
            dimB = 1;
        }
        var out;
        if (dimA === 1) {
            out = dimB;
        }
        else if (dimB === 1) {
            out = dimA;
        }
        else if (dimA === dimB) {
            out = dimA;
        }
        else {
            throw Error("Operands could not be broadcast together with shapes " +
                shapeA + " and " + shapeB + ".");
        }
        shape[rank - 1 - offset] = out;
    }
    return shape;
}
+
// Namespace object bundling the broadcast helpers above (exposed as
// `tf.broadcast_util`); built with a null prototype.
var broadcast_util = {
    __proto__: null,
    getBroadcastDims: getBroadcastDims,
    getReductionAxes: getReductionAxes,
    assertAndGetBroadcastShape: assertAndGetBroadcastShape
};
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Creates rank-3 `tf.Tensor` with the provided values, shape and dtype.
 *
 * The same functionality can be achieved with `tf.tensor`, but in general
 * we recommend using `tf.tensor3d` as it makes the code more readable.
 *
 * ```js
 * // Pass a nested array.
 * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();
 * ```
 * ```js
 * // Pass a flat array and specify a shape.
 * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();
 * ```
 *
 * @param values The values of the tensor: a nested number array, a flat
 *     array, or a `TypedArray`.
 * @param shape The shape of the tensor; inferred from `values` if omitted.
 * @param dtype The data type.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function tensor3d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 3) {
        throw new Error('tensor3d() requires shape to have three numbers');
    }
    var computedShape = inferShape(values, dtype);
    var computedRank = computedShape.length;
    // Only a fully nested (rank-3) array or a flat (rank-1) array is valid.
    if (computedRank !== 3 && computedRank !== 1) {
        throw new Error('tensor3d() requires values to be number[][][] or flat/TypedArray');
    }
    // A flat array carries no shape information, so `shape` becomes mandatory.
    if (computedRank === 1 && shape == null) {
        throw new Error('tensor3d() requires shape to be provided when `values` ' +
            'are a flat array');
    }
    return makeTensor(values, shape, computedShape, dtype);
}
+
// Lazily-created, module-shared 2D canvas context used by `fromPixels_` to
// rasterize image/video/ImageBitmap inputs when no backend kernel exists.
var fromPixels2DContext;
/**
 * Creates a `tf.Tensor` from an image.
 *
 * ```js
 * const image = new ImageData(1, 1);
 * image.data[0] = 100;
 * image.data[1] = 150;
 * image.data[2] = 200;
 * image.data[3] = 255;
 *
 * tf.browser.fromPixels(image).print();
 * ```
 *
 * @param pixels The input image to construct the tensor from. The
 * supported image types are all 4-channel. You can also pass in an image
 * object with following attributes:
 * `{data: Uint8Array; width: number; height: number}`
 * @param numChannels The number of channels of the output tensor. A
 * numChannels value less than 4 allows you to ignore channels. Defaults to
 * 3 (ignores alpha channel of input image).
 *
 * @returns A Tensor3D with the shape `[height, width, numChannels]`.
 *
 * Note: fromPixels can be lossy in some cases, same image may result in
 * slightly different tensor values, if rendered by different rendering
 * engines. This means that results from different browsers, or even same
 * browser with CPU and GPU rendering engines can be different. See discussion
 * in details:
 * https://github.com/tensorflow/tfjs/issues/5482
 *
 * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}
 */
function fromPixels_(pixels, numChannels) {
    if (numChannels === void 0) { numChannels = 3; }
    // Sanity checks.
    if (numChannels > 4) {
        throw new Error('Cannot construct Tensor with more than 4 channels from pixels.');
    }
    if (pixels == null) {
        throw new Error('pixels passed to tf.browser.fromPixels() can not be null');
    }
    // Classify the input source; exactly one of these flags ends up true.
    // The `typeof` guards keep this working in workers/node, where the DOM
    // constructors may not exist.
    var isPixelData = false;
    var isImageData = false;
    var isVideo = false;
    var isImage = false;
    var isCanvasLike = false;
    var isImageBitmap = false;
    if (pixels.data instanceof Uint8Array) {
        isPixelData = true;
    }
    else if (typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {
        isImageData = true;
    }
    else if (typeof (HTMLVideoElement) !== 'undefined' &&
        pixels instanceof HTMLVideoElement) {
        isVideo = true;
    }
    else if (typeof (HTMLImageElement) !== 'undefined' &&
        pixels instanceof HTMLImageElement) {
        isImage = true;
        // tslint:disable-next-line: no-any
    }
    else if (pixels.getContext != null) {
        // Anything exposing getContext (canvas or OffscreenCanvas) is canvas-like.
        isCanvasLike = true;
    }
    else if (typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap) {
        isImageBitmap = true;
    }
    else {
        throw new Error('pixels passed to tf.browser.fromPixels() must be either an ' +
            "HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData " +
            "in browser, or OffscreenCanvas, ImageData in webworker" +
            " or {data: Uint32Array, width: number, height: number}, " +
            ("but was " + pixels.constructor.name));
    }
    if (isVideo) {
        // readyState >= HAVE_CURRENT_DATA means a frame is available to sample.
        var HAVE_CURRENT_DATA_READY_STATE = 2;
        if (isVideo &&
            pixels.readyState <
                HAVE_CURRENT_DATA_READY_STATE) {
            throw new Error('The video element has not loaded data yet. Please wait for ' +
                '`loadeddata` event on the <video> element.');
        }
    }
    // If the current backend has 'FromPixels' registered, it has a more
    // efficient way of handling pixel uploads, so we call that.
    var kernel = getKernel(FromPixels, ENGINE.backendName);
    if (kernel != null) {
        var inputs = { pixels: pixels };
        var attrs = { numChannels: numChannels };
        return ENGINE.runKernel(FromPixels, inputs, attrs);
    }
    // Fallback path: read raw RGBA bytes ourselves. Videos report their size
    // through videoWidth/videoHeight rather than width/height.
    var _a = __read(isVideo ?
        [
            pixels.videoWidth,
            pixels.videoHeight
        ] :
        [pixels.width, pixels.height], 2), width = _a[0], height = _a[1];
    var vals;
    if (isCanvasLike) {
        vals =
            // tslint:disable-next-line:no-any
            pixels.getContext('2d').getImageData(0, 0, width, height).data;
    }
    else if (isImageData || isPixelData) {
        vals = pixels.data;
    }
    else if (isImage || isVideo || isImageBitmap) {
        // These sources must be drawn onto a (shared, lazily created) canvas
        // before their pixels can be read back.
        if (fromPixels2DContext == null) {
            if (typeof document === 'undefined') {
                if (typeof OffscreenCanvas !== 'undefined' &&
                    typeof OffscreenCanvasRenderingContext2D !== 'undefined') {
                    // @ts-ignore
                    fromPixels2DContext = new OffscreenCanvas(1, 1).getContext('2d');
                }
                else {
                    throw new Error('Cannot parse input in current context. ' +
                        'Reason: OffscreenCanvas Context2D rendering is not supported.');
                }
            }
            else {
                fromPixels2DContext = document.createElement('canvas').getContext('2d');
            }
        }
        fromPixels2DContext.canvas.width = width;
        fromPixels2DContext.canvas.height = height;
        fromPixels2DContext.drawImage(pixels, 0, 0, width, height);
        vals = fromPixels2DContext.getImageData(0, 0, width, height).data;
    }
    // Repack the RGBA byte stream into `numChannels` ints per pixel.
    var values;
    if (numChannels === 4) {
        values = new Int32Array(vals);
    }
    else {
        var numPixels = width * height;
        values = new Int32Array(numPixels * numChannels);
        for (var i = 0; i < numPixels; i++) {
            for (var channel = 0; channel < numChannels; ++channel) {
                // Source stride is always 4 (RGBA); drop the trailing channels.
                values[i * numChannels + channel] = vals[i * 4 + channel];
            }
        }
    }
    var outShape = [height, width, numChannels];
    return tensor3d(values, outShape, 'int32');
}
// Helpers used by |fromPixelsAsync| to decide whether an input can be
// wrapped into an ImageBitmap before upload.

/** True when `pixels` is a `{data: Uint8Array, ...}` raw-pixel object. */
function isPixelData(pixels) {
    if (pixels == null) {
        return false;
    }
    return pixels.data instanceof Uint8Array;
}
/** True when the environment exposes both ImageBitmap and window.createImageBitmap. */
function isImageBitmapFullySupported() {
    var hasWindow = typeof window !== 'undefined';
    var hasImageBitmap = typeof ImageBitmap !== 'undefined';
    return hasWindow && hasImageBitmap &&
        window.hasOwnProperty('createImageBitmap');
}
/** True when `pixels` exists and has a non-zero width and height. */
function isNonEmptyPixels(pixels) {
    if (pixels == null) {
        return false;
    }
    return pixels.width !== 0 && pixels.height !== 0;
}
/** True when `pixels` is a non-empty, non-bitmap, non-raw source we may wrap. */
function canWrapPixelsToImageBitmap(pixels) {
    if (!isImageBitmapFullySupported()) {
        return false;
    }
    if (pixels instanceof ImageBitmap) {
        return false;
    }
    return isNonEmptyPixels(pixels) && !isPixelData(pixels);
}
/**
 * Creates a `tf.Tensor` from an image in async way.
 *
 * ```js
 * const image = new ImageData(1, 1);
 * image.data[0] = 100;
 * image.data[1] = 150;
 * image.data[2] = 200;
 * image.data[3] = 255;
 *
 * (await tf.browser.fromPixelsAsync(image)).print();
 * ```
 * This API is the async version of fromPixels. The API will first
 * check |WRAP_TO_IMAGEBITMAP| flag, and try to wrap the input to
 * imageBitmap if the flag is set to true.
 *
 * @param pixels The input image to construct the tensor from. The
 * supported image types are all 4-channel. You can also pass in an image
 * object with following attributes:
 * `{data: Uint8Array; width: number; height: number}`
 * @param numChannels The number of channels of the output tensor. A
 * numChannels value less than 4 allows you to ignore channels. Defaults to
 * 3 (ignores alpha channel of input image).
 *
 * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}
 */
function fromPixelsAsync(pixels, numChannels) {
    if (numChannels === void 0) { numChannels = 3; }
    // NOTE: the switch below is a TypeScript-downlevelled async state machine
    // (`__awaiter`/`__generator`); `_a.label` is the resume point and
    // `[4, ...]` / `[3, N]` opcodes mean `yield` (await) and `goto case N`.
    return __awaiter(this, void 0, void 0, function () {
        var inputs, imageBitmap;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    // If wrapping is disabled or impossible, jump straight to case 5
                    // and pass the raw input through.
                    inputs = null;
                    if (!(env().getBool('WRAP_TO_IMAGEBITMAP') &&
                        canWrapPixelsToImageBitmap(pixels))) return [3 /*break*/, 5];
                    imageBitmap = void 0;
                    _a.label = 1;
                case 1:
                    // Cases 1-3 encode a try/catch around the await below.
                    _a.trys.push([1, 3, , 4]);
                    return [4 /*yield*/, createImageBitmap(pixels, { premultiplyAlpha: 'none' })];
                case 2:
                    // wrap in try-catch block, because createImageBitmap may not work
                    // properly in some browsers, e.g.
                    // https://bugzilla.mozilla.org/show_bug.cgi?id=1335594
                    // tslint:disable-next-line: no-any
                    imageBitmap = _a.sent();
                    return [3 /*break*/, 4];
                case 3:
                    // Catch arm: swallow the failure and fall back to the raw input.
                    _a.sent();
                    imageBitmap = null;
                    return [3 /*break*/, 4];
                case 4:
                    // createImageBitmap will clip the source size.
                    // In some cases, the input will have larger size than its content.
                    // E.g. new Image(10, 10) but with 1 x 1 content. Using
                    // createImageBitmap will clip the size from 10 x 10 to 1 x 1, which
                    // is not correct. We should avoid wrapping such resouce to
                    // imageBitmap.
                    if (imageBitmap != null && imageBitmap.width === pixels.width &&
                        imageBitmap.height === pixels.height) {
                        inputs = imageBitmap;
                    }
                    else {
                        inputs = pixels;
                    }
                    return [3 /*break*/, 6];
                case 5:
                    inputs = pixels;
                    _a.label = 6;
                case 6: return [2 /*return*/, fromPixels_(inputs, numChannels)];
            }
        });
    });
}
/**
 * Draws a `tf.Tensor` of pixel values to a byte array or optionally a
 * canvas.
 *
 * When the dtype of the input is 'float32', we assume values in the range
 * [0-1]. Otherwise, when input is 'int32', we assume values in the range
 * [0-255].
 *
 * Returns a promise that resolves when the canvas has been drawn to.
 *
 * @param img A rank-2 tensor with shape `[height, width]`, or a rank-3 tensor
 * of shape `[height, width, numChannels]`. If rank-2, draws grayscale. If
 * rank-3, must have depth of 1, 3 or 4. When depth of 1, draws
 * grayscale. When depth of 3, we draw with the first three components of
 * the depth dimension corresponding to r, g, b and alpha = 1. When depth of
 * 4, all four components of the depth dimension correspond to r, g, b, a.
 * @param canvas The canvas to draw to.
 *
 * @doc {heading: 'Browser', namespace: 'browser'}
 */
function toPixels(img, canvas) {
    // NOTE: downlevelled async state machine; case 0 validates, the
    // `[4, ...]` opcode awaits the tensor download, case 1 packs bytes.
    return __awaiter(this, void 0, void 0, function () {
        var $img, originalImgTensor, _a, height, width, depth, data, multiplier, bytes, i, rgba, d, value, j, ctx, imageData;
        return __generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    $img = convertToTensor(img, 'img', 'toPixels');
                    if (!(img instanceof Tensor)) {
                        // Assume int32 if user passed a non-Tensor (array-like) input;
                        // the intermediate tensor is disposed immediately.
                        originalImgTensor = $img;
                        $img = cast(originalImgTensor, 'int32');
                        originalImgTensor.dispose();
                    }
                    if ($img.rank !== 2 && $img.rank !== 3) {
                        throw new Error("toPixels only supports rank 2 or 3 tensors, got rank " + $img.rank + ".");
                    }
                    _a = __read($img.shape.slice(0, 2), 2), height = _a[0], width = _a[1];
                    depth = $img.rank === 2 ? 1 : $img.shape[2];
                    if (depth > 4 || depth === 2) {
                        throw new Error("toPixels only supports depth of size " +
                            ("1, 3 or 4 but got " + depth));
                    }
                    if ($img.dtype !== 'float32' && $img.dtype !== 'int32') {
                        throw new Error("Unsupported type for toPixels: " + $img.dtype + "." +
                            " Please use float32 or int32 tensors.");
                    }
                    // Await download of the tensor's values to CPU memory.
                    return [4 /*yield*/, $img.data()];
                case 1:
                    data = _b.sent();
                    // float32 values are in [0, 1] and get scaled up to [0, 255].
                    multiplier = $img.dtype === 'float32' ? 255 : 1;
                    bytes = new Uint8ClampedArray(width * height * 4);
                    for (i = 0; i < height * width; ++i) {
                        // Default alpha is fully opaque; overwritten only when depth is 4.
                        rgba = [0, 0, 0, 255];
                        for (d = 0; d < depth; d++) {
                            value = data[i * depth + d];
                            if ($img.dtype === 'float32') {
                                if (value < 0 || value > 1) {
                                    throw new Error("Tensor values for a float32 Tensor must be in the " +
                                        ("range [0 - 1] but encountered " + value + "."));
                                }
                            }
                            else if ($img.dtype === 'int32') {
                                if (value < 0 || value > 255) {
                                    throw new Error("Tensor values for a int32 Tensor must be in the " +
                                        ("range [0 - 255] but encountered " + value + "."));
                                }
                            }
                            if (depth === 1) {
                                // Grayscale: replicate the single channel into r, g, b.
                                rgba[0] = value * multiplier;
                                rgba[1] = value * multiplier;
                                rgba[2] = value * multiplier;
                            }
                            else {
                                rgba[d] = value * multiplier;
                            }
                        }
                        j = i * 4;
                        bytes[j + 0] = Math.round(rgba[0]);
                        bytes[j + 1] = Math.round(rgba[1]);
                        bytes[j + 2] = Math.round(rgba[2]);
                        bytes[j + 3] = Math.round(rgba[3]);
                    }
                    if (canvas != null) {
                        canvas.width = width;
                        canvas.height = height;
                        ctx = canvas.getContext('2d');
                        imageData = new ImageData(bytes, width, height);
                        ctx.putImageData(imageData, 0, 0);
                    }
                    // Dispose the int32 copy made above, but never the caller's tensor.
                    if ($img !== img) {
                        $img.dispose();
                    }
                    return [2 /*return*/, bytes];
            }
        });
    });
}
// Wrap `fromPixels_` via `op()` so it participates in the engine's op
// bookkeeping like every other public op.
var fromPixels = op({ fromPixels_: fromPixels_ });

// Namespace object exposed as `tf.browser`.
var browser = {
    __proto__: null,
    fromPixelsAsync: fromPixelsAsync,
    toPixels: toPixels,
    fromPixels: fromPixels
};
+
/**
 * Validate gather nd inputs.
 *
 * @param tensor The tensor contains the source values.
 * @param indices The tensor contains the indices to slice the source.
 *
 * @returns [resultShape, numUpdates, sliceSize, strides]
 */
function prepareAndValidate(tensor, indices) {
    var tensorRank = tensor.shape.length;
    var indicesRank = indices.shape.length;
    if (tensorRank < 1) {
        throw new Error('tf.gatherND() expects the input to be rank 1 or higher,' +
            (" but the rank was " + tensorRank + "."));
    }
    if (indicesRank < 1) {
        throw new Error('tf.gatherND() expects the indices to be rank 1 or higher,' +
            (" but the rank was " + indicesRank + "."));
    }
    if (indices.dtype !== 'int32') {
        throw new Error('tf.gatherND() expects the indices to be int32 type,' +
            (" but the dtype was " + indices.dtype + "."));
    }
    if (indices.shape[indicesRank - 1] > tensorRank) {
        throw new Error('index innermost dimension length must be <= tensor rank; saw: ' +
            (indices.shape[indicesRank - 1] + " vs. " + tensorRank));
    }
    if (sizeFromShape(tensor.shape) === 0) {
        throw new Error('Requested more than 0 entries, but input is empty.' +
            (" Input shape: " + tensor.shape + "."));
    }
    // The innermost indices dimension holds one coordinate per sliced axis.
    var sliceRank = indices.shape[indicesRank - 1];
    // The result shape is
    // indices.shape[:-1] + params.shape[indices.shape[-1]:]
    var numResults = 1;
    var resultShape = [];
    for (var i = 0; i < indicesRank - 1; ++i) {
        numResults *= indices.shape[i];
        resultShape.push(indices.shape[i]);
    }
    var sliceSize = 1;
    for (var i = sliceRank; i < tensorRank; ++i) {
        sliceSize *= tensor.shape[i];
        resultShape.push(tensor.shape[i]);
    }
    // Strides over whole slices, terminated by 1 and trimmed to sliceRank.
    var strides = computeStrides(tensor.shape)
        .map(function (stride) { return stride / sliceSize; })
        .concat([1])
        .slice(0, sliceRank);
    return [resultShape, numResults, sliceSize, strides];
}
+
// Namespace object for the gatherND shape helpers (null prototype).
var gather_nd_util = {
    __proto__: null,
    prepareAndValidate: prepareAndValidate
};
+
/**
 * Check whether updates.shape = indices.shape[:batchDim] +
 * shape[sliceDim:]
 *
 * @param shape The output shape the updates are scattered into.
 * @param indices Tensor (or tensor-like) holding the scatter indices.
 * @param updates Tensor (or tensor-like) holding the update values.
 * @throws Error when ranks or dimensions are inconsistent.
 */
function validateUpdateShape(shape, indices, updates) {
    // Rank-1 indices are scalar indices (sliceDim = 1); otherwise the
    // innermost dimension of `indices` holds the per-update coordinates.
    var sliceDim = (indices.rank > 1) ? indices.shape[indices.rank - 1] : 1;
    var batchDim = (indices.rank > 1) ? indices.rank - 1 : 1;
    var shapeError = 'Must have updates.shape = indices.shape[:batchDim] + ' +
        ("shape[sliceDim:], got updates.shape: " + updates.shape) +
        (", indices.shape: " + indices.shape + ", shape: " + shape) +
        (", sliceDim: " + sliceDim + ", and batchDim: " + batchDim + ".");
    if (updates.rank < batchDim) {
        throw new Error(shapeError + (" update.rank < " + batchDim + ". "));
    }
    if (shape.length < sliceDim + (updates.rank - batchDim)) {
        throw new Error(shapeError +
            (" Output shape length < " + (sliceDim + (updates.rank - batchDim))));
    }
    if (updates.rank !== batchDim + shape.length - sliceDim) {
        throw new Error(shapeError + (" update.rank != " + (batchDim + shape.length - sliceDim)));
    }
    // Leading (batch) dims of updates must match the batch dims of indices.
    for (var d = 0; d < batchDim; ++d) {
        if (updates.shape[d] !== indices.shape[d]) {
            throw new Error(shapeError +
                (" updates.shape[" + d + "] (" + updates.shape[d] + ") != indices.shape[" + d + "] (" + indices.shape[d] + ")."));
        }
    }
    // Trailing dims of updates must match shape[sliceDim:].
    for (var d = 0; d < updates.rank - batchDim; ++d) {
        if (updates.shape[d + batchDim] !== shape[d + sliceDim]) {
            // Bug fix: the message previously reported shape[d + batchDim],
            // which is not the dimension the comparison actually uses, so the
            // error showed the wrong index/value whenever sliceDim != batchDim.
            throw new Error(shapeError +
                (" updates.shape[" + (d + batchDim) + "] (" + updates.shape[d + batchDim] + ") != shape[" + (d + sliceDim) + "] (" + shape[d + sliceDim] + ")"));
        }
    }
}
/**
 * Validate scatter nd inputs.
 *
 * @param updates The tensor containing the update values.
 * @param indices The tensor containing the indices for the update values.
 * @param shape The shape of the output tensor.
 * @throws Error when any rank, dtype or shape constraint is violated.
 */
function validateInput$1(updates, indices, shape) {
    if (indices.rank < 1) {
        throw new Error('tf.scatterND() expects the indices to be rank 1 or higher,' +
            (" but the rank was " + indices.rank + "."));
    }
    if (updates.rank < 1) {
        throw new Error('tf.scatterND() expects the updates to be rank 1 or higher,' +
            (" but the rank was " + updates.rank + "."));
    }
    if (indices.dtype !== 'int32') {
        throw new Error("The dtype of 'indices' should be int32, but got dtype: " + indices.dtype);
    }
    if (shape.length < 1) {
        throw new Error("Output rank must be greater or equal to 1, but got shape: " + shape);
    }
    // NOTE: a `shape.length === 0` branch (checking indices.size/updates.size)
    // used to follow here; it was unreachable because the `shape.length < 1`
    // guard above already throws for an empty shape, so it has been removed.
    validateUpdateShape(shape, indices, updates);
}
/**
 * Calculate the shape information for the output.
 *
 * @param updates The tensor contains the update values.
 * @param indices The tensor contains the indices for the update values.
 * @param shape The shape of the output tensor.
 *
 * @returns ScatterShapeInfo
 */
function calculateShapes(updates, indices, shape) {
    // Rank-1 indices are scalar indices; otherwise the innermost dimension
    // of `indices` is the coordinate count per update.
    var indicesRank = indices.shape.length;
    var sliceRank = (indicesRank > 1) ? indices.shape[indicesRank - 1] : 1;
    // Number of elements making up each contiguous slice of the output, so
    // updates can be copied a whole slice at a time over flat storage.
    var totalNd = shape.length;
    var sliceSize = 1;
    for (var dim = sliceRank; dim < totalNd; ++dim) {
        sliceSize *= shape[dim];
    }
    var safeSliceDim = (sliceRank < 1) ? 1 : sliceRank;
    var numUpdates = sizeFromShape(indices.shape) / safeSliceDim;
    var strides = computeStrides(shape.slice(0, sliceRank)).concat([1]);
    var outputSize = sizeFromShape(shape);
    return {
        sliceRank: sliceRank,
        numUpdates: numUpdates,
        sliceSize: sliceSize,
        strides: strides,
        outputSize: outputSize
    };
}
+
// Namespace object for the scatterND shape helpers (null prototype).
var scatter_nd_util = {
    __proto__: null,
    validateUpdateShape: validateUpdateShape,
    validateInput: validateInput$1,
    calculateShapes: calculateShapes
};
+
// Sentinel axis markers used by the strided-slice helpers below.
var NEW_AXIS = -2;
var SHRINK_AXIS = -1;
/**
 * Asserts that `begin` and `size` describe a slice that fits entirely inside
 * `input`'s shape: both arrays must match the input rank, and
 * begin[i] + size[i] must not exceed input.shape[i] on any axis.
 */
function assertParamsValid(input, begin, size) {
    var inputRank = input.shape.length;
    assert(inputRank === begin.length, function () {
        return "Error in slice" + inputRank + "D: Length of begin " + begin + " must " +
            "match the rank of the array (" + inputRank + ").";
    });
    assert(inputRank === size.length, function () {
        return "Error in slice" + inputRank + "D: Length of size " + size + " must " +
            "match the rank of the array (" + inputRank + ").";
    });
    // Per-axis check kept in a helper so the lazy message closure captures a
    // stable copy of the axis index.
    var checkAxis = function (i) {
        assert(begin[i] + size[i] <= input.shape[i], function () {
            return "Error in slice" + inputRank + "D: begin[" + i + "] + size[" + i + "] " +
                "(" + (begin[i] + size[i]) + ") would overflow input.shape[" + i + "] (" + input.shape[i] + ")";
        });
    };
    for (var i = 0; i < inputRank; ++i) {
        checkAxis(i);
    }
}
/**
 * Converts a binary mask to an array of axes. Used in stridedSlice().
 *
 * @param mask Non-negative integer bit mask; bit `i` set means axis `i`.
 * @returns The set axes in ascending order (e.g. 0b101 -> [0, 2]).
 */
function maskToAxes(mask) {
    var axes = [];
    var axis = 0;
    while (mask > 0) {
        if (mask & 1) {
            axes.push(axis);
        }
        // Bug fix: `mask /= 2` left fractional values (e.g. 5 -> 2.5), so the
        // `mask > 0` loop kept spinning (~1000 no-op iterations) until the
        // float underflowed to 0. Integer halving yields the identical axes
        // and terminates as soon as the bits are exhausted.
        mask = Math.floor(mask / 2);
        axis++;
    }
    return axes;
}
/** Computes the output shape given the strided slice params. */
function computeOutShape$2(begin, end, strides) {
    var outShape = [];
    for (var axis = 0; axis < begin.length; axis++) {
        // ceil so a partial final step still contributes one element.
        outShape.push(Math.ceil((end[axis] - begin[axis]) / strides[axis]));
    }
    return outShape;
}
// Creates full selection at the elided dimensions. If the dimension matches
// the ellipsis mask, override the current stride value. Otherwise, insert.
function stridesWithElidedDims(strides, ellipsisInsertionIndex, numElidedAxes, inputShape) {
    // Shallow copy, padded with stride 1 up to the full input rank.
    var expanded = strides.slice();
    while (expanded.length < inputShape.length) {
        expanded.push(1);
    }
    for (var i = 0; i < numElidedAxes; i++) {
        if (i === 0) {
            expanded[ellipsisInsertionIndex] = 1;
        }
        else {
            // Insert a unit stride at the ellipsis position and drop the last
            // entry to keep the overall length fixed.
            expanded.splice(ellipsisInsertionIndex, 0 /* num elements to delete */, 1 /* element to add */);
            expanded.pop();
        }
    }
    return expanded;
}
/**
 * Maps an axis in the ellipsis-expanded (normalized) space back to its index
 * in the original begin/end/strides arrays.
 */
function unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, normalizedAxis) {
    return normalizedAxis <= ellipsisInsertionIndex
        ? normalizedAxis
        : normalizedAxis - (numElidedAxes - 1);
}
/**
 * Returns the `numElidedAxes` consecutive axis indices covered by an ellipsis
 * starting at `ellipsisInsertionIndex`.
 */
function getElidedAxes(numElidedAxes, ellipsisInsertionIndex) {
    var elided = [];
    var axis = ellipsisInsertionIndex;
    while (elided.length < numElidedAxes) {
        elided.push(axis++);
    }
    return elided;
}
// Normalize the start, end and strides.
function getNormalizedAxes(inputShape, ellipsisAxes, numInterpolatedAxes, begin, end, strides, beginMask, endMask, ellipsisMask) {
    var rank = inputShape.length;
    var normalizedBegin;
    var normalizedEnd;
    var normalizedStrides;
    if (ellipsisAxes.length && numInterpolatedAxes > 0) {
        // The ellipsis applies to the masked index as well as any dimensions
        // that are interpolated.
        var fullIndex = ellipsisAxes[0];
        var numElidedAxes = numInterpolatedAxes + 1;
        normalizedBegin = startIndicesWithElidedDims(beginMask, fullIndex, numElidedAxes, begin, inputShape);
        normalizedEnd = stopIndicesWithElidedDims(endMask, fullIndex, numElidedAxes, end, inputShape);
        normalizedStrides = stridesWithElidedDims(strides, fullIndex, numElidedAxes, inputShape);
    }
    else {
        // No ellipsis: normalize every axis independently.
        normalizedBegin = new Array(rank);
        normalizedEnd = new Array(rank);
        normalizedStrides = new Array(rank);
        for (var axis = 0; axis < rank; axis++) {
            normalizedBegin[axis] = startForAxis(beginMask, begin, strides, inputShape, axis, ellipsisMask);
            normalizedEnd[axis] = stopForAxis(endMask, end, strides, inputShape, axis, ellipsisMask);
            normalizedStrides[axis] = stridesForAxis(strides, axis, ellipsisMask);
        }
    }
    return {
        begin: normalizedBegin,
        end: normalizedEnd,
        strides: normalizedStrides
    };
}
/**
 * Builds per-axis start indices with the ellipsis expanded: elided axes
 * select from index 0; every other axis takes its value from the sparse
 * `originalBegin` (or 0 when that axis's beginMask bit is set).
 */
function startIndicesWithElidedDims(beginMask, ellipsisInsertionIndex, numElidedAxes, originalBegin, inputShape) {
    var newIndices = inputShape.slice();
    for (var axis = 0; axis < newIndices.length; axis++) {
        var isElided = axis >= ellipsisInsertionIndex &&
            axis < ellipsisInsertionIndex + numElidedAxes;
        if (isElided) {
            // Elided dimensions select from the very beginning.
            newIndices[axis] = 0;
            continue;
        }
        // Map back to the axis of the sparse (user-supplied) begin array.
        var sparseAxis = axis <= ellipsisInsertionIndex ?
            axis :
            axis - (numElidedAxes - 1);
        var value = originalBegin[sparseAxis];
        if (beginMask & 1 << sparseAxis) {
            value = 0;
        }
        newIndices[axis] = value;
    }
    return newIndices;
}
/**
 * Builds per-axis stop indices with the ellipsis expanded: elided axes select
 * through to the end of the axis; other axes take the sparse `originalEnd`
 * value (or the axis end when the endMask bit is set). Negative indices are
 * resolved and all stops clamped into [0, axisSize].
 */
function stopIndicesWithElidedDims(endMask, ellipsisInsertionIndex, numElidedAxes, originalEnd, inputShape) {
    var newIndices = inputShape.slice();
    var elidedAxes = getElidedAxes(numElidedAxes, ellipsisInsertionIndex);
    for (var axis = 0; axis < newIndices.length; axis++) {
        if (elidedAxes.indexOf(axis) !== -1) {
            // Elided dimensions select through to the end of the axis.
            newIndices[axis] = Number.MAX_SAFE_INTEGER;
        }
        else {
            var sparseAxis = unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, axis);
            newIndices[axis] = (endMask & 1 << sparseAxis) ?
                Number.MAX_SAFE_INTEGER :
                originalEnd[sparseAxis];
        }
    }
    for (var i = 0; i < newIndices.length; i++) {
        // Negative indices count from the end of the axis.
        var axisSize = inputShape[i];
        if (newIndices[i] < 0) {
            newIndices[i] += axisSize;
        }
        newIndices[i] = clamp(0, newIndices[i], inputShape[i]);
    }
    return newIndices;
}
/** Resolves the stride for `axis`, defaulting to 1 for masked/unset axes. */
function stridesForAxis(strides, axis, ellipsisMask) {
    var isMasked = (ellipsisMask & (1 << axis)) !== 0;
    var stride = strides[axis];
    return (isMasked || stride == null) ? 1 : stride;
}
/**
 * Normalizes the start index for one axis: masked or missing starts snap to
 * the extreme matching the stride direction, negatives count from the end,
 * and the result is clamped into [0, axisSize - 1].
 */
function startForAxis(beginMask, startIndices, strides, inputShape, axis, ellipsisMask) {
    var stride = strides[axis] || 1;
    var start = startIndices[axis];
    // The start is unspecified when the axis's begin-mask or ellipsis-mask
    // bit is set, or when no begin index was supplied for it.
    var unspecified =
        (beginMask & 1 << axis) || (ellipsisMask & 1 << axis) || start == null;
    if (unspecified) {
        // Pick the extreme for the iteration direction; the clamp below pulls
        // it into range (mirrors stopForAxis for symmetry).
        start = stride > 0 ? Number.MIN_SAFE_INTEGER : Number.MAX_SAFE_INTEGER;
    }
    // Negative indices count from the end of the axis.
    var axisSize = inputShape[axis];
    if (start < 0) {
        start += axisSize;
    }
    return clamp(0, start, axisSize - 1);
}
/**
 * Normalizes the stop index for one axis: masked or missing stops snap to
 * the extreme matching the stride direction, negatives count from the end,
 * and the result is clamped with direction-dependent bounds (the stop points
 * one past the last selected element).
 */
function stopForAxis(endMask, stopIndices, strides, inputShape, axis, ellipsisMask) {
    var stride = strides[axis] || 1;
    var stop = stopIndices[axis];
    // The stop is unspecified when the axis's end-mask or ellipsis-mask bit
    // is set, or when no stop index was supplied for it.
    if ((endMask & (1 << axis)) || (ellipsisMask & (1 << axis)) || stop == null) {
        // Pick the extreme for the iteration direction; clamped below.
        stop = stride > 0 ? Number.MAX_SAFE_INTEGER : Number.MIN_SAFE_INTEGER;
    }
    // Negative indices count from the end of the axis.
    var axisSize = inputShape[axis];
    if (stop < 0) {
        stop += axisSize;
    }
    // Because the end index points one past the last element, the valid
    // clamping range depends on the direction of iteration.
    return stride > 0 ? clamp(0, stop, axisSize) : clamp(-1, stop, axisSize - 1);
}
/**
 * Returns true if the slice occupies a continous set of elements in the
 * 'flat' space.
 */
function isSliceContinous(shape, begin, size) {
    // Locate the first axis whose slice size exceeds 1; axes before it each
    // contribute a single element and cannot break flat-space continuity.
    var firstNonOneAxis = size.findIndex(function (s) { return s > 1; });
    if (firstNonOneAxis === -1) {
        firstNonOneAxis = size.length;
    }
    // Every axis after it must be taken whole, starting at offset zero.
    for (var axis = firstNonOneAxis + 1; axis < size.length; axis++) {
        if (begin[axis] > 0 || size[axis] !== shape[axis]) {
            return false;
        }
    }
    return true;
}
/** Computes the flat-buffer offset of the first element selected by `begin`. */
function computeFlatOffset(begin, strides) {
    // NOTE(review): an empty `begin` yields 1, not 0 — presumably callers
    // never pass an empty slice spec; verify before relying on this case.
    if (begin.length === 0) {
        return 1;
    }
    // The innermost axis has stride 1 in flat space, so its begin index
    // contributes directly; outer axes are weighted by their strides.
    var flatOffset = begin[begin.length - 1];
    for (var axis = 0; axis + 1 < begin.length; axis++) {
        flatOffset += begin[axis] * strides[axis];
    }
    return flatOffset;
}
/**
 * Normalizes ergonomic slice() arguments: a scalar or short `begin`/`size`
 * is padded to one entry per axis of `x`, and -1 sizes expand to the
 * remainder of the axis. Returns [begin, size] as full-rank arrays.
 */
function parseSliceParams(x, begin, size) {
    var xRank = x.shape.length;
    // Expand `begin` to one entry per axis, padding with 0.
    var begin_;
    if (typeof begin === 'number') {
        begin_ = [begin].concat(new Array(xRank - 1).fill(0));
    }
    else if (begin.length < xRank) {
        begin_ = begin.concat(new Array(xRank - begin.length).fill(0));
    }
    else {
        begin_ = begin.slice();
    }
    begin_.forEach(function (d) {
        assert(d !== -1, function () { return 'slice() does not support negative begin indexing.'; });
    });
    // Expand `size` similarly, padding with -1 ("to the end of the axis").
    var size_;
    if (size == null) {
        size_ = new Array(xRank).fill(-1);
    }
    else if (typeof size === 'number') {
        size_ = [size].concat(new Array(xRank - 1).fill(-1));
    }
    else if (size.length < xRank) {
        size_ = size.concat(new Array(xRank - size.length).fill(-1));
    }
    else {
        size_ = size;
    }
    size_ = size_.map(function (d, i) {
        if (d >= 0) {
            return d;
        }
        assert(d === -1, function () { return "Negative size values should be exactly -1 but got " +
            (d + " for the slice() size at index " + i + "."); });
        // -1 expands to the remainder of the axis past `begin`.
        return x.shape[i] - begin_[i];
    });
    return [begin_, size_];
}
// Convert the slicing specification from a sparse representation to a dense
// representation. This means that all ellipses and newaxis are expanded out.
/**
 * Expands a sparse strided-slice spec (which may contain ellipsis, newAxis
 * and shrinkAxis mask bits) into a dense per-dimension spec for `xShape`.
 *
 * Returns an object with:
 * - finalShape / finalShapeSparse: output shape with / without newAxis dims,
 * - isIdentity / sliceDim0 / isSimpleSlice: optimization flags,
 * - begin / end / strides: dense, canonicalized per-dimension values.
 *
 * Throws when multiple ellipses are present, a stride is zero, a shrunk axis
 * has a non-positive stride, or a shrink index is out of bounds.
 */
function sliceInfo(xShape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask) {
    // Missing strides default to 1 in every sparse dimension.
    var stridesNonNull;
    if (strides == null) {
        stridesNonNull = new Array(begin.length);
        stridesNonNull.fill(1);
    }
    else {
        stridesNonNull = strides;
    }
    // Only one non-zero bit is allowed in ellipsisMask, which means ellipsisMask
    // is a power of 2. Use bit compares to ensure ellipsisMask is 0 or a power
    // of 2. When i is a power of 2, i & (i - 1) is always 0.
    // Also ref:
    // https://stackoverflow.com/questions/600293/how-to-check-if-a-number-is-a-power-of-2
    if (ellipsisMask != null && (ellipsisMask & (ellipsisMask - 1)) !== 0) {
        throw new Error('Multiple ellipses in slice is not allowed.');
    }
    // Step 1: Account for ellipsis and new axis.
    // Check for ellipsis and count how many non-newaxis there are after.
    var ellipsisSeen = false;
    var sparseSpec = {
        dims: stridesNonNull.length,
        numAddAxisAfterEllipsis: 0,
        begin: begin.slice(),
        end: end.slice(),
        strides: stridesNonNull.slice(),
        beginMask: beginMask,
        endMask: endMask,
        ellipsisMask: ellipsisMask,
        newAxisMask: newAxisMask,
        shrinkAxisMask: shrinkAxisMask
    };
    for (var i = 0; i < sparseSpec.dims; i++) {
        if (ellipsisSeen && ((1 << i) & newAxisMask) !== 0) {
            sparseSpec.numAddAxisAfterEllipsis++;
        }
        if ((1 << i) & ellipsisMask) {
            ellipsisSeen = true;
        }
    }
    // If no ellipsis insert one at the end.
    if (!ellipsisSeen) {
        sparseSpec.ellipsisMask |= (1 << sparseSpec.dims);
        sparseSpec.dims++; // this affects loop iteration below
    }
    // Step 2: Make a sparse spec into a full index spec.
    //
    // The sparse spec does not correspond to the number of dimensions.
    // Make a dense spec that corresponds to the number of dimensions.
    //
    // For example suppose foo[...,3:] on foo.shape = [2, 2, 3] then we need to
    // produce the missing beginMask for the first two dimensions i.e. from
    // beginMaskSpec = 0, endMaskSpec = 2, we achieve beginMask = 6 (110),
    // endMask = 7 (111).
    var denseSpec = {
        dims: xShape.length,
        beginMask: 0,
        endMask: 0,
        beginValid: false,
        endValid: false
    };
    buildDenseSpec(sparseSpec, denseSpec);
    // Step 3: Make implicit ranges (non-zero beginMasks and endMasks) explicit
    // and bounds check.
    var isIdentity = true;
    var sliceDim0 = true;
    var isSimpleSlice = true;
    var processingShape = [];
    var finalShape = [];
    for (var i = 0; i < xShape.length; ++i) {
        if (denseSpec.strides[i] === 0) {
            throw Error("strides[" + i + "] must be non-zero");
        }
        var shrinkI = !!(denseSpec.shrinkAxisMask & (1 << i));
        var dimI = xShape[i];
        // -1 marks an unknown dimension; its processing size stays unknown
        // unless the axis is shrunk (then it contributes a single element).
        if (dimI === -1) {
            processingShape.push(shrinkI ? 1 : -1);
            continue;
        }
        var masks = [denseSpec.beginMask & (1 << i), denseSpec.endMask & (1 << i)];
        var validRange = [
            denseSpec.strides[i] > 0 ? 0 : -1,
            denseSpec.strides[i] > 0 ? dimI : dimI - 1
        ];
        if (shrinkI && denseSpec.strides[i] <= 0) {
            throw Error('only stride 1 allowed on non-range indexing.');
        }
        isSimpleSlice = isSimpleSlice && (denseSpec.strides[i] === 1);
        var beginAndEndMasked = !!((denseSpec.beginMask & (1 << i)) && (denseSpec.endMask & (1 << i)));
        if (denseSpec.beginValid && denseSpec.endValid) {
            if (shrinkI) {
                // If we are shrinking, the end index is now possibly incorrect. In
                // particular foo[-1] produces sparseBegin = -1, sparseEnd = 0.
                // and canonical puts these to n-1 and 0, which implies a degenerate
                // interval. Fortunately, it is now safe to re-create end as begin + 1.
                var xFwd = denseSpec.begin[i] < 0 ? dimI + denseSpec.begin[i] :
                    denseSpec.begin[i];
                denseSpec.begin[i] = xFwd;
                denseSpec.end[i] = denseSpec.begin[i] + 1;
                if (xFwd < 0 || xFwd >= dimI) {
                    throw Error("slice index " + denseSpec.begin[i] + " of dimension " + i + " out of bounds.");
                }
            }
            else {
                denseSpec.begin[i] = canonical(denseSpec.begin[i], 0, denseSpec.strides[i], dimI, masks, validRange);
                denseSpec.end[i] = canonical(denseSpec.end[i], 1, denseSpec.strides[i], dimI, masks, validRange);
            }
            // Update optimization values
            var takeAllInDimension = denseSpec.strides[i] === 1 &&
                denseSpec.begin[i] === 0 && denseSpec.end[i] === dimI;
            isIdentity = isIdentity && takeAllInDimension;
            sliceDim0 = sliceDim0 &&
                ((i === 0 && denseSpec.strides[i] === 1) || takeAllInDimension);
        }
        else {
            isIdentity =
                isIdentity && ((denseSpec.strides[i] === 1) && beginAndEndMasked);
            sliceDim0 = sliceDim0 &&
                ((i === 0 && denseSpec.strides[i] === 1) || beginAndEndMasked);
        }
        // Compute the processing shape (the intermediate Eigen will produce)
        var intervalLength = void 0;
        var knownInterval = false;
        if (denseSpec.beginValid && denseSpec.endValid) {
            intervalLength = denseSpec.end[i] - denseSpec.begin[i];
            knownInterval = true;
        }
        else if (shrinkI) {
            // The dimension is still known as 1 for the processingShape, but will be
            // discarded for the final shape.
            intervalLength = 1;
            knownInterval = true;
        }
        else if (beginAndEndMasked) {
            // Even if we don't have values for begin or end, we do know that this
            // dimension covers the whole interval. If we have shape information for
            // this dimension, that tells us the interval length.
            if (dimI >= 0) {
                if (denseSpec.strides[i] < 0) {
                    intervalLength = -dimI;
                }
                else {
                    intervalLength = dimI;
                }
                knownInterval = true;
            }
        }
        if (knownInterval) {
            var sizeI = void 0;
            // Hold zero if the interval is degenerate, otherwise account for
            // remainder
            if (intervalLength === 0 ||
                ((intervalLength < 0) !== (denseSpec.strides[i] < 0))) {
                sizeI = 0;
            }
            else {
                sizeI = Math.trunc(intervalLength / denseSpec.strides[i]) +
                    (intervalLength % denseSpec.strides[i] !== 0 ? 1 : 0);
            }
            processingShape.push(sizeI);
        }
        else {
            processingShape.push(-1);
        }
    }
    // Step 4: Compute the final shape
    //
    // newAxis will increase dimension by 1 (with a one-size dimension)
    // slices like foo[3, ...] will reduce dimension by 1.
    // This cannot be done earlier, because it depends on Step 3.
    for (var denseDim = 0; denseDim < denseSpec.finalShapeGatherIndices.length; ++denseDim) {
        var gatherIndex = denseSpec.finalShapeGatherIndices[denseDim];
        if (gatherIndex >= 0) {
            finalShape.push(processingShape[gatherIndex]);
        }
        else if (gatherIndex === NEW_AXIS) {
            finalShape.push(1);
        }
    }
    // The sparse final shape drops the size-1 dims introduced by newAxis.
    var finalShapeSparse = finalShape.filter(function (dim, i) { return denseSpec.finalShapeGatherIndices[i] !== NEW_AXIS; });
    return {
        finalShapeSparse: finalShapeSparse,
        finalShape: finalShape,
        isIdentity: isIdentity,
        sliceDim0: sliceDim0,
        isSimpleSlice: isSimpleSlice,
        begin: denseSpec.begin,
        end: denseSpec.end,
        strides: denseSpec.strides
    };
}
/**
 * Populates `dense` (one entry per input dimension) from the sparse slice
 * spec `sparse`, expanding the single guaranteed ellipsis and recording
 * newAxis/shrinkAxis positions in `dense.finalShapeGatherIndices`.
 * Mutates `dense` in place; returns nothing.
 */
function buildDenseSpec(sparse, dense) {
    dense.beginMask = 0;
    dense.endMask = 0;
    dense.shrinkAxisMask = 0;
    // Index into the dense (per-input-dimension) arrays being filled.
    var fullIndex = 0;
    dense.beginValid = sparse.begin != null;
    dense.endValid = sparse.end != null;
    dense.begin = new Array(dense.dims);
    dense.end = new Array(dense.dims);
    dense.strides = new Array(dense.dims);
    dense.finalShapeGatherIndices = [];
    dense.finalShapeGatherIndicesSparse = [];
    dense.inputShapeGatherIndicesSparse = new Array(dense.dims);
    for (var i = 0; i < sparse.dims; i++) {
        if ((1 << i) & sparse.ellipsisMask) {
            // Only the bit that has ellipsis will fall in this condition.
            // Expand the ellipsis into the appropriate indices
            // Note: this only works because we guaranteed one ellipsis.
            var nextIndex = Math.min(dense.dims - (sparse.dims - i) + 1 + sparse.numAddAxisAfterEllipsis, dense.dims);
            for (; fullIndex < nextIndex; fullIndex++) {
                // newAxis aren't real axis so you have to skip.
                // Ellipsis-covered dims are full selections: begin/end values
                // are placeholders, with both mask bits set to mark them.
                dense.begin[fullIndex] = 0;
                dense.end[fullIndex] = 0;
                dense.strides[fullIndex] = 1;
                dense.beginMask |= (1 << fullIndex);
                dense.endMask |= (1 << fullIndex);
                dense.finalShapeGatherIndices.push(fullIndex);
                dense.finalShapeGatherIndicesSparse.push(-1);
                dense.inputShapeGatherIndicesSparse[fullIndex] = i;
            }
        }
        else if ((1 << i) & sparse.newAxisMask) {
            // Only the bit that has newAxis will fall in this condition.
            // A newAxis consumes no input dimension, so fullIndex is not
            // advanced; only the final shape gains a slot.
            dense.finalShapeGatherIndices.push(NEW_AXIS);
            dense.finalShapeGatherIndicesSparse.push(-1);
        }
        else {
            if (fullIndex === dense.begin.length) {
                throw Error("Index out of range using input dim " + fullIndex + "; input " +
                    ("has only " + dense.dims + " dims, " + dense.begin.length + "."));
            }
            // Gather slicing spec into appropriate index.
            if (sparse.begin != null) {
                dense.begin[fullIndex] = sparse.begin[i];
            }
            if (sparse.end != null) {
                dense.end[fullIndex] = sparse.end[i];
            }
            dense.strides[fullIndex] = sparse.strides[i];
            if (sparse.beginMask & (1 << i)) {
                dense.beginMask |= (1 << fullIndex);
            }
            if (sparse.endMask & (1 << i)) {
                dense.endMask |= (1 << fullIndex);
            }
            // If shrink, record where to get the dimensionality from (i.e. newAxis)
            // creates a fake 1 size dimension. Also remember shrink axis (now in
            // dense form) so we can ignore dense.end below.
            if (sparse.shrinkAxisMask & (1 << i)) {
                dense.finalShapeGatherIndices.push(SHRINK_AXIS);
                dense.finalShapeGatherIndicesSparse.push(-1);
                dense.shrinkAxisMask |= (1 << fullIndex);
            }
            else {
                dense.finalShapeGatherIndices.push(fullIndex);
                // Remember that where in the sparse shape the dense dim comes from.
                dense.finalShapeGatherIndicesSparse.push(i);
            }
            dense.inputShapeGatherIndicesSparse[fullIndex] = i;
            fullIndex++;
        }
    }
}
/**
 * Canonicalizes a begin (c === 0) or end (c === 1) index for one axis of
 * size `dimI`: when the corresponding mask bit is set, the value snaps to
 * the range edge matching the stride direction; otherwise negative indices
 * are resolved and the value is clamped into `validRange`.
 */
function canonical(x, c, strideI, dimI, masks, validRange) {
    if (masks[c]) {
        // Masked: take the appropriate extreme for the iteration direction.
        return strideI > 0 ? validRange[c] : validRange[(c + 1) & 1];
    }
    // Resolve negative indices, then clamp into the valid interval.
    var resolved = x < 0 ? dimI + x : x;
    if (resolved < validRange[0]) {
        return validRange[0];
    }
    return resolved > validRange[1] ? validRange[1] : resolved;
}
+
// Namespace object re-exporting the slice helpers (mirrors the `slice_util`
// module of the TypeScript sources; `__proto__: null` keeps the object free
// of Object.prototype members).
var slice_util = {
    __proto__: null,
    assertParamsValid: assertParamsValid,
    maskToAxes: maskToAxes,
    computeOutShape: computeOutShape$2,
    stridesWithElidedDims: stridesWithElidedDims,
    getNormalizedAxes: getNormalizedAxes,
    startIndicesWithElidedDims: startIndicesWithElidedDims,
    stopIndicesWithElidedDims: stopIndicesWithElidedDims,
    stridesForAxis: stridesForAxis,
    startForAxis: startForAxis,
    stopForAxis: stopForAxis,
    isSliceContinous: isSliceContinous,
    computeFlatOffset: computeFlatOffset,
    parseSliceParams: parseSliceParams,
    sliceInfo: sliceInfo
};
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Serializable defines the serialization contract.
+ *
+ * TFJS requires serializable classes to return their className when asked
+ * to avoid issues with minification.
+ */
var Serializable = /** @class */ (function () {
    function Serializable() {
    }
    /**
     * Returns the name used for this class in serialization contexts.
     *
     * The value is read from the static `className` rather than
     * `constructor.name` so that it survives minification, and so that a
     * class may serialize under a non-leaf ancestor's name where language
     * implementations diverge (e.g. initializers.VarianceScaling).
     */
    Serializable.prototype.getClassName = function () {
        var ctor = this.constructor;
        return ctor.className;
    };
    /**
     * Instantiates `cls` from a ConfigDict.
     *
     * Works for most descendants of Serializable; subclasses with special
     * construction needs override this.
     * @param cls A Constructor for the class to instantiate.
     * @param config The Configuration for the object.
     */
    /** @nocollapse */
    Serializable.fromConfig = function (cls, config) {
        return new cls(config);
    };
    return Serializable;
}());
+ /**
+ * Maps string keys to class constructors.
+ *
+ * Used during (de)serialization from the cross-language JSON format, which
+ * requires the class name in the serialization format matches the class
+ * names as used in Python, should it exist.
+ */
var SerializationMap = /** @class */ (function () {
    function SerializationMap() {
        // className -> [constructor, fromConfig] registry.
        this.classNameMap = {};
    }
    /** Lazily creates and returns the singleton instance of the map. */
    SerializationMap.getMap = function () {
        if (SerializationMap.instance == null) {
            SerializationMap.instance = new SerializationMap();
        }
        return SerializationMap.instance;
    };
    /** Registers `cls` as serializable under its static className. */
    SerializationMap.register = function (cls) {
        var map = SerializationMap.getMap();
        map.classNameMap[cls.className] = [cls, cls.fromConfig];
    };
    return SerializationMap;
}());
+ /**
+ * Register a class with the serialization map of TensorFlow.js.
+ *
+ * This is often used for registering custom Layers, so they can be
+ * serialized and deserialized.
+ *
+ * Example:
+ *
+ * ```js
+ * class MyCustomLayer extends tf.layers.Layer {
+ * static className = 'MyCustomLayer';
+ *
+ * constructor(config) {
+ * super(config);
+ * }
+ * }
+ * tf.serialization.registerClass(MyCustomLayer);
+ * ```
+ *
+ * @param cls The class to be registered. It must have a public static member
+ * called `className` defined and the value must be a non-empty string.
+ *
+ * @doc {heading: 'Models', subheading: 'Serialization', ignoreCI: true}
+ */
function registerClass(cls) {
    var className = cls.className;
    // Validate the static `className` contract before registering: it must
    // exist, be a string, and be non-empty.
    assert(className != null, function () {
        return "Class being registered does not have the static className " +
            "property defined.";
    });
    assert(typeof className === 'string', function () {
        return "className is required to be a string, but got type " +
            typeof className;
    });
    assert(className.length > 0, function () {
        return "Class being registered has an empty-string as its className, " +
            "which is disallowed.";
    });
    SerializationMap.register(cls);
}
+
// Public `tf.serialization` namespace: the Serializable contract, the global
// class registry, and the registration helper.
var serialization = {
    __proto__: null,
    Serializable: Serializable,
    SerializationMap: SerializationMap,
    registerClass: registerClass
};
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Default comparison tolerances used by the test helpers below, chosen per
// backend float precision (see testEpsilon).
var TEST_EPSILON_FLOAT32 = 1e-3;
var TEST_EPSILON_FLOAT16 = 1e-1;
/**
 * Asserts that `actual` and `expected` match elementwise within `epsilon`
 * (defaulting to the active backend's test tolerance). Throws on mismatch.
 */
function expectArraysClose(actual, expected, epsilon) {
    var eps = epsilon == null ? testEpsilon() : epsilon;
    return expectArraysPredicate(actual, expected, function (a, b) {
        return areClose(a, b, eps);
    });
}
/** Picks the comparison tolerance matching the backend's float precision. */
function testEpsilon() {
    var is32Bit = ENGINE.backend.floatPrecision() === 32;
    return is32Bit ? TEST_EPSILON_FLOAT32 : TEST_EPSILON_FLOAT16;
}
/**
 * Compares `actual` against `expected` elementwise using `predicate`,
 * throwing a descriptive Error on any type, shape, length, or value
 * mismatch. Accepts nested plain arrays and typed arrays.
 */
function expectArraysPredicate(actual, expected, predicate) {
    // Only enforce matching constructor types when neither, or both, sides
    // are typed arrays (a typed array may be compared to a plain array).
    var bothTyped = isTypedArray(actual) && isTypedArray(expected);
    var eitherTyped = isTypedArray(actual) || isTypedArray(expected);
    if (bothTyped || !eitherTyped) {
        var aType = actual.constructor.name;
        var bType = expected.constructor.name;
        if (aType !== bType) {
            throw new Error("Arrays are of different type. Actual: " + aType + ". " +
                ("Expected: " + bType));
        }
    }
    if (Array.isArray(actual) && Array.isArray(expected)) {
        var actualShape = inferShape(actual);
        var expectedShape = inferShape(expected);
        if (!arraysEqual(actualShape, expectedShape)) {
            throw new Error("Arrays have different shapes. " +
                ("Actual: [" + actualShape + "]. Expected: [" + expectedShape + "]"));
        }
    }
    // Compare in flattened form so nested and typed arrays unify.
    var actualFlat = isTypedArray(actual) ? actual : flatten(actual);
    var expectedFlat = isTypedArray(expected) ? expected : flatten(expected);
    if (actualFlat.length !== expectedFlat.length) {
        throw new Error("Arrays have different lengths actual: " + actualFlat.length + " vs " +
            ("expected: " + expectedFlat.length + ".\n") +
            ("Actual: " + actualFlat + ".\n") +
            ("Expected: " + expectedFlat + "."));
    }
    for (var i = 0; i < expectedFlat.length; ++i) {
        var a = actualFlat[i];
        var e = expectedFlat[i];
        if (!predicate(a, e)) {
            throw new Error("Arrays differ: actual[" + i + "] = " + a + ", expected[" + i + "] = " + e + ".\n" +
                ("Actual: " + actualFlat + ".\n") +
                ("Expected: " + expectedFlat + "."));
        }
    }
}
/**
 * Invokes the jasmine `done` callback iff the promise returned by `fn`
 * rejects; a fulfilled promise reports failure via `done.fail()`.
 */
function expectPromiseToFail(fn, done) {
    var onFulfilled = function () { return done.fail(); };
    var onRejected = function () { return done(); };
    fn().then(onFulfilled, onRejected);
}
/**
 * Asserts exact elementwise equality. String-valued inputs are compared
 * with loose equality; numeric inputs with a zero-tolerance closeness check.
 */
function expectArraysEqual(actual, expected) {
    var isPrimitive = typeof expected === 'string' ||
        typeof expected === 'number' || typeof expected === 'boolean';
    var exp = isPrimitive ? [expected] : expected;
    var anyString = isString(actual) || isString(actual[0]) ||
        isString(expected) || isString(expected[0]);
    if (anyString) {
        // Loose equality is intentional for string/number comparisons.
        // tslint:disable-next-line: triple-equals
        return expectArraysPredicate(actual, exp, function (a, b) { return a == b; });
    }
    return expectArraysPredicate(actual, expected, function (a, b) { return areClose(a, b, 0); });
}
/**
 * Asserts the two numbers differ by at most `epsilon` (backend default when
 * omitted); throws a descriptive Error otherwise.
 */
function expectNumbersClose(a, e, epsilon) {
    var eps = epsilon == null ? testEpsilon() : epsilon;
    if (!areClose(a, e, eps)) {
        throw new Error("Numbers differ: actual === " + a + ", expected === " + e);
    }
}
/**
 * True when `a` and `e` differ by at most `epsilon`. Any pair of non-finite
 * values (Infinity, -Infinity, NaN in either slot) counts as close; a NaN
 * paired with a finite value does not.
 */
function areClose(a, e, epsilon) {
    if (!isFinite(a) && !isFinite(e)) {
        return true;
    }
    if (isNaN(a) || isNaN(e)) {
        return false;
    }
    return Math.abs(a - e) <= epsilon;
}
/** Throws if any element of `actual` falls outside the range [low, high]. */
function expectValuesInRange(actual, low, high) {
    for (var i = 0; i < actual.length; i++) {
        var v = actual[i];
        if (v < low || v > high) {
            throw new Error("Value out of range:" + v + " low: " + low + ", high: " + high);
        }
    }
}
/** Jasmine-based equality check for two ArrayBuffers (requires `expect`). */
function expectArrayBuffersEqual(actual, expected) {
    // Safari & Jasmine don't like comparing ArrayBuffers directly. Wrapping in
    // a Float32Array solves this issue.
    expect(new Float32Array(actual)).toEqual(new Float32Array(expected));
}
/**
 * Encodes every string in (possibly nested) array `a` into utf-8 bytes via
 * `encodeString`, mutating `a` in place; returns `a` for convenience.
 */
function encodeStrings(a) {
    for (var i = 0; i < a.length; i++) {
        var item = a[i];
        if (Array.isArray(item)) {
            // Recurse into nested arrays; their contents are rewritten too.
            encodeStrings(item);
            continue;
        }
        a[i] = encodeString(item);
    }
    return a;
}
+
// Public `tf.test_util` namespace of test helpers.
// NOTE(review): TEST_EPSILON_FLOAT32 and areClose are used internally but not
// exported here — confirm against the upstream module surface before relying
// on this list.
var test_util = {
    __proto__: null,
    TEST_EPSILON_FLOAT16: TEST_EPSILON_FLOAT16,
    expectArraysClose: expectArraysClose,
    testEpsilon: testEpsilon,
    expectPromiseToFail: expectPromiseToFail,
    expectArraysEqual: expectArraysEqual,
    expectNumbersClose: expectNumbersClose,
    expectValuesInRange: expectValuesInRange,
    expectArrayBuffersEqual: expectArrayBuffersEqual,
    encodeStrings: encodeStrings
};
+
/** @license See the LICENSE file. */
// This code is auto-generated, do not modify this file!
// Version string of the bundled @tensorflow/tfjs-core.
var version = '3.12.0';
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Enables production mode which disables correctness checks in favor of
+ * performance.
+ *
+ * @doc {heading: 'Environment'}
+ */
function enableProdMode() {
    // Downstream code reads the 'PROD' flag to skip correctness checks.
    env().set('PROD', true);
}
+ /**
+ * Enables debug mode which will log information about all executed kernels:
+ * the elapsed time of the kernel execution, as well as the rank, shape, and
+ * size of the output tensor.
+ *
+ * Debug mode will significantly slow down your application as it will
+ * download the result of every operation to the CPU. This should not be used in
+ * production. Debug mode does not affect the timing information of the kernel
+ * execution as we do not measure download time in the kernel execution time.
+ *
+ * See also: `tf.profile`, `tf.memory`.
+ *
+ * @doc {heading: 'Environment'}
+ */
function enableDebugMode() {
    // Kernel-level logging elsewhere is gated on the 'DEBUG' flag.
    env().set('DEBUG', true);
}
/** Globally disables deprecation warnings */
function disableDeprecationWarnings() {
    env().set('DEPRECATION_WARNINGS_ENABLED', false);
    // Emit one final notice so the silence is deliberate and visible.
    console.warn("TensorFlow.js deprecation warnings have been disabled.");
}
/**
 * Warns users about deprecated functionality via console.warn, unless
 * deprecation warnings have been globally disabled.
 */
function deprecationWarn(msg) {
    if (!env().getBool('DEPRECATION_WARNINGS_ENABLED')) {
        return;
    }
    console.warn(msg + ' You can disable deprecation warnings with ' +
        'tf.disableDeprecationWarnings().');
}
+ /**
+ * Dispose all variables kept in backend engine.
+ *
+ * @doc {heading: 'Environment'}
+ */
function disposeVariables() {
    // Delegates to the global engine, which owns all variable registrations.
    ENGINE.disposeVariables();
}
+ /**
+ * It returns the global engine that keeps track of all tensors and backends.
+ *
+ * @doc {heading: 'Environment'}
+ */
function engine() {
    // ENGINE is the module-level singleton tracking tensors and backends.
    return ENGINE;
}
+ /**
+ * Returns memory info at the current time in the program. The result is an
+ * object with the following properties:
+ *
+ * - `numBytes`: Number of bytes allocated (undisposed) at this time.
+ * - `numTensors`: Number of unique tensors allocated.
+ * - `numDataBuffers`: Number of unique data buffers allocated
+ * (undisposed) at this time, which is ≤ the number of tensors
+ * (e.g. `a.reshape(newShape)` makes a new Tensor that shares the same
+ * data buffer with `a`).
+ * - `unreliable`: True if the memory usage is unreliable. See `reasons` when
+ * `unreliable` is true.
+ * - `reasons`: `string[]`, reasons why the memory is unreliable, present if
+ * `unreliable` is true.
+ *
+ * WebGL Properties:
+ * - `numBytesInGPU`: Number of bytes allocated (undisposed) in the GPU only at
+ * this time.
+ *
+ * @doc {heading: 'Performance', subheading: 'Memory'}
+ */
function memory() {
    // Snapshot of tensor/byte counts maintained by the global engine.
    return ENGINE.memory();
}
+ /**
+ * Executes the provided function `f()` and returns a promise that resolves
+ * with information about the function's memory use:
+ * - `newBytes`: the number of new bytes allocated
+ * - `newTensors`: the number of new tensors created
+ * - `peakBytes`: the peak number of bytes allocated
+ * - `kernels`: an array of objects for each kernel involved that reports
+ * their input and output shapes, number of bytes used, and number of new
+ * tensors created.
+ * - `kernelNames`: an array of unique strings with just the names of the
+ * kernels in the `kernels` array.
+ *
+ * ```js
+ * const profile = await tf.profile(() => {
+ * const x = tf.tensor1d([1, 2, 3]);
+ * let x2 = x.square();
+ * x2.dispose();
+ * x2 = x.square();
+ * x2.dispose();
+ * return x;
+ * });
+ *
+ * console.log(`newBytes: ${profile.newBytes}`);
+ * console.log(`newTensors: ${profile.newTensors}`);
+ * console.log(`byte usage over all kernels: ${profile.kernels.map(k =>
+ * k.totalBytesSnapshot)}`);
+ * ```
+ *
+ *
+ * @doc {heading: 'Performance', subheading: 'Profile'}
+ */
function profile(f) {
    // The engine wraps `f` and records per-kernel allocation statistics.
    return ENGINE.profile(f);
}
+ /**
+ * Executes the provided function `fn` and after it is executed, cleans up all
+ * intermediate tensors allocated by `fn` except those returned by `fn`.
+ * `fn` must not return a Promise (async functions not allowed). The returned
+ * result can be a complex object.
+ *
+ * Using this method helps avoid memory leaks. In general, wrap calls to
+ * operations in `tf.tidy` for automatic memory cleanup.
+ *
+ * NOTE: Variables do *not* get cleaned up when inside a tidy(). If you want to
+ * dispose variables, please use `tf.disposeVariables` or call dispose()
+ * directly on variables.
+ *
+ * ```js
+ * // y = 2 ^ 2 + 1
+ * const y = tf.tidy(() => {
+ * // a, b, and one will be cleaned up when the tidy ends.
+ * const one = tf.scalar(1);
+ * const a = tf.scalar(2);
+ * const b = a.square();
+ *
+ * console.log('numTensors (in tidy): ' + tf.memory().numTensors);
+ *
+ * // The value returned inside the tidy function will return
+ * // through the tidy, in this case to the variable y.
+ * return b.add(one);
+ * });
+ *
+ * console.log('numTensors (outside tidy): ' + tf.memory().numTensors);
+ * y.print();
+ * ```
+ *
+ * @param nameOrFn The name of the closure, or the function to execute.
+ * If a name is provided, the 2nd argument should be the function.
+ * If debug mode is on, the timing and the memory usage of the function
+ * will be tracked and displayed on the console using the provided name.
+ * @param fn The function to execute.
+ *
+ * @doc {heading: 'Performance', subheading: 'Memory'}
+ */
function tidy(nameOrFn, fn) {
    // The engine scopes `fn`, disposing intermediates it did not return.
    return ENGINE.tidy(nameOrFn, fn);
}
+ /**
+ * Disposes any `tf.Tensor`s found within the provided object.
+ *
+ * @param container an object that may be a `tf.Tensor` or may directly
+ * contain `tf.Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. If
+ * the object is not a `tf.Tensor` or does not contain `Tensors`, nothing
+ * happens. In general it is safe to pass any object here, except that
+ * `Promise`s are not supported.
+ *
+ * @doc {heading: 'Performance', subheading: 'Memory'}
+ */
+ function dispose(container) {
+ var tensors = getTensorsInContainer(container);
+ tensors.forEach(function (tensor) { return tensor.dispose(); });
+ }
+ /**
+ * Keeps a `tf.Tensor` generated inside a `tf.tidy` from being disposed
+ * automatically.
+ *
+ * ```js
+ * let b;
+ * const y = tf.tidy(() => {
+ * const one = tf.scalar(1);
+ * const a = tf.scalar(2);
+ *
+ * // b will not be cleaned up by the tidy. a and one will be cleaned up
+ * // when the tidy ends.
+ * b = tf.keep(a.square());
+ *
+ * console.log('numTensors (in tidy): ' + tf.memory().numTensors);
+ *
+ * // The value returned inside the tidy function will return
+ * // through the tidy, in this case to the variable y.
+ * return b.add(one);
+ * });
+ *
+ * console.log('numTensors (outside tidy): ' + tf.memory().numTensors);
+ * console.log('y:');
+ * y.print();
+ * console.log('b:');
+ * b.print();
+ * ```
+ *
+ * @param result The tensor to keep from being disposed.
+ *
+ * @doc {heading: 'Performance', subheading: 'Memory'}
+ */
+ function keep(result) {
+ return ENGINE.keep(result);
+ }
+ /**
+ * Executes `f()` and returns a promise that resolves with timing
+ * information.
+ *
+ * The result is an object with the following properties:
+ *
+ * - `wallMs`: Wall execution time.
+ * - `kernelMs`: Kernel execution time, ignoring data transfer. If using the
+ * WebGL backend and the query timer extension is not available, this will
+ * return an error object.
+ * - On `WebGL` The following additional properties exist:
+ * - `uploadWaitMs`: CPU blocking time on texture uploads.
+ * - `downloadWaitMs`: CPU blocking time on texture downloads (readPixels).
+ *
+ * ```js
+ * const x = tf.randomNormal([20, 20]);
+ * const time = await tf.time(() => x.matMul(x));
+ *
+ * console.log(`kernelMs: ${time.kernelMs}, wallTimeMs: ${time.wallMs}`);
+ * ```
+ *
+ * @param f The function to execute and time.
+ *
+ * @doc {heading: 'Performance', subheading: 'Timing'}
+ */
+ function time(f) {
+ return ENGINE.time(f);
+ }
+ /**
+ * Sets the backend (cpu, webgl, wasm, etc) responsible for creating tensors and
+ * executing operations on those tensors. Returns a promise that resolves
+ * to a boolean if the backend initialization was successful.
+ *
+ * Note this disposes the current backend, if any, as well as any tensors
+ * associated with it. A new backend is initialized, even if it is of the
+ * same type as the previous one.
+ *
+ * @param backendName The name of the backend. Currently supports
+ * `'webgl'|'cpu'` in the browser, `'tensorflow'` under node.js
+ * (requires tfjs-node), and `'wasm'` (requires tfjs-backend-wasm).
+ *
+ * @doc {heading: 'Backends'}
+ */
+ function setBackend(backendName) {
+ return ENGINE.setBackend(backendName);
+ }
+ /**
+ * Returns a promise that resolves when the currently selected backend (or the
+ * highest priority one) has initialized. Await this promise when you are using
+ * a backend that has async initialization.
+ *
+ * @doc {heading: 'Backends'}
+ */
+ function ready() {
+ return ENGINE.ready();
+ }
+ /**
+ * Returns the current backend name (cpu, webgl, etc). The backend is
+ * responsible for creating tensors and executing operations on those tensors.
+ *
+ * @doc {heading: 'Backends'}
+ */
+ function getBackend() {
+ return ENGINE.backendName;
+ }
+ /**
+ * Removes a backend and the registered factory.
+ *
+ * @doc {heading: 'Backends'}
+ */
+    function removeBackend(name) {
+        // Per the doc above, the engine drops both the backend instance
+        // and its registered factory under this name.
+        ENGINE.removeBackend(name);
+    }
+ /**
+ * Finds the backend registered under the provided name. Returns null if the
+ * name is not in the registry, or the registration hasn't finished yet.
+ */
+ function findBackend(name) {
+ return ENGINE.findBackend(name);
+ }
+ /**
+ * Finds the backend factory registered under the provided name. Returns a
+ * function that produces a new backend when called. Returns null if the name
+ * is not in the registry.
+ */
+ function findBackendFactory(name) {
+ return ENGINE.findBackendFactory(name);
+ }
+ /**
+ * Registers a global backend. The registration should happen when importing
+ * a module file (e.g. when importing `backend_webgl.ts`), and is used for
+ * modular builds (e.g. custom tfjs bundle with only webgl support).
+ *
+ * @param factory The backend factory function. When called, it should
+ * return a backend instance, or a promise of an instance.
+ * @param priority The priority of the backend (higher = more important).
+ * In case multiple backends are registered, the priority is used to find
+ * the best backend. Defaults to 1.
+ * @return False if there is already a registered backend under this name, true
+ * if not.
+ *
+ * @doc {heading: 'Backends'}
+ */
+ function registerBackend(name, factory, priority) {
+ if (priority === void 0) { priority = 1; }
+ return ENGINE.registerBackend(name, factory, priority);
+ }
+ /**
+ * Gets the current backend. If no backends have been initialized, this will
+ * attempt to initialize the best backend. Will throw an error if the highest
+ * priority backend has async initialization, in which case, you should call
+ * 'await tf.ready()' before running other code.
+ *
+ * @doc {heading: 'Backends'}
+ */
+ function backend() {
+ return ENGINE.backend;
+ }
+ /**
+ * Sets the global platform.
+ *
+ * @param platformName The name of this platform.
+ * @param platform A platform implementation.
+ */
+    function setPlatform(platformName, platform) {
+        // Registers the platform implementation on the shared environment.
+        env().setPlatform(platformName, platform);
+    }
+
+ /**
+ * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
+ *
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
+ *
+ * a.add(b).print(); // or tf.add(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast add a with b.
+ * const a = tf.scalar(5);
+ * const b = tf.tensor1d([10, 20, 30, 40]);
+ *
+ * a.add(b).print(); // or tf.add(a, b)
+ * ```
+ * @param a The first `tf.Tensor` to add.
+ * @param b The second `tf.Tensor` to add. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function add_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'add');
+ var $b = convertToTensor(b, 'b', 'add');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Add, inputs);
+ }
+ var add = op({ add_: add_ });
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
+     * The result is rounded with the floor function.
+ *
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+     * a.floorDiv(b).print(); // or tf.floorDiv(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ *
+ * a.floorDiv(b).print(); // or tf.floorDiv(a, b)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function floorDiv_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'floorDiv');
+ var $b = convertToTensor(b, 'b', 'floorDiv');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(FloorDiv, inputs);
+ }
+ var floorDiv = op({ floorDiv_: floorDiv_ });
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * a.div(b).print(); // or tf.div(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ *
+ * a.div(b).print(); // or tf.div(a, b)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function div_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'div');
+ var $b = convertToTensor(b, 'b', 'div');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ if ($a.dtype === 'int32' && $b.dtype === 'int32') {
+ return floorDiv($a, $b);
+ }
+ var inputs = { a: $a, b: $b };
+ var attrs = {};
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return ENGINE.runKernel(RealDiv, inputs, attrs);
+ }
+ var div = op({ div_: div_ });
+
+ /**
+ * Multiplies two `tf.Tensor`s element-wise, A * B. Supports broadcasting.
+ *
+ * We also expose `tf.mulStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.tensor1d([2, 3, 4, 5]);
+ *
+ * a.mul(b).print(); // or tf.mul(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast mul a with b.
+ * const a = tf.tensor1d([1, 2, 3, 4]);
+ * const b = tf.scalar(5);
+ *
+ * a.mul(b).print(); // or tf.mul(a, b)
+ * ```
+ * @param a The first tensor to multiply.
+ * @param b The second tensor to multiply. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function mul_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'mul');
+ var $b = convertToTensor(b, 'b', 'mul');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Multiply, inputs);
+ }
+ var mul = op({ mul_: mul_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes absolute value element-wise: `abs(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.abs().print(); // or tf.abs(x)
+ * ```
+ * @param x The input `tf.Tensor`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function abs_(x) {
+ var $x = convertToTensor(x, 'x', 'abs');
+ if ($x.dtype === 'complex64') {
+ var inputs = { x: $x };
+ return ENGINE.runKernel(ComplexAbs, inputs);
+ }
+ else {
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Abs, inputs);
+ }
+ }
+ var abs = op({ abs_: abs_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes acos of the input `tf.Tensor` element-wise: `acos(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.acos().print(); // or tf.acos(x)
+ * ```
+ * @param x The input tensor.
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function acos_(x) {
+ var $x = convertToTensor(x, 'x', 'acos');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Acos, inputs);
+ }
+ var acos = op({ acos_: acos_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the inverse hyperbolic cos of the input `tf.Tensor` element-wise:
+ * `acosh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([10, 1, 3, 5.7]);
+ *
+ * x.acosh().print(); // or tf.acosh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function acosh_(x) {
+ var $x = convertToTensor(x, 'x', 'acosh');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Acosh, inputs);
+ }
+ var acosh = op({ acosh_: acosh_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ *
+ * tf.addN([a, b, c]).print();
+ * ```
+ * @param tensors A list of tensors with the same shape and dtype.
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function addN_(tensors) {
+ assert(Array.isArray(tensors), function () { return 'The argument passed to tf.addN() must be a list of tensors'; });
+ assert(tensors.length >= 1, function () { return "Must pass at least one tensor to tf.addN(), but got " +
+ ("" + tensors.length); });
+ var $tensors = tensors.map(function (t, i) { return convertToTensor(t, "tensors" + i, 'addN'); });
+ var firstTensor = $tensors[0];
+ $tensors.forEach(function (t) {
+ if (t.dtype !== firstTensor.dtype) {
+ throw new Error('All tensors passed to tf.addN() must have the same dtype');
+ }
+ });
+ $tensors.forEach(function (t) {
+ if (!arraysEqual(t.shape, firstTensor.shape)) {
+ throw new Error('All tensors passed to tf.addN() must have the same shape');
+ }
+ });
+ var inputs = $tensors;
+ return ENGINE.runKernel(AddN, inputs);
+ }
+ var addN = op({ addN_: addN_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the logical and of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and an
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 1, 1], 'bool');
+ *
+ * x.all().print(); // or tf.all(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
+ *
+ * const axis = 1;
+ * x.all(axis).print(); // or tf.all(x, axis)
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype bool.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function all_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'all', 'bool');
+ var inputs = { x: $x };
+ var attrs = { axis: axis, keepDims: keepDims };
+ return ENGINE.runKernel(All, inputs, attrs);
+ }
+ var all = op({ all_: all_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the logical or of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and an
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 1, 1], 'bool');
+ *
+ * x.any().print(); // or tf.any(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
+ *
+ * const axis = 1;
+ * x.any(axis).print(); // or tf.any(x, axis)
+ * ```
+ *
+ * @param x The input tensor. Must be of dtype bool.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function any_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'any', 'bool');
+ var inputs = { x: $x };
+ var attrs = { axis: axis, keepDims: keepDims };
+ return ENGINE.runKernel(Any, inputs, attrs);
+ }
+ // tslint:disable-next-line:variable-name
+ var any = op({ any_: any_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the indices of the maximum values along an `axis`.
+ *
+ * The result has the same shape as `input` with the dimension along `axis`
+ * removed.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.argMax().print(); // or tf.argMax(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
+ *
+ * const axis = 1;
+ * x.argMax(axis).print(); // or tf.argMax(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function argMax_(x, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $x = convertToTensor(x, 'x', 'argMax');
+ var inputs = { x: $x };
+ var attrs = { axis: axis };
+ return ENGINE.runKernel(ArgMax, inputs, attrs);
+ }
+ var argMax = op({ argMax_: argMax_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the indices of the minimum values along an `axis`.
+ *
+ * The result has the same shape as `input` with the dimension along `axis`
+ * removed.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.argMin().print(); // or tf.argMin(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
+ *
+ * const axis = 1;
+ * x.argMin(axis).print(); // or tf.argMin(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function argMin_(x, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $x = convertToTensor(x, 'x', 'argMin');
+ var inputs = { x: $x };
+ var attrs = { axis: axis };
+ return ENGINE.runKernel(ArgMin, inputs, attrs);
+ }
+ var argMin = op({ argMin_: argMin_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes asin of the input `tf.Tensor` element-wise: `asin(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.asin().print(); // or tf.asin(x)
+ * ```
+ * @param x The input tensor.
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function asin_(x) {
+ var $x = convertToTensor(x, 'x', 'asin');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Asin, inputs);
+ }
+ var asin = op({ asin_: asin_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes inverse hyperbolic sin of the input `tf.Tensor` element-wise:
+ * `asinh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.asinh().print(); // or tf.asinh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function asinh_(x) {
+ var $x = convertToTensor(x, 'x', 'asinh');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Asinh, inputs);
+ }
+ var asinh = op({ asinh_: asinh_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes atan of the input `tf.Tensor` element-wise: `atan(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.atan().print(); // or tf.atan(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function atan_(x) {
+ var $x = convertToTensor(x, 'x', 'atan');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Atan, inputs);
+ }
+ var atan = op({ atan_: atan_ });
+
+ /**
+ * Computes arctangent of `tf.Tensor`s a / b element-wise: `atan2(a, b)`.
+ * Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1.0, 1.0, -1.0, .7]);
+ * const b = tf.tensor1d([2.0, 13.0, 3.5, .21]);
+ *
+ * tf.atan2(a, b).print()
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function atan2_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'atan2');
+ var $b = convertToTensor(b, 'b', 'atan2');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Atan2, inputs);
+ }
+ var atan2 = op({ atan2_: atan2_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes inverse hyperbolic tan of the input `tf.Tensor` element-wise:
+ * `atanh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, .1, -.1, .7]);
+ *
+ * x.atanh().print(); // or tf.atanh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function atanh_(x) {
+ var $x = convertToTensor(x, 'x', 'atanh');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Atanh, inputs);
+ }
+ var atanh = op({ atanh_: atanh_ });
+
+ /**
+ *
+ * @param inputShape Input tensor shape is of the following dimensions:
+ * `[batch, height, width, inChannels]`.
+ * @param filterShape The filter shape is of the following dimensions:
+ * `[filterHeight, filterWidth, depth]`.
+ * @param strides The strides of the sliding window for each dimension of the
+ * input tensor: `[strideHeight, strideWidth]`.
+ * If `strides` is a single number,
+ * then `strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+     *     than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dataFormat The data format of the input and output data.
+ * Defaults to 'NHWC'.
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`.
+ * Defaults to `[1, 1]`. If `dilations` is a single number, then
+ * `dilationHeight == dilationWidth`.
+ */
+ function computeDilation2DInfo(inputShape, filterShape, strides, pad, dataFormat, dilations) {
+ if (dataFormat === void 0) { dataFormat = 'NHWC'; }
+ // `computerConv2DInfo` require filterShape to be in the dimension of:
+ // `[filterHeight, filterWidth, depth, outDepth]`, dilation2d doesn't have
+ // outDepth, it should have the same depth as the input.
+ // Input shape: [batch, height, width, inChannels]
+ var inputChannels = inputShape[3];
+ var $filterShape = __spread(filterShape, [inputChannels]);
+ var $dataFormat = convertConv2DDataFormat(dataFormat);
+ return computeConv2DInfo(inputShape, $filterShape, strides, dilations, pad, null /* roundingMode */, null /* depthWise */, $dataFormat);
+ }
+ function computePool2DInfo(inShape, filterSize, strides, dilations, pad, roundingMode, dataFormat) {
+ if (dataFormat === void 0) { dataFormat = 'channelsLast'; }
+ var _a = __read(parseTupleParam(filterSize), 2), filterHeight = _a[0], filterWidth = _a[1];
+ var filterShape;
+ if (dataFormat === 'channelsLast') {
+ filterShape = [filterHeight, filterWidth, inShape[3], inShape[3]];
+ }
+ else if (dataFormat === 'channelsFirst') {
+ filterShape = [filterHeight, filterWidth, inShape[1], inShape[1]];
+ }
+ else {
+ throw new Error("Unknown dataFormat " + dataFormat);
+ }
+ return computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, false, dataFormat);
+ }
+ /**
+ * Computes the information for a forward pass of a pooling3D operation.
+ */
+ function computePool3DInfo(inShape, filterSize, strides, dilations, pad, roundingMode, dataFormat) {
+ if (dataFormat === void 0) { dataFormat = 'NDHWC'; }
+ var _a = __read(parse3TupleParam(filterSize), 3), filterDepth = _a[0], filterHeight = _a[1], filterWidth = _a[2];
+ var filterShape;
+ var $dataFormat;
+ if (dataFormat === 'NDHWC') {
+ $dataFormat = 'channelsLast';
+ filterShape =
+ [filterDepth, filterHeight, filterWidth, inShape[4], inShape[4]];
+ }
+ else if (dataFormat === 'NCDHW') {
+ $dataFormat = 'channelsFirst';
+ filterShape =
+ [filterDepth, filterHeight, filterWidth, inShape[1], inShape[1]];
+ }
+ else {
+ throw new Error("Unknown dataFormat " + dataFormat);
+ }
+ return computeConv3DInfo(inShape, filterShape, strides, dilations, pad, false, $dataFormat, roundingMode);
+ }
+ /**
+ * Computes the information for a forward pass of a convolution/pooling
+ * operation.
+ */
+    function computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, depthwise, dataFormat) {
+        var _a, _b;
+        if (depthwise === void 0) { depthwise = false; }
+        if (dataFormat === void 0) { dataFormat = 'channelsLast'; }
+        // Unpack the 4D input shape according to the layout; -1 marks "unset"
+        // until one of the branches below assigns the real dimensions.
+        var _c = __read([-1, -1, -1, -1], 4), batchSize = _c[0], inHeight = _c[1], inWidth = _c[2], inChannels = _c[3];
+        if (dataFormat === 'channelsLast') {
+            _a = __read(inShape, 4), batchSize = _a[0], inHeight = _a[1], inWidth = _a[2], inChannels = _a[3];
+        }
+        else if (dataFormat === 'channelsFirst') {
+            _b = __read(inShape, 4), batchSize = _b[0], inChannels = _b[1], inHeight = _b[2], inWidth = _b[3];
+        }
+        else {
+            throw new Error("Unknown dataFormat " + dataFormat);
+        }
+        // filterShape is [filterHeight, filterWidth, inChannels, outChannels];
+        // index 2 (the filter's input-channel count) is intentionally unused.
+        var _d = __read(filterShape, 4), filterHeight = _d[0], filterWidth = _d[1], filterChannels = _d[3];
+        var _e = __read(parseTupleParam(strides), 2), strideHeight = _e[0], strideWidth = _e[1];
+        var _f = __read(parseTupleParam(dilations), 2), dilationHeight = _f[0], dilationWidth = _f[1];
+        // Effective sizes account for the gaps dilation introduces in the filter.
+        var effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
+        var effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
+        var _g = getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight, effectiveFilterWidth, roundingMode, dataFormat), padInfo = _g.padInfo, outHeight = _g.outHeight, outWidth = _g.outWidth;
+        // Depthwise convolution multiplies channel count; regular conv replaces it.
+        var outChannels = depthwise ? filterChannels * inChannels : filterChannels;
+        var outShape;
+        // Both layouts were validated above, so outShape is always assigned.
+        if (dataFormat === 'channelsFirst') {
+            outShape = [batchSize, outChannels, outHeight, outWidth];
+        }
+        else if (dataFormat === 'channelsLast') {
+            outShape = [batchSize, outHeight, outWidth, outChannels];
+        }
+        return {
+            batchSize: batchSize,
+            dataFormat: dataFormat,
+            inHeight: inHeight,
+            inWidth: inWidth,
+            inChannels: inChannels,
+            outHeight: outHeight,
+            outWidth: outWidth,
+            outChannels: outChannels,
+            padInfo: padInfo,
+            strideHeight: strideHeight,
+            strideWidth: strideWidth,
+            filterHeight: filterHeight,
+            filterWidth: filterWidth,
+            effectiveFilterHeight: effectiveFilterHeight,
+            effectiveFilterWidth: effectiveFilterWidth,
+            dilationHeight: dilationHeight,
+            dilationWidth: dilationWidth,
+            inShape: inShape,
+            outShape: outShape,
+            filterShape: filterShape
+        };
+    }
/**
 * Computes the information for a forward pass of a 3D convolution/pooling
 * operation.
 *
 * @param inShape Input shape: [batch, depth, height, width, inChannels] for
 *     'channelsLast', or [batch, inChannels, depth, height, width] for
 *     'channelsFirst'.
 * @param filterShape [filterDepth, filterHeight, filterWidth, inChannels,
 *     filterChannels]; index 3 (the filter's own inChannels) is not read.
 * @param strides A number or [strideDepth, strideHeight, strideWidth].
 * @param dilations A number or [dilationDepth, dilationHeight,
 *     dilationWidth].
 * @param pad A number, 'same', or 'valid'.
 * @param depthwise When true, outChannels = filterChannels * inChannels.
 * @param dataFormat 'channelsLast' (default) or 'channelsFirst'.
 * @param roundingMode Optional 'ceil' | 'round' | 'floor'; defaults to
 *     truncation. Note the parameter order differs from computeConv2DInfo.
 */
function computeConv3DInfo(inShape, filterShape, strides, dilations, pad, depthwise, dataFormat, roundingMode) {
    var _a, _b;
    if (depthwise === void 0) { depthwise = false; }
    if (dataFormat === void 0) { dataFormat = 'channelsLast'; }
    // -1 placeholders are always overwritten below, or an error is thrown.
    var _c = __read([-1, -1, -1, -1, -1], 5), batchSize = _c[0], inDepth = _c[1], inHeight = _c[2], inWidth = _c[3], inChannels = _c[4];
    if (dataFormat === 'channelsLast') {
        _a = __read(inShape, 5), batchSize = _a[0], inDepth = _a[1], inHeight = _a[2], inWidth = _a[3], inChannels = _a[4];
    }
    else if (dataFormat === 'channelsFirst') {
        _b = __read(inShape, 5), batchSize = _b[0], inChannels = _b[1], inDepth = _b[2], inHeight = _b[3], inWidth = _b[4];
    }
    else {
        throw new Error("Unknown dataFormat " + dataFormat);
    }
    var _d = __read(filterShape, 5), filterDepth = _d[0], filterHeight = _d[1], filterWidth = _d[2], filterChannels = _d[4];
    var _e = __read(parse3TupleParam(strides), 3), strideDepth = _e[0], strideHeight = _e[1], strideWidth = _e[2];
    var _f = __read(parse3TupleParam(dilations), 3), dilationDepth = _f[0], dilationHeight = _f[1], dilationWidth = _f[2];
    // Dilation enlarges the filter's effective receptive field.
    var effectiveFilterDepth = getEffectiveFilterSize(filterDepth, dilationDepth);
    var effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
    var effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
    var _g = get3DPadAndOutInfo(pad, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth, roundingMode), padInfo = _g.padInfo, outDepth = _g.outDepth, outHeight = _g.outHeight, outWidth = _g.outWidth;
    // Depthwise convolution multiplies channels rather than replacing them.
    var outChannels = depthwise ? filterChannels * inChannels : filterChannels;
    var outShape;
    if (dataFormat === 'channelsFirst') {
        outShape = [batchSize, outChannels, outDepth, outHeight, outWidth];
    }
    else if (dataFormat === 'channelsLast') {
        outShape = [batchSize, outDepth, outHeight, outWidth, outChannels];
    }
    return {
        batchSize: batchSize,
        dataFormat: dataFormat,
        inDepth: inDepth,
        inHeight: inHeight,
        inWidth: inWidth,
        inChannels: inChannels,
        outDepth: outDepth,
        outHeight: outHeight,
        outWidth: outWidth,
        outChannels: outChannels,
        padInfo: padInfo,
        strideDepth: strideDepth,
        strideHeight: strideHeight,
        strideWidth: strideWidth,
        filterDepth: filterDepth,
        filterHeight: filterHeight,
        filterWidth: filterWidth,
        effectiveFilterDepth: effectiveFilterDepth,
        effectiveFilterHeight: effectiveFilterHeight,
        effectiveFilterWidth: effectiveFilterWidth,
        dilationDepth: dilationDepth,
        dilationHeight: dilationHeight,
        dilationWidth: dilationWidth,
        inShape: inShape,
        outShape: outShape,
        filterShape: filterShape
    };
}
/**
 * Computes the [rows, cols] output shape of a 2D convolution over inShape
 * given a square field size, stride, and zero padding. When zeroPad is
 * null/undefined a default padding is derived from the input shape.
 */
function computeOutputShape2D(inShape, fieldSize, stride, zeroPad, roundingMode) {
    if (zeroPad == null) {
        zeroPad = computeDefaultPad(inShape, fieldSize, stride);
    }
    // Standard conv output formula: (in - field + 2 * pad) / stride + 1.
    var computeDim = function (inputDim) {
        return round$1((inputDim - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);
    };
    return [computeDim(inShape[0]), computeDim(inShape[1])];
}
/**
 * Computes the [depth, rows, cols, channels] output shape of a 3D
 * convolution over inShape given a cubic field size, stride, and zero
 * padding. When zeroPad is null/undefined a default padding is derived
 * from the input shape.
 */
function computeOutputShape4D(inShape, fieldSize, outChannels, stride, zeroPad, roundingMode) {
    if (zeroPad == null) {
        zeroPad = computeDefaultPad(inShape, fieldSize, stride);
    }
    // Standard conv output formula applied independently to each spatial dim.
    var computeDim = function (inputDim) {
        return round$1((inputDim - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);
    };
    return [computeDim(inShape[0]), computeDim(inShape[1]), computeDim(inShape[2]), outChannels];
}
/**
 * Derives a default zero-padding from the first input dimension so that
 * stride-1 output matches the input size for the effective filter.
 */
function computeDefaultPad(inputShape, fieldSize, stride, dilation) {
    if (dilation === void 0) { dilation = 1; }
    var effectiveFieldSize = getEffectiveFilterSize(fieldSize, dilation);
    var total = inputShape[0] * (stride - 1) - stride + effectiveFieldSize;
    return Math.floor(total / 2);
}
/**
 * Normalizes a size parameter to a 3-tuple: a single number n becomes
 * [n, n, n], a 2-tuple gains a trailing 1, and a 3-tuple passes through.
 */
function parseTupleParam(param) {
    if (typeof param === 'number') {
        return [param, param, param];
    }
    return param.length === 2 ? [param[0], param[1], 1] : param;
}
/** Normalizes a 3D size parameter: a single number n becomes [n, n, n]. */
function parse3TupleParam(param) {
    if (typeof param === 'number') {
        return [param, param, param];
    }
    return param;
}
+ /* See https://www.tensorflow.org/api_docs/python/tf/nn/atrous_conv2d
+ * Atrous convolution is equivalent to standard convolution with upsampled
+ * filters with effective_filter_height =
+ * filter_height + (filter_height - 1) * (dilation - 1)
+ * and effective_filter_width =
+ * filter_width + (filter_width - 1) * (dilation - 1),
+ * produced by inserting dilation - 1 zeros along consecutive elements across
+ * the filters' spatial dimensions.
+ * When there is a dilation, this converts a filter dimension to the
+ * effective filter dimension, so it can be used in a standard convolution.
+ */
/**
 * Converts a filter dimension to its effective size under dilation:
 * filterSize + (filterSize - 1) * (dilation - 1). Dilations <= 1 leave the
 * size unchanged.
 */
function getEffectiveFilterSize(filterSize, dilation) {
    return dilation <= 1 ? filterSize : filterSize + (filterSize - 1) * (dilation - 1);
}
/**
 * Resolves the padding parameter into explicit per-side pads plus the output
 * height/width for a 2D convolution/pooling op.
 *
 * @param pad A number (uniform pad), 'same', 'valid', or an explicit pad
 *     array indexed by spatial dimension (layout depends on dataFormat).
 * @param roundingMode Forwarded to round$1 for numeric/explicit padding;
 *     'same'/'valid' use ceil directly.
 * @returns {padInfo, outHeight, outWidth}.
 */
function getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, filterHeight, filterWidth, roundingMode, dataFormat) {
    var padInfo;
    var outHeight;
    var outWidth;
    if (typeof pad === 'number') {
        // Uniform numeric padding on all four sides; 0 is reported as VALID.
        var padType = (pad === 0) ? 'VALID' : 'NUMBER';
        padInfo = { top: pad, bottom: pad, left: pad, right: pad, type: padType };
        var outShape = computeOutputShape2D([inHeight, inWidth], filterHeight, strideHeight, pad, roundingMode);
        outHeight = outShape[0];
        outWidth = outShape[1];
    }
    else if (pad === 'same') {
        // TensorFlow SAME: out = ceil(in / stride); any odd total pad puts
        // the extra row/column on the bottom/right.
        outHeight = Math.ceil(inHeight / strideHeight);
        outWidth = Math.ceil(inWidth / strideWidth);
        var padAlongHeight = Math.max(0, (outHeight - 1) * strideHeight + filterHeight - inHeight);
        var padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth);
        var top = Math.floor(padAlongHeight / 2);
        var bottom = padAlongHeight - top;
        var left = Math.floor(padAlongWidth / 2);
        var right = padAlongWidth - left;
        padInfo = { top: top, bottom: bottom, left: left, right: right, type: 'SAME' };
    }
    else if (pad === 'valid') {
        // VALID: no padding; only fully-covered windows contribute.
        padInfo = { top: 0, bottom: 0, left: 0, right: 0, type: 'VALID' };
        outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
        outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
    }
    else if (typeof pad === 'object') {
        // Explicit pad list: spatial dims sit at indices 1,2 (channelsLast)
        // or 2,3 (channelsFirst). All-zero explicit pads collapse to VALID.
        var top = dataFormat === 'channelsLast' ? pad[1][0] : pad[2][0];
        var bottom = dataFormat === 'channelsLast' ? pad[1][1] : pad[2][1];
        var left = dataFormat === 'channelsLast' ? pad[2][0] : pad[3][0];
        var right = dataFormat === 'channelsLast' ? pad[2][1] : pad[3][1];
        var padType = (top === 0 && bottom === 0 && left === 0 && right === 0) ?
            'VALID' :
            'EXPLICIT';
        padInfo = { top: top, bottom: bottom, left: left, right: right, type: padType };
        outHeight = round$1((inHeight - filterHeight + top + bottom) / strideHeight + 1, roundingMode);
        outWidth = round$1((inWidth - filterWidth + left + right) / strideWidth + 1, roundingMode);
    }
    else {
        throw Error("Unknown padding parameter: " + pad);
    }
    return { padInfo: padInfo, outHeight: outHeight, outWidth: outWidth };
}
/**
 * Resolves the padding parameter into explicit per-side pads plus the output
 * depth/height/width for a 3D convolution/pooling op. Unlike the 2D variant,
 * explicit pad arrays are not supported here.
 *
 * @param pad A number (uniform pad), 'same', or 'valid'.
 * @returns {padInfo, outDepth, outHeight, outWidth}.
 */
function get3DPadAndOutInfo(pad, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, filterDepth, filterHeight, filterWidth, roundingMode) {
    var padInfo;
    var outDepth;
    var outHeight;
    var outWidth;
    if (typeof pad === 'number') {
        // Uniform numeric padding on all six sides; 0 is reported as VALID.
        var padType = (pad === 0) ? 'VALID' : 'NUMBER';
        padInfo = {
            top: pad,
            bottom: pad,
            left: pad,
            right: pad,
            front: pad,
            back: pad,
            type: padType
        };
        // computeOutputShape4D uses filterDepth for every spatial dim; the
        // trailing channel slot (1) is discarded.
        var outShape = computeOutputShape4D([inDepth, inHeight, inWidth, 1], filterDepth, 1, strideDepth, pad, roundingMode);
        outDepth = outShape[0];
        outHeight = outShape[1];
        outWidth = outShape[2];
    }
    else if (pad === 'same') {
        // TensorFlow SAME: out = ceil(in / stride); odd total pad goes to
        // the back/bottom/right side.
        // NOTE(review): unlike getPadAndOutInfo's 2D SAME branch, these
        // padAlong* values are not clamped with Math.max(0, ...) — confirm
        // whether negative pads can occur here.
        outDepth = Math.ceil(inDepth / strideDepth);
        outHeight = Math.ceil(inHeight / strideHeight);
        outWidth = Math.ceil(inWidth / strideWidth);
        var padAlongDepth = (outDepth - 1) * strideDepth + filterDepth - inDepth;
        var padAlongHeight = (outHeight - 1) * strideHeight + filterHeight - inHeight;
        var padAlongWidth = (outWidth - 1) * strideWidth + filterWidth - inWidth;
        var front = Math.floor(padAlongDepth / 2);
        var back = padAlongDepth - front;
        var top = Math.floor(padAlongHeight / 2);
        var bottom = padAlongHeight - top;
        var left = Math.floor(padAlongWidth / 2);
        var right = padAlongWidth - left;
        padInfo = { top: top, bottom: bottom, left: left, right: right, front: front, back: back, type: 'SAME' };
    }
    else if (pad === 'valid') {
        // VALID: no padding; only fully-covered windows contribute.
        padInfo = {
            top: 0,
            bottom: 0,
            left: 0,
            right: 0,
            front: 0,
            back: 0,
            type: 'VALID'
        };
        outDepth = Math.ceil((inDepth - filterDepth + 1) / strideDepth);
        outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
        outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
    }
    else {
        throw Error("Unknown padding parameter: " + pad);
    }
    return { padInfo: padInfo, outDepth: outDepth, outHeight: outHeight, outWidth: outWidth };
}
+ /**
+ * Rounds a value depending on the rounding mode
+ * @param value
+ * @param roundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ */
/**
 * Rounds a value depending on the rounding mode.
 * @param value The number to round.
 * @param roundingMode One of 'ceil' | 'round' | 'floor'; when absent the
 *     value is truncated toward zero.
 * @throws Error on any other roundingMode string.
 */
function round$1(value, roundingMode) {
    if (!roundingMode) {
        return Math.trunc(value);
    }
    if (roundingMode === 'round') {
        // Used for Caffe Conv.
        return Math.round(value);
    }
    if (roundingMode === 'ceil') {
        // Used for Caffe Pool.
        return Math.ceil(value);
    }
    if (roundingMode === 'floor') {
        return Math.floor(value);
    }
    throw new Error("Unknown roundingMode " + roundingMode);
}
/** Returns true when every component of the (normalized) tuple equals 1. */
function tupleValuesAreOne(param) {
    var dims = parseTupleParam(param);
    return dims[0] === 1 && dims[1] === 1 && dims[2] === 1;
}
/** Returns true when at least one of strides/dilations is all ones. */
function eitherStridesOrDilationsAreOne(strides, dilations) {
    if (tupleValuesAreOne(strides)) {
        return true;
    }
    return tupleValuesAreOne(dilations);
}
+ /**
+ * Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
+ * 'channelsLast'|'channelsFirst'
+ * @param dataFormat in 'NHWC'|'NCHW' mode
+ * @return dataFormat in 'channelsLast'|'channelsFirst' mode
+ * @throws unknown dataFormat
+ */
/**
 * Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
 * 'channelsLast'|'channelsFirst'.
 * @param dataFormat in 'NHWC'|'NCHW' mode
 * @return dataFormat in 'channelsLast'|'channelsFirst' mode
 * @throws Error on any other dataFormat string
 */
function convertConv2DDataFormat(dataFormat) {
    switch (dataFormat) {
        case 'NHWC':
            return 'channelsLast';
        case 'NCHW':
            return 'channelsFirst';
        default:
            throw new Error("Unknown dataFormat " + dataFormat);
    }
}
/**
 * Check validity of pad when using dimRoundingMode.
 * @param opDesc A string of op description
 * @param pad The type of padding algorithm.
 *   - `same` and stride 1: output will be of same size as input,
 *     regardless of filter size.
 *   - `valid` output will be smaller than input if filter is larger
 *     than 1x1.
 *   - For more info, see this guide:
 *     [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
 *          https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 * @throws unknown padding parameter
 */
function checkPadOnDimRoundingMode(opDesc, pad, dimRoundingMode) {
    // No constraint at all when no rounding mode is requested.
    if (dimRoundingMode != null) {
        // 'same'/'valid' string pads are incompatible with dimRoundingMode.
        if (typeof pad === 'string') {
            throw Error("Error in " + opDesc + ": pad must be an integer when using " +
                ("dimRoundingMode " + dimRoundingMode + " but got pad " + pad + "."));
        }
        // A uniform numeric pad must be an integer.
        else if (typeof pad === 'number') {
            assert(isInt(pad), function () { return "Error in " + opDesc + ": pad must be an integer when using " +
                ("dimRoundingMode " + dimRoundingMode + " but got pad " + pad + "."); });
        }
        // Explicit pad list: every entry of every [before, after] pair must
        // be an integer.
        else if (typeof pad === 'object') {
            pad.forEach(function (p) {
                p.forEach(function (v) {
                    assert(isInt(v), function () { return "Error in " + opDesc + ": pad must be an integer when using " +
                        ("dimRoundingMode " + dimRoundingMode + " but got pad " + v + "."); });
                });
            });
        }
        else {
            throw Error("Error in " + opDesc + ": Unknown padding parameter: " + pad);
        }
    }
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reshapes a `tf.Tensor` to a given shape.
+ *
+ * Given an input tensor, returns a new tensor with the same values as the
+ * input tensor with shape `shape`.
+ *
+ * If one component of shape is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of [-1] flattens into 1-D. At most one component of
+ * shape can be -1.
+ *
+ * If shape is 1-D or higher, then the operation returns a tensor with shape
+ * shape filled with the values of tensor. In this case, the number of
+ * elements implied by shape must be the same as the number of elements in
+ * tensor.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * x.reshape([2, 2]).print();
+ * ```
+ *
+ * @param x The input tensor to be reshaped.
+ * @param shape An array of integers defining the output tensor shape.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
function reshape_(x, shape) {
    // Accepts any tensor-like input, including string tensors.
    var tensor = convertToTensor(x, 'x', 'reshape', 'string_or_numeric');
    return ENGINE.runKernel(Reshape, { x: tensor }, { shape: shape });
}
var reshape = op({ reshape_: reshape_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the 2D average pooling of an image.
+ *
+ * @param x The input tensor, of rank 4 or rank 3 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ */
function avgPool_(x, filterSize, strides, pad, dimRoundingMode) {
    var $x = convertToTensor(x, 'x', 'avgPool', 'float32');
    // avgPool has no dilation parameter, so dilations is fixed at 1 and the
    // strides-or-dilations check reduces to a no-op guard.
    var dilations = 1;
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in avgPool: Either strides or dilations must be 1. ' +
        ("Got strides " + strides + " and dilations '" + dilations + "'"); });
    // Rank-3 inputs are treated as a single-image batch; the batch dim is
    // added here and stripped again before returning.
    var x4D = $x;
    var reshapedTo4D = false;
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(x4D.rank === 4, function () { return "Error in avgPool: x must be rank 4 but got rank " + x4D.rank + "."; });
    checkPadOnDimRoundingMode('avgPool', pad, dimRoundingMode);
    var inputs = { x: x4D };
    var attrs = { filterSize: filterSize, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(AvgPool, inputs, attrs);
    // Cast back to the caller's dtype in case the kernel changed it.
    res = cast(res, $x.dtype);
    if (reshapedTo4D) {
        // Drop the synthetic batch dimension added above.
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var avgPool = op({ avgPool_: avgPool_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the 3D average pooling.
+ *
+ * ```js
+ * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]);
+ * const result = tf.avgPool3d(x, 2, 1, 'valid');
+ * result.print();
+ * ```
+ *
+ * @param x The input tensor, of rank 5 or rank 4 of shape
+ * `[batch, depth, height, width, inChannels]`.
+ * @param filterSize The filter size:
+ * `[filterDepth, filterHeight, filterWidth]`.
+ * If `filterSize` is a single number,
+ * then `filterDepth == filterHeight == filterWidth`.
+ * @param strides The strides of the pooling:
+ * `[strideDepth, strideHeight, strideWidth]`.
+ * If `strides` is a single number,
+ * then `strideDepth == strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1*1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to
+ * "NDHWC". Specify the data format of the input and output data. With the
+ * default format "NDHWC", the data is stored in the order of: [batch,
+ * depth, height, width, channels]. Only "NDHWC" is currently supported.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
function avgPool3d_(x, filterSize, strides, pad, dimRoundingMode, dataFormat) {
    if (dataFormat === void 0) { dataFormat = 'NDHWC'; }
    var $x = convertToTensor(x, 'x', 'avgPool3d', 'float32');
    // Rank-4 inputs are treated as a single-volume batch; the batch dim is
    // added here and stripped again before returning.
    var x5D = $x;
    var reshapedTo5D = false;
    if ($x.rank === 4) {
        reshapedTo5D = true;
        x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]);
    }
    assert(x5D.rank === 5, function () { return "Error in avgPool3d: x must be rank 5 but got rank " + x5D.rank + "."; });
    // Only the channels-last layout is implemented for 3D average pooling.
    assert(dataFormat === 'NDHWC', function () { return "Error in avgPool3d: Only NDHWC is currently supported, " +
        ("but got dataFormat of " + dataFormat); });
    checkPadOnDimRoundingMode('avgPool3d', pad, dimRoundingMode);
    var inputs = { x: x5D };
    var attrs = { filterSize: filterSize, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode, dataFormat: dataFormat };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(AvgPool3D, inputs, attrs);
    // Cast back to the input's dtype in case the kernel changed it.
    res = cast(res, x5D.dtype);
    if (reshapedTo5D) {
        // Drop the synthetic batch dimension added above.
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
    }
    return res;
}
var avgPool3d = op({ avgPool3d_: avgPool3d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Concatenates a list of `tf.Tensor`s along a given axis.
+ *
+ * The tensors ranks and types must match, and their sizes must match in all
+ * dimensions except `axis`.
+ *
+ * Also available are stricter rank-specific methods that assert that
+ * `tensors` are of the given rank:
+ * - `tf.concat1d`
+ * - `tf.concat2d`
+ * - `tf.concat3d`
+ * - `tf.concat4d`
+ *
+ * Except `tf.concat1d` (which does not have axis param), all methods have
+ * same signature as this method.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * a.concat(b).print(); // or a.concat(b)
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.concat([a, b, c]).print();
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 2], [10, 20]]);
+ * const b = tf.tensor2d([[3, 4], [30, 40]]);
+ * const axis = 1;
+ * tf.concat([a, b], axis).print();
+ * ```
+ * @param tensors A list of tensors to concatenate.
+ * @param axis The axis to concate along. Defaults to 0 (the first dim).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
function concat_(tensors, axis) {
    if (axis === void 0) { axis = 0; }
    assert(tensors.length >= 1, function () { return 'Pass at least one tensor to concat'; });
    var $tensors = convertToTensorArray(tensors, 'tensors', 'concat', 'string_or_numeric');
    // complex64 tensors can only be concatenated with other complex64
    // tensors; mixing with real dtypes is rejected up front.
    if ($tensors[0].dtype === 'complex64') {
        $tensors.forEach(function (tensor) {
            if (tensor.dtype !== 'complex64') {
                throw new Error("Cannot concatenate complex64 tensors with a tensor\n with dtype " + tensor.dtype + ". ");
            }
        });
    }
    // Single-tensor concat is an identity; clone to preserve value
    // semantics for the caller.
    if ($tensors.length === 1) {
        return clone($tensors[0]);
    }
    var inputs = $tensors;
    var attr = { axis: axis };
    return ENGINE.runKernel(Concat, inputs, attr);
}
var concat = op({ concat_: concat_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes sigmoid element-wise, `1 / (1 + exp(-x))`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, -1, 2, -3]);
+ *
+ * x.sigmoid().print(); // or tf.sigmoid(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function sigmoid_(x) {
    // Element-wise logistic function; input is coerced to float32.
    var tensor = convertToTensor(x, 'x', 'sigmoid', 'float32');
    return ENGINE.runKernel(Sigmoid, { x: tensor });
}
var sigmoid = op({ sigmoid_: sigmoid_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a slice from a `tf.Tensor` starting at coordinates `begin`
+ * and is of size `size`.
+ *
+ * Also available are stricter rank-specific methods with the same signature
+ * as this method that assert that `x` is of the given rank:
+ * - `tf.slice1d`
+ * - `tf.slice2d`
+ * - `tf.slice3d`
+ * - `tf.slice4d`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * x.slice([1], [2]).print();
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * x.slice([1, 0], [1, 2]).print();
+ * ```
+ * @param x The input `tf.Tensor` to slice from.
+ * @param begin The coordinates to start the slice from. The length can be
+ * less than the rank of x - the rest of the axes will have implicit 0 as
+ * start. Can also be a single number, in which case it specifies the
+ * first axis.
+ * @param size The size of the slice. The length can be less than the rank of
+ * x - the rest of the axes will have implicit -1. A value of -1 requests
+ * the rest of the dimensions in the axis. Can also be a single number,
+ * in which case it specifies the size of the first axis.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
function slice_(x, begin, size) {
    var tensor = convertToTensor(x, 'x', 'slice', 'string_or_numeric');
    // A rank-0 tensor has no axes to slice along.
    if (tensor.rank === 0) {
        throw new Error('Slicing scalar is not possible');
    }
    return ENGINE.runKernel(Slice, { x: tensor }, { begin: begin, size: size });
}
var slice = op({ slice_: slice_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes hyperbolic tangent of the input `tf.Tensor` element-wise: `tanh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, 70]);
+ *
+ * x.tanh().print(); // or tf.tanh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function tanh_(x) {
    // Element-wise hyperbolic tangent; input is coerced to float32.
    var tensor = convertToTensor(x, 'x', 'tanh', 'float32');
    return ENGINE.runKernel(Tanh, { x: tensor });
}
var tanh = op({ tanh_: tanh_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the next state and output of a BasicLSTMCell.
+ *
+ * Returns `[newC, newH]`.
+ *
+ * Derived from tf.contrib.rnn.BasicLSTMCell.
+ *
+ * @param forgetBias Forget bias for the cell.
+ * @param lstmKernel The weights for the cell.
+ * @param lstmBias The bias for the cell.
+ * @param data The input to the cell.
+ * @param c Previous cell state.
+ * @param h Previous cell output.
+ *
+ * @doc {heading: 'Operations', subheading: 'RNN'}
+ */
function basicLSTMCell_(forgetBias, lstmKernel, lstmBias, data, c, h) {
    var $forgetBias = convertToTensor(forgetBias, 'forgetBias', 'basicLSTMCell');
    var $lstmKernel = convertToTensor(lstmKernel, 'lstmKernel', 'basicLSTMCell');
    var $lstmBias = convertToTensor(lstmBias, 'lstmBias', 'basicLSTMCell');
    var $data = convertToTensor(data, 'data', 'basicLSTMCell');
    var $c = convertToTensor(c, 'c', 'basicLSTMCell');
    var $h = convertToTensor(h, 'h', 'basicLSTMCell');
    // Single fused affine transform over [input, previous output]:
    // res = [data, h] . kernel + bias.
    var combined = concat([$data, $h], 1);
    var weighted = matMul$1(combined, $lstmKernel);
    var res = add(weighted, $lstmBias);
    // i = input_gate, j = new_input, f = forget_gate, o = output_gate
    // The fused result is split into four equal column blocks, one per gate.
    var batchSize = res.shape[0];
    var sliceCols = res.shape[1] / 4;
    var sliceSize = [batchSize, sliceCols];
    var i = slice(res, [0, 0], sliceSize);
    var j = slice(res, [0, sliceCols], sliceSize);
    var f = slice(res, [0, sliceCols * 2], sliceSize);
    var o = slice(res, [0, sliceCols * 3], sliceSize);
    // newC = sigmoid(i) * tanh(j) + c * sigmoid(f + forgetBias)
    var newC = add(mul(sigmoid(i), tanh(j)), mul($c, sigmoid(add($forgetBias, f))));
    // newH = tanh(newC) * sigmoid(o)
    var newH = mul(tanh(newC), sigmoid(o));
    return [newC, newH];
}
var basicLSTMCell = op({ basicLSTMCell_: basicLSTMCell_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
 * `blockShape + [batch]`, interleaves those blocks back into the grid defined
 * by the spatial dimensions `[1, ..., M]`, then optionally crops the spatial
 * dimensions according to `crops`. This is the reverse of `tf.spaceToBatchND`.
 *
 * ```js
 * const x = tf.tensor4d([1, 2, 3, 4], [4, 1, 1, 1]);
 * const blockShape = [2, 2];
 * const crops = [[0, 0], [0, 0]];
 *
 * x.batchToSpaceND(blockShape, crops).print();
 * ```
 *
 * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
 *     remainingShape`, where spatialShape has `M` dimensions.
 * @param blockShape A 1-D array of shape `[M]`; all values must be >= 1.
 * @param crops A 2-D array of shape `[M, 2]`; all values must be >= 0.
 *     `crops[i] = [cropStart, cropEnd]` is the amount to crop from input
 *     dimension `i + 1` (spatial dimension `i`). It is required that
 *     `cropStart[i] + cropEnd[i] <= blockShape[i] * inputShape[i + 1]`.
 *
 * @doc {heading: 'Tensors', subheading: 'Transformations'}
 */
function batchToSpaceND_(x, blockShape, crops) {
    var $x = convertToTensor(x, 'x', 'batchToSpaceND');
    // Product of the block dimensions; the batch must be divisible by it.
    // NOTE: reduce without a seed intentionally throws on an empty blockShape.
    var prod = blockShape.reduce(function (a, b) { return a * b; });
    assert($x.rank >= 1 + blockShape.length, function () {
        return "input rank is " + $x.rank + " but should be > than blockShape.length " + blockShape.length;
    });
    assert(crops.length === blockShape.length, function () {
        return "crops.length is " + crops.length + " but should be equal to blockShape.length " + blockShape.length;
    });
    assert($x.shape[0] % prod === 0, function () {
        return "input tensor batch is " + $x.shape[0] + " but is not divisible by the product of " +
            ("the elements of blockShape " + blockShape.join(' * ') + " === " + prod);
    });
    return ENGINE.runKernel(BatchToSpaceND, { x: $x }, { blockShape: blockShape, crops: crops });
}
var batchToSpaceND = op({ batchToSpaceND_: batchToSpaceND_ });
+
/**
 * Pads a tensor of rank <= 4 out to rank 4 by prepending singleton
 * dimensions; rank-0/1 inputs have their full size folded into the last
 * axis. Rank-4 (or higher) inputs are returned untouched.
 */
function xAs4D(x) {
    switch (x.rank) {
        case 0:
        case 1:
            return reshape(x, [1, 1, 1, x.size]);
        case 2:
            return reshape(x, [1, 1, x.shape[0], x.shape[1]]);
        case 3:
            return reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
        default:
            return x;
    }
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Batch normalization.
 *
 * As described in
 * [http://arxiv.org/abs/1502.03167](http://arxiv.org/abs/1502.03167).
 *
 * Mean, variance, scale, and offset may each be one of two shapes:
 * - The same shape as the input.
 * - In the common case, a `tf.Tensor1D` of shape [depth], where depth is the
 *   last dimension of x.
 *
 * Rank-strict variants with the same signature are also available:
 * `tf.batchNorm2d`, `tf.batchNorm3d`, `tf.batchNorm4d`.
 *
 * @param x The input Tensor.
 * @param mean A mean Tensor.
 * @param variance A variance Tensor.
 * @param offset An offset Tensor.
 * @param scale A scale Tensor.
 * @param varianceEpsilon A small float to avoid dividing by 0 (default 0.001).
 *
 * @doc {heading: 'Operations', subheading: 'Normalization'}
 */
function batchNorm_(x, mean, variance, offset, scale, varianceEpsilon) {
    if (varianceEpsilon == null) {
        varianceEpsilon = 0.001;
    }
    var $x = convertToTensor(x, 'x', 'batchNorm');
    var $mean = convertToTensor(mean, 'mean', 'batchNorm');
    var $variance = convertToTensor(variance, 'variance', 'batchNorm');
    var $scale = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var $offset = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    assert($mean.rank === $variance.rank, function () {
        return 'Batch normalization gradient requires mean and variance to have ' +
            'equal ranks.';
    });
    assert($offset == null || $mean.rank === $offset.rank, function () {
        return 'Batch normalization gradient requires mean and offset to have ' +
            'equal ranks.';
    });
    assert($scale == null || $mean.rank === $scale.rank, function () {
        return 'Batch normalization gradient requires mean and scale to have ' +
            'equal ranks.';
    });
    // The kernel only operates on 4D inputs; pad the rank out, run, then
    // restore the caller's original shape.
    var x4D = xAs4D($x);
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(FusedBatchNorm, {
        x: x4D,
        scale: $scale,
        offset: $offset,
        mean: $mean,
        variance: $variance
    }, { varianceEpsilon: varianceEpsilon });
    return reshape(res, $x.shape);
}
var batchNorm = op({ batchNorm_: batchNorm_ });
+
/**
 * Batch normalization, strictly for 2D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor, rank 2.
 * @param mean A mean Tensor, rank 2 or rank 1.
 * @param variance A variance Tensor, rank 2 or rank 1.
 * @param offset An offset Tensor, rank 2 or rank 1.
 * @param scale A scale Tensor, rank 2 or rank 1.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
function batchNorm2d_(x, mean, variance, offset, scale, varianceEpsilon) {
    var $x = convertToTensor(x, 'x', 'batchNorm');
    var $mean = convertToTensor(mean, 'mean', 'batchNorm');
    var $variance = convertToTensor(variance, 'variance', 'batchNorm');
    var $scale = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var $offset = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    assert($x.rank === 2, function () {
        return "Error in batchNorm2D: x must be rank 2 but got rank " + $x.rank + ".";
    });
    // mean/variance/scale/offset must be full-shape (rank 2) or per-depth
    // (rank 1); scale and offset are optional.
    [['mean', $mean], ['variance', $variance], ['scale', $scale],
        ['offset', $offset]].forEach(function (pair) {
        var name = pair[0];
        var t = pair[1];
        if (t != null) {
            assert(t.rank === 2 || t.rank === 1, function () {
                return "Error in batchNorm2D: " + name + " must be rank 2 or rank 1 " +
                    "but got rank " + t.rank + ".";
            });
        }
    });
    return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon);
}
var batchNorm2d = op({ batchNorm2d_: batchNorm2d_ });
+
/**
 * Batch normalization, strictly for 3D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor, rank 3.
 * @param mean A mean Tensor, rank 3 or rank 1.
 * @param variance A variance Tensor, rank 3 or rank 1.
 * @param offset An offset Tensor, rank 3 or rank 1.
 * @param scale A scale Tensor, rank 3 or rank 1.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
function batchNorm3d_(x, mean, variance, offset, scale, varianceEpsilon) {
    var $x = convertToTensor(x, 'x', 'batchNorm');
    var $mean = convertToTensor(mean, 'mean', 'batchNorm');
    var $variance = convertToTensor(variance, 'variance', 'batchNorm');
    var $scale = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var $offset = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    assert($x.rank === 3, function () {
        return "Error in batchNorm3D: x must be rank 3 but got rank " + $x.rank + ".";
    });
    // mean/variance/scale/offset must be full-shape (rank 3) or per-depth
    // (rank 1); scale and offset are optional.
    [['mean', $mean], ['variance', $variance], ['scale', $scale],
        ['offset', $offset]].forEach(function (pair) {
        var name = pair[0];
        var t = pair[1];
        if (t != null) {
            assert(t.rank === 3 || t.rank === 1, function () {
                return "Error in batchNorm3D: " + name + " must be rank 3 or rank 1 " +
                    "but got rank " + t.rank + ".";
            });
        }
    });
    return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon);
}
var batchNorm3d = op({ batchNorm3d_: batchNorm3d_ });
+
/**
 * Batch normalization, strictly for 4D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor, rank 4.
 * @param mean A mean Tensor, rank 4 or rank 1.
 * @param variance A variance Tensor, rank 4 or rank 1.
 * @param offset An offset Tensor, rank 4 or rank 1.
 * @param scale A scale Tensor, rank 4 or rank 1.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
function batchNorm4d_(x, mean, variance, offset, scale, varianceEpsilon) {
    var $x = convertToTensor(x, 'x', 'batchNorm');
    var $mean = convertToTensor(mean, 'mean', 'batchNorm');
    var $variance = convertToTensor(variance, 'variance', 'batchNorm');
    var $scale = scale != null ? convertToTensor(scale, 'scale', 'batchNorm') : undefined;
    var $offset = offset != null ? convertToTensor(offset, 'offset', 'batchNorm') : undefined;
    assert($x.rank === 4, function () {
        return "Error in batchNorm4D: x must be rank 4 but got rank " + $x.rank + ".";
    });
    // mean/variance/scale/offset must be full-shape (rank 4) or per-depth
    // (rank 1); scale and offset are optional.
    [['mean', $mean], ['variance', $variance], ['scale', $scale],
        ['offset', $offset]].forEach(function (pair) {
        var name = pair[0];
        var t = pair[1];
        if (t != null) {
            assert(t.rank === 4 || t.rank === 1, function () {
                return "Error in batchNorm4D: " + name + " must be rank 4 or rank 1 " +
                    "but got rank " + t.rank + ".";
            });
        }
    });
    return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon);
}
var batchNorm4d = op({ batchNorm4d_: batchNorm4d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Outputs a vector with length `size` and the same dtype as `weights`.
 *
 * With empty `weights`, index `i` holds the number of times value `i` occurs
 * in `x`. With non-empty `weights`, index `i` holds the sum of the `weights`
 * entries at each position where the corresponding value in `x` is `i`.
 *
 * Values in `x` outside of the range [0, size) are ignored.
 *
 * @param x The input int tensor, rank 1.
 * @param weights The weights tensor; must have the same shape as x, or be a
 *     length-0 Tensor, in which case all weights act as 1.
 * @param size Non-negative integer length of the output vector.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
function bincount_(x, weights, size) {
    var $x = convertToTensor(x, 'x', 'bincount');
    var $weights = convertToTensor(weights, 'weights', 'bincount');
    assert($x.dtype === 'int32', function () {
        return "Error in bincount: input " +
            "dtype must be int32, but got " + $x.dtype;
    });
    assert(size >= 0, function () {
        return "size must be non-negative, but got " + size + ".";
    });
    assert($weights.size === $x.size || $weights.size === 0, function () {
        return "Error in bincount: weights must have the same size as input or" +
            "0-length, but got input shape: " + $x.shape + ", weights shape: " +
            $weights.shape + ".";
    });
    return ENGINE.runKernel(Bincount, { x: $x, weights: $weights }, { size: size });
}
var bincount = op({ bincount_: bincount_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Return the shape of `s0` op `s1` with broadcast.
 *
 * Computes r0, the broadcasted shape, as a tensor; s0, s1 and r0 are all
 * integer vectors. This gives the shape of the result of an operation
 * between two tensors of shape s0 and s1 performed with broadcast.
 *
 * @param s0 A tensor representing a shape.
 * @param s1 A tensor representing a shape.
 *
 * @doc {heading: 'Tensors', subheading: 'Transformations'}
 */
function broadcastArgs_(s0, s1) {
    var $s0 = convertToTensor(s0, 's0', 'broadcastArgs', 'int32');
    var $s1 = convertToTensor(s1, 's1', 'broadcastArgs', 'int32');
    // Both inputs must be shape vectors, not higher-rank tensors.
    if ($s0.rank !== 1) {
        throw new Error('broadcastArgs(): first input must be a vector (rank=1). ' +
            ("Has rank " + $s0.rank));
    }
    if ($s1.rank !== 1) {
        throw new Error('broadcastArgs(): second input must be a vector (rank=1). ' +
            ("Has rank " + $s1.rank));
    }
    return ENGINE.runKernel(BroadcastArgs, { s0: $s0, s1: $s1 });
}
var broadcastArgs = op({ broadcastArgs_: broadcastArgs_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Broadcast an array to a compatible shape NumPy-style.
 *
 * The tensor's shape is compared to the broadcast shape from end to
 * beginning. Ones are prepended to the tensor's shape until it has the same
 * length as the broadcast shape. If input.shape[i]==shape[i], the (i+1)-th
 * axis is already broadcast-compatible. If input.shape[i]==1 and shape[i]==N,
 * then the input tensor is tiled N times along that axis (using tf.tile).
 *
 * @param x The tensor that is to be broadcasted.
 * @param shape The input is to be broadcast to this shape.
 *
 * @doc {heading: 'Tensors', subheading: 'Transformations'}
 */
function broadcastTo_(x, shape) {
    // Fixed: the arguments were previously passed as (x, 'broadcastTo', 'x'),
    // but convertToTensor takes (value, argName, functionName) — every other
    // op in this file uses that order — so errors misreported the argument
    // name as "broadcastTo".
    var input = convertToTensor(x, 'x', 'broadcastTo');
    var xShape = input.shape;
    if (shape.some(function (d) { return !(d > 0) || d % 1 !== 0; })) {
        throw new Error("broadcastTo(): Invalid broadcast shape [" + shape + "].");
    }
    if (shape.length < input.rank) {
        throw new Error("broadcastTo(): shape.length=" + shape.length + " < input.rank=" + input.rank + ".");
    }
    if (shape.length > input.rank) {
        // Prepend singleton dimensions so the ranks match before tiling.
        var newShape = input.shape.slice();
        while (newShape.length < shape.length) {
            newShape.unshift(1);
        }
        input = reshape(input, newShape);
    }
    var inputShape = input.shape;
    var reps = Array.from(shape);
    for (var i = shape.length - 1; i >= 0; i--) {
        if (inputShape[i] === shape[i]) {
            reps[i] = 1;
        }
        else if (input.shape[i] !== 1) {
            throw new Error("broadcastTo(): [" + xShape + "] cannot be broadcast to [" + shape + "].");
        }
    }
    var axes = reps.map(function (n, i) { return n > 1 ? i : -1; }).filter(function (i) { return i >= 0; });
    if (axes.length === 0) {
        // Nothing to tile; return a copy so callers always own the result.
        return clone(input);
    }
    // TODO call broadcastTo kernel directly once backends implement broadcastTo
    var inputs = { x: input };
    var attrs = { reps: reps };
    return ENGINE.runKernel(Tile, inputs, attrs);
}
var broadcastTo = op({ broadcastTo_: broadcastTo_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes ceiling of input `tf.Tensor` element-wise: `ceil(x)`
 *
 * ```js
 * const x = tf.tensor1d([.6, 1.1, -3.3]);
 *
 * x.ceil().print(); // or tf.ceil(x)
 * ```
 * @param x The input Tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function ceil_(x) {
    var $x = convertToTensor(x, 'x', 'ceil', 'float32');
    return ENGINE.runKernel(Ceil, { x: $x });
}
var ceil = op({ ceil_: ceil_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Clips values element-wise: `max(min(x, clipValueMax), clipValueMin)`.
 *
 * ```js
 * const x = tf.tensor1d([-1, 2, -3, 4]);
 *
 * x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
 * ```
 * @param x The input tensor.
 * @param clipValueMin Lower-bound of range to be clipped to.
 * @param clipValueMax Upper-bound of range to be clipped to.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function clipByValue_(x, clipValueMin, clipValueMax) {
    var $x = convertToTensor(x, 'x', 'clipByValue');
    // The bounds must describe a non-empty interval.
    assert((clipValueMin <= clipValueMax), function () {
        return "Error in clip: min (" + clipValueMin + ") must be " +
            "less than or equal to max (" + clipValueMax + ").";
    });
    return ENGINE.runKernel(ClipByValue, { x: $x }, { clipValueMin: clipValueMin, clipValueMax: clipValueMax });
}
var clipByValue = op({ clipByValue_: clipByValue_ });
+
/**
 * Concatenates a list of `tf.Tensor1D`s along axis 0. See `concat` for
 * details.
 *
 * For example, if:
 * A: shape(3) = |r1, g1, b1|
 * B: shape(2) = |r2, g2|
 * C = tf.concat1d([A, B]) == |r1, g1, b1, r2, g2|
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @return The concatenated array.
 */
function concat1d_(tensors) {
    // 1D tensors only have axis 0 to concatenate along.
    var axis = 0;
    return concat(tensors, axis);
}
var concat1d = op({ concat1d_: concat1d_ });
+
/**
 * Concatenates a list of `tf.Tensor2D`s along an axis. See `concat` for
 * details.
 *
 * For example, if:
 * A: shape(2, 3) = | r1, g1, b1 |
 *                  | r2, g2, b2 |
 *
 * B: shape(2, 3) = | r3, g3, b3 |
 *                  | r4, g4, b4 |
 *
 * C = tf.concat2d([A, B], axis)
 *
 * if axis = 0:
 * C: shape(4, 3) = | r1, g1, b1 |
 *                  | r2, g2, b2 |
 *                  | r3, g3, b3 |
 *                  | r4, g4, b4 |
 *
 * if axis = 1:
 * C = shape(2, 6) = | r1, g1, b1, r3, g3, b3 |
 *                   | r2, g2, b2, r4, g4, b4 |
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
 * @return The concatenated array.
 */
var concat2d_ = function (tensors, axis) {
    return concat(tensors, axis);
};
var concat2d = op({ concat2d_: concat2d_ });
+
/**
 * Concatenates a list of `tf.Tensor3D`s along an axis.
 * See `concat` for details.
 *
 * For example, if:
 * A: shape(2, 1, 3) = | r1, g1, b1 |
 *                     | r2, g2, b2 |
 *
 * B: shape(2, 1, 3) = | r3, g3, b3 |
 *                     | r4, g4, b4 |
 *
 * C = tf.concat3d([A, B], axis)
 *
 * if axis = 0:
 * C: shape(4, 1, 3) = | r1, g1, b1 |
 *                     | r2, g2, b2 |
 *                     | r3, g3, b3 |
 *                     | r4, g4, b4 |
 *
 * if axis = 1:
 * C: shape(2, 2, 3) = | r1, g1, b1, r3, g3, b3 |
 *                     | r2, g2, b2, r4, g4, b4 |
 *
 * if axis = 2:
 * C = shape(2, 1, 6) = | r1, g1, b1, r3, g3, b3 |
 *                      | r2, g2, b2, r4, g4, b4 |
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
 * @return The concatenated array.
 */
var concat3d_ = function (tensors, axis) {
    return concat(tensors, axis);
};
var concat3d = op({ concat3d_: concat3d_ });
+
/**
 * Concatenates a list of `tf.Tensor4D`s along an axis.
 * See `concat` for details.
 *
 * @param tensors A list of `tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
 * @return The concatenated array.
 */
var concat4d_ = function (tensors, axis) {
    return concat(tensors, axis);
};
var concat4d = op({ concat4d_: concat4d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes a 2D convolution over the input x.
 *
 * @param x The input tensor, of rank 4 or rank 3, of shape
 *     `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
 * @param filter The filter, rank 4, of shape
 *     `[filterHeight, filterWidth, inDepth, outDepth]`.
 * @param strides The strides of the convolution:
 *     `[strideHeight, strideWidth]`.
 * @param pad The type of padding algorithm.
 *    - `same` and stride 1: output will be of same size as input,
 *       regardless of filter size.
 *    - `valid`: output will be smaller than input if filter is larger
 *       than 1x1.
 *    - For more info, see
 *      https://www.tensorflow.org/api_docs/python/tf/nn/convolution
 * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
 *     "NHWC", i.e. the data is stored as [batch, height, width, channels].
 * @param dilations The dilation rates `[dilationHeight, dilationWidth]` used
 *     to sample input values across height and width in atrous convolution.
 *     Defaults to `[1, 1]`; a single number means both are equal. If greater
 *     than 1, all values of `strides` must be 1.
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function conv2d_(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    if (dilations === void 0) { dilations = [1, 1]; }
    var $x = convertToTensor(x, 'x', 'conv2d', 'float32');
    var $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32');
    // A rank-3 input is treated as a batch of one image.
    var reshapedTo4D = $x.rank === 3;
    var x4D = reshapedTo4D ?
        reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]) :
        $x;
    assert(x4D.rank === 4, function () {
        return "Error in conv2d: input must be rank 4, but got rank " + x4D.rank + ".";
    });
    assert($filter.rank === 4, function () {
        return "Error in conv2d: filter must be rank 4, but got rank " + $filter.rank + ".";
    });
    checkPadOnDimRoundingMode('conv2d', pad, dimRoundingMode);
    // The channel axis position depends on the data format.
    var inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];
    assert(inDepth === $filter.shape[2], function () {
        return "Error in conv2d: depth of input (" + inDepth + ") must match " +
            "input depth for filter " + $filter.shape[2] + ".";
    });
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () {
        return 'Error in conv2D: Either strides or dilations must be 1. ' +
            "Got strides " + strides + " and dilations '" + dilations + "'";
    });
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv2D, { x: x4D, filter: $filter }, {
        strides: strides,
        pad: pad,
        dataFormat: dataFormat,
        dilations: dilations,
        dimRoundingMode: dimRoundingMode
    });
    // Undo the batch-of-1 wrapping for rank-3 callers.
    return reshapedTo4D ?
        reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) :
        res;
}
var conv2d$1 = op({ conv2d_: conv2d_ });
+
/**
 * Computes a 1D convolution over the input x.
 *
 * @param x The input tensor, of rank 3 or rank 2, of shape
 *     `[batch, width, inChannels]`. If rank 2, batch of 1 is assumed.
 * @param filter The filter, rank 3, of shape
 *     `[filterWidth, inDepth, outDepth]`.
 * @param stride The number of entries by which the filter is moved right at
 *     each step.
 * @param pad The type of padding algorithm.
 *    - `same` and stride 1: output will be of same size as input,
 *       regardless of filter size.
 *    - `valid`: output will be smaller than input if filter is larger
 *       than 1x1.
 *    - For more info, see
 *      https://www.tensorflow.org/api_docs/python/tf/nn/convolution
 * @param dataFormat An optional string from "NWC", "NCW". Defaults to "NWC",
 *     i.e. data stored as [batch, in_width, in_channels]. Only "NWC" is
 *     currently supported.
 * @param dilation The dilation rate for sampling input values in atrous
 *     convolution. Defaults to `1`; if greater than 1, stride must be `1`.
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function conv1d_(x, filter, stride, pad, dataFormat, dilation, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NWC'; }
    if (dilation === void 0) { dilation = 1; }
    var $x = convertToTensor(x, 'x', 'conv1d');
    var $filter = convertToTensor(filter, 'filter', 'conv1d');
    // A rank-2 input is treated as a batch of one sequence.
    var reshapedTo3D = $x.rank === 2;
    var x3D = reshapedTo3D ? reshape($x, [1, $x.shape[0], $x.shape[1]]) : $x;
    assert(x3D.rank === 3, function () {
        return "Error in conv1d: input must be rank 3, but got rank " + x3D.rank + ".";
    });
    assert($filter.rank === 3, function () {
        return "Error in conv1d: filter must be rank 3, but got rank " + $filter.rank + ".";
    });
    checkPadOnDimRoundingMode('conv1d', pad, dimRoundingMode);
    assert(x3D.shape[2] === $filter.shape[1], function () {
        return "Error in conv1d: depth of input (" + x3D.shape[2] + ") must match " +
            "input depth for filter " + $filter.shape[1] + ".";
    });
    assert(eitherStridesOrDilationsAreOne(stride, dilation), function () {
        return 'Error in conv1D: Either stride or dilation must be 1. ' +
            "Got stride " + stride + " and dilation '" + dilation + "'";
    });
    assert(dataFormat === 'NWC', function () {
        return "Error in conv1d: got dataFormat of " + dataFormat + " but only NWC is currently supported.";
    });
    // Implemented by running conv2d over a height-1 image.
    var filter4D = reshape($filter, [1, $filter.shape[0], $filter.shape[1], $filter.shape[2]]);
    var input4D = reshape(x3D, [x3D.shape[0], 1, x3D.shape[1], x3D.shape[2]]);
    var res = conv2d$1(input4D, filter4D, [1, stride], pad, 'NHWC', [1, dilation], dimRoundingMode);
    if (reshapedTo3D) {
        return reshape(res, [res.shape[2], res.shape[3]]);
    }
    return reshape(res, [res.shape[0], res.shape[2], res.shape[3]]);
}
var conv1d = op({ conv1d_: conv1d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the derivative of the input of a 2D convolution.
+ *
+ * @param xShape The shape of the input: [batch, height, width, inDepth].
+ * If length of 3, batch of 1 is assumed.
+ * @param dy The derivative of the output, of rank 4 or rank 3 of shape
+ * `[batch, outHeight, outWidth, outDepth]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter, rank 4, of shape
+ * `[filterHeight, filterWidth, inDepth, outDepth]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm used:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ */
// Gradient of conv2d with respect to its input: dispatches to the
// Conv2DBackpropInput kernel. Also the workhorse behind conv2dTranspose.
function conv2DBackpropInput_(xShape, dy, filter, strides, pad, dataFormat, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    assert(xShape.length === dy.rank, function () { return "Length of inShape " +
        ("(" + xShape.length + ") and rank of dy (" + dy.rank + ") must match"); });
    var xShape4D = xShape;
    var dy4D = dy;
    var reshapedTo4D = false;
    // Promote a rank-3 dy (and length-3 xShape) to 4D with a batch of 1.
    if (dy.rank === 3) {
        reshapedTo4D = true;
        dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
        xShape4D = [1, xShape[0], xShape[1], xShape[2]];
    }
    assert(xShape4D.length === 4, function () { return "Error in conv2dDerInput: inShape must be length 4, but got length " +
        (xShape4D.length + "."); });
    assert(dy4D.rank === 4, function () { return "Error in conv2dDerInput: dy must be rank 4, but got " +
        ("rank " + dy4D.rank); });
    assert(filter.rank === 4, function () { return "Error in conv2dDerInput: filter must be rank 4, but got " +
        ("rank " + filter.rank); });
    // The channel axis position depends on the layout.
    var inDepth = dataFormat === 'NHWC' ? xShape4D[3] : xShape4D[1];
    var outDepth = dataFormat === 'NHWC' ? dy4D.shape[3] : dy4D.shape[1];
    assert(inDepth === filter.shape[2], function () { return "Error in conv2dDerInput: depth of input (" + inDepth + ") must " +
        ("match input depth for filter " + filter.shape[2] + "."); });
    assert(outDepth === filter.shape[3], function () { return "Error in conv2dDerInput: depth of output (" + outDepth + ") must " +
        ("match output depth for filter " + filter.shape[3] + "."); });
    checkPadOnDimRoundingMode('conv2dDerInput', pad, dimRoundingMode);
    var inputs = { dy: dy4D, filter: filter };
    var attrs = { strides: strides, pad: pad, dataFormat: dataFormat, dimRoundingMode: dimRoundingMode, inputShape: xShape4D };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv2DBackpropInput, inputs, attrs);
    // Strip the synthetic batch dimension when one was added above.
    if (reshapedTo4D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var conv2DBackpropInput = op({ conv2DBackpropInput_: conv2DBackpropInput_ });
+
+ /**
+ * Computes the transposed 2D convolution of an image, also known as a
+ * deconvolution.
+ *
+ * @param x The input image, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inDepth]`. If rank 3, batch of 1 is assumed.
+ * @param filter The filter, rank 4, of shape
+ * `[filterHeight, filterWidth, outDepth, inDepth]`.
+ * `inDepth` must match `inDepth` in `x`.
+ * @param outputShape Output shape, of rank 4 or rank 3:
+ * `[batch, height, width, outDepth]`. If rank 3, batch of 1 is assumed.
+ * @param strides The strides of the original convolution:
+ * `[strideHeight, strideWidth]`.
+ * @param pad The type of padding algorithm used in the non-transpose version
+ * of the op.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
/**
 * Transposed 2D convolution (deconvolution): implemented by running the
 * gradient-of-input kernel of the corresponding forward conv2d.
 */
function conv2dTranspose_(x, filter, outputShape, strides, pad, dimRoundingMode) {
    var input = convertToTensor(x, 'x', 'conv2dTranspose');
    var kernel = convertToTensor(filter, 'filter', 'conv2dTranspose');
    return conv2DBackpropInput(outputShape, input, kernel, strides, pad, 'NHWC', dimRoundingMode);
}
var conv2dTranspose = op({ conv2dTranspose_: conv2dTranspose_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes a 3D convolution over the input x.
+ *
+ * @param x The input tensor, of rank 5 or rank 4, of shape
+ * `[batch, depth, height, width, channels]`. If rank 4,
+ * batch of 1 is assumed.
+ * @param filter The filter, rank 5, of shape
+ * `[filterDepth, filterHeight, filterWidth, inChannels, outChannels]`.
+ * inChannels must match between input and filter.
+ * @param strides The strides of the convolution: `[strideDepth, strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dataFormat: An optional string from: "NDHWC", "NCDHW". Defaults to
+ * "NDHWC". Specify the data format of the input and output data. With the
+ * default format "NDHWC", the data is stored in the order of: [batch,
+ * depth, height, width, channels]. Only "NDHWC" is currently supported.
+ * @param dilations The dilation rates: `[dilationDepth, dilationHeight,
+ * dilationWidth]` in which we sample input values across the height
+ * and width dimensions in atrous convolution. Defaults to `[1, 1, 1]`.
+ * If `dilations` is a single number, then
+ * `dilationDepth == dilationHeight == dilationWidth`. If it is greater
+ * than 1, then all values of `strides` must be 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
// Computes a 3D convolution over input x (NDHWC layout only) via the Conv3D
// kernel. A rank-4 input is treated as a batch-of-1 rank-5 tensor.
function conv3d_(x, filter, strides, pad, dataFormat, dilations) {
    if (dataFormat === void 0) { dataFormat = 'NDHWC'; }
    if (dilations === void 0) { dilations = [1, 1, 1]; }
    var $x = convertToTensor(x, 'x', 'conv3d');
    var $filter = convertToTensor(filter, 'filter', 'conv3d');
    var x5D = $x;
    var reshapedTo5D = false;
    // Promote a rank-4 input to rank 5 with a leading batch of 1.
    if ($x.rank === 4) {
        reshapedTo5D = true;
        x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]);
    }
    assert(x5D.rank === 5, function () { return "Error in conv3d: input must be rank 5, but got rank " + x5D.rank + "."; });
    assert($filter.rank === 5, function () { return "Error in conv3d: filter must be rank 5, but got rank " +
        ($filter.rank + "."); });
    // Input channel count must match the filter's inChannels dimension.
    assert(x5D.shape[4] === $filter.shape[3], function () { return "Error in conv3d: depth of input (" + x5D.shape[4] + ") must match " +
        ("input depth for filter " + $filter.shape[3] + "."); });
    // Striding and dilation are mutually exclusive for this kernel.
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in conv3D: Either strides or dilations must be 1. ' +
        ("Got strides " + strides + " and dilations '" + dilations + "'"); });
    assert(dataFormat === 'NDHWC', function () { return "Error in conv3d: got dataFormat of " + dataFormat + " but only NDHWC is currently supported."; });
    var inputs = { x: x5D, filter: $filter };
    var attrs = { strides: strides, pad: pad, dataFormat: dataFormat, dilations: dilations };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv3D, inputs, attrs);
    // Undo the batch promotion so callers get back the rank they passed in.
    if (reshapedTo5D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
    }
    return res;
}
var conv3d = op({ conv3d_: conv3d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the derivative of the input of a 3D convolution.
+ *
+ * @param xShape The shape of the input: [batch, depth, height, width,
+ * in_channels]. If length of 4, batch of 1 is assumed.
+ * @param dy The derivative of the output, of rank 5 or rank 4 of shape
+ * `[batch, outDepth, outHeight, outWidth, in_channels]`.
+ * If rank 4, batch of 1 is assumed.
+ * @param filter The filter, rank 5, of shape
+ * `[filterDepth, filterHeight, filterWidth, inDepth, outDepth]`.
+ * @param strides The strides of the convolution: `[strideDepth, strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm used:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ */
// Gradient of conv3d with respect to its input: dispatches to the
// Conv3DBackpropInputV2 kernel. Also backs conv3dTranspose.
function conv3DBackpropInput_(xShape, dy, filter, strides, pad) {
    assert(xShape.length === dy.rank, function () { return "Length of inShape " +
        ("(" + xShape.length + ") and rank of dy (" + dy.rank + ") must match"); });
    var xShape5D = xShape;
    var dy5D = dy;
    var reshapedTo5D = false;
    // Promote a rank-4 dy (and length-4 xShape) to 5D with a batch of 1.
    if (dy.rank === 4) {
        reshapedTo5D = true;
        dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);
        xShape5D = [1, xShape[0], xShape[1], xShape[2], xShape[3]];
    }
    // Channel counts: last axis of input shape / dy (NDHWC layout).
    var inDepth = xShape5D[4];
    var outDepth = dy5D.shape[4];
    assert(xShape5D.length === 5, function () { return "Error in conv3dDerInput: inShape must be length 5, but got length " +
        (xShape5D.length + "."); });
    assert(dy5D.rank === 5, function () { return "Error in conv3dDerInput: dy must be rank 5, but got " +
        ("rank " + dy5D.rank); });
    assert(filter.rank === 5, function () { return "Error in conv3dDerInput: filter must be rank 5, but got " +
        ("rank " + filter.rank); });
    assert(inDepth === filter.shape[3], function () { return "Error in conv3dDerInput: depth of input (" + inDepth + ") must " +
        ("match input depth for filter " + filter.shape[3] + "."); });
    assert(outDepth === filter.shape[4], function () { return "Error in conv3dDerInput: depth of output (" + outDepth + ") must " +
        ("match output depth for filter " + filter.shape[4] + "."); });
    var inputs = { dy: dy5D, filter: filter };
    var attrs = { pad: pad, strides: strides, inputShape: xShape5D };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(Conv3DBackpropInputV2, inputs, attrs);
    // Strip the synthetic batch dimension when one was added above.
    if (reshapedTo5D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
    }
    return res;
}
var conv3DBackpropInput = op({ conv3DBackpropInput_: conv3DBackpropInput_ });
+
+ /**
+ * Computes the transposed 3D convolution of a volume, also known as a
+ * deconvolution.
+ *
+ * @param x The input image, of rank 5 or rank 4, of shape
+ * `[batch, depth, height, width, inDepth]`. If rank 4, batch of 1 is assumed.
+ * @param filter The filter, rank 5, of shape
+ * `[depth, filterHeight, filterWidth, outDepth, inDepth]`.
+ * `inDepth` must match `inDepth` in `x`.
+ * @param outputShape Output shape, of rank 5 or rank 4:
+ * `[batch, depth, height, width, outDepth]`. If rank 4, batch of 1 is
+ * assumed.
+ * @param strides The strides of the original convolution:
+ * `[strideDepth, strideHeight, strideWidth]`.
+ * @param pad The type of padding algorithm used in the non-transpose version
+ * of the op.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
/**
 * Transposed 3D convolution (deconvolution): implemented by running the
 * gradient-of-input kernel of the corresponding forward conv3d.
 */
function conv3dTranspose_(x, filter, outputShape, strides, pad) {
    var input = convertToTensor(x, 'x', 'conv3dTranspose');
    var kernel = convertToTensor(filter, 'filter', 'conv3dTranspose');
    return conv3DBackpropInput(outputShape, input, kernel, strides, pad);
}
var conv3dTranspose = op({ conv3dTranspose_: conv3dTranspose_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes cos of the input `tf.Tensor` element-wise: `cos(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ *
+ * x.cos().print(); // or tf.cos(x)
+ * ```
+ * @param x The input tensor. Must be float32 type.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/** Element-wise cosine of a float32 tensor; delegates to the Cos kernel. */
function cos_(x) {
    var input = convertToTensor(x, 'x', 'cos', 'float32');
    return ENGINE.runKernel(Cos, { x: input });
}
var cos = op({ cos_: cos_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes hyperbolic cos of the input `tf.Tensor` element-wise: `cosh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.cosh().print(); // or tf.cosh(x)
+ * ```
+ * @param x The input tensor. Must be float32 type.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/** Element-wise hyperbolic cosine of a float32 tensor; delegates to the Cosh kernel. */
function cosh_(x) {
    var input = convertToTensor(x, 'x', 'cosh', 'float32');
    return ENGINE.runKernel(Cosh, { x: input });
}
var cosh = op({ cosh_: cosh_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the cumulative sum of a `tf.Tensor` along `axis`.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2, 3, 4]);
+ * x.cumsum().print();
+ * ```
+ * ```js
+ * const x = tf.tensor([[1, 2], [3, 4]]);
+ * x.cumsum().print();
+ * ```
+ *
+ * @param x The input tensor to be summed.
+ * @param axis The axis along which to sum. Optional. Defaults to 0.
+ * @param exclusive Whether to perform exclusive cumulative sum. Optional.
+ * Defaults to false. If set to true then the sum of each tensor entry
+ * does not include its own value, but only the values previous to it
+ * along the specified axis.
+ * @param reverse Whether to sum in the opposite direction. Optional.
+ * Defaults to false.
+ *
+ * @doc {heading: 'Operations', subheading: 'Scan'}
+ */
/**
 * Cumulative sum along `axis` (default 0); `exclusive` skips each entry's own
 * value, `reverse` accumulates in the opposite direction. Dispatches to the
 * Cumsum kernel.
 */
function cumsum_(x, axis, exclusive, reverse) {
    if (axis === void 0) { axis = 0; }
    if (exclusive === void 0) { exclusive = false; }
    if (reverse === void 0) { reverse = false; }
    var input = convertToTensor(x, 'x', 'cumsum');
    var kernelInputs = { x: input };
    var kernelAttrs = { axis: axis, exclusive: exclusive, reverse: reverse };
    return ENGINE.runKernel(Cumsum, kernelInputs, kernelAttrs);
}
var cumsum = op({ cumsum_: cumsum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Outputs a vector with length `size` and the same dtype as `weights`.
+ *
+ * If `weights` are empty, then index `i` stores the number of times the value
+ * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the
+ * sum of the value in `weights` at each index where the corresponding value in
+ * `x` is `i`.
+ *
+ * Values in `x` outside of the range [0, size) are ignored.
+ *
+ * @param x The input int tensor, rank 1 or rank 2.
+ * @param weights The weights tensor, must have the same shape as x, or a
+ * length-0 Tensor, in which case it acts as all weights equal to 1.
+ * @param size Non-negative integer.
+ * @param binaryOutput Optional. Whether the kernel should count the appearance
+ * or number of occurrences. Defaults to False.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
// Counts (or sums weights of) occurrences of each value in [0, size) within
// the int32 tensor x; dispatches to the DenseBincount kernel.
function denseBincount_(x, weights, size, binaryOutput) {
    if (binaryOutput === void 0) { binaryOutput = false; }
    var $x = convertToTensor(x, 'x', 'denseBincount');
    var $weights = convertToTensor(weights, 'weights', 'denseBincount');
    assert($x.dtype === 'int32', function () { return "Error in denseBincount: input " +
        ("dtype must be int32, but got " + $x.dtype); });
    assert($x.rank <= 2, function () { return "Error in denseBincount: input must be at most rank 2, but got " +
        ("rank " + $x.rank + "."); });
    assert(size >= 0, function () { return "size must be non-negative, but got " + size + "."; });
    // weights must either match x element-for-element or be empty (treated as
    // all-ones weights by the kernel).
    assert($weights.size === $x.size || $weights.size === 0, function () { return "Error in denseBincount: weights must have the same shape as x or " +
        ("0-length, but got x shape: " + $x.shape + ", weights shape: ") +
        ($weights.shape + "."); });
    var inputs = { x: $x, weights: $weights };
    var attrs = { size: size, binaryOutput: binaryOutput };
    return ENGINE.runKernel(DenseBincount, inputs, attrs);
}
var denseBincount = op({ denseBincount_: denseBincount_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Rearranges data from depth into blocks of spatial data. More specifically,
+ * this op outputs a copy of the input tensor where values from the `depth`
+ * dimension are moved in spatial blocks to the `height` and `width` dimensions.
+ * The attr `blockSize` indicates the input block size and how the data is
+ * moved.
+ *
+ * - Chunks of data of size `blockSize * blockSize` from depth are rearranged
+ * into non-overlapping blocks of size `blockSize x blockSize`
+ *
+ * - The width the output tensor is `inputWidth * blockSize`, whereas the
+ * height is `inputHeight * blockSize`
+ *
+ * - The Y, X coordinates within each block of the output image are determined
+ * by the high order component of the input channel index
+ *
+ * - The depth of the input tensor must be divisible by `blockSize *
+ * blockSize`
+ *
+ * The `dataFormat` attr specifies the layout of the input and output tensors
+ * with the following options: "NHWC": [ `batch, height, width, channels` ]
+ * "NCHW": [ `batch, channels, height, width` ]
+ *
+ * ```js
+ * const x = tf.tensor4d([1, 2, 3, 4], [1, 1, 1, 4]);
+ * const blockSize = 2;
+ * const dataFormat = "NHWC";
+ *
+ * tf.depthToSpace(x, blockSize, dataFormat).print();
+ * ```
+ *
+ * @param x The input tensor of rank 4
+ * @param blockSize An `int` that is `>= 2`. The size of the spatial block
+ * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to "NHWC"
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
// Rearranges channel data of a rank-4 tensor into spatial blocks of size
// blockSize x blockSize; dispatches to the DepthToSpace kernel.
function depthToSpace_(x, blockSize, dataFormat) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    var $x = convertToTensor(x, 'x', 'depthToSpace', 'float32');
    // Pick out the spatial/channel dims according to the layout.
    var inputHeight = (dataFormat === 'NHWC') ? $x.shape[1] : $x.shape[2];
    var inputWidth = (dataFormat === 'NHWC') ? $x.shape[2] : $x.shape[3];
    var inputDepth = (dataFormat === 'NHWC') ? $x.shape[3] : $x.shape[1];
    assert(blockSize > 1, function () { return "blockSize should be > 1 for depthToSpace, but was: " + blockSize; });
    // Guard against integer overflow producing negative output dimensions.
    assert(inputHeight * blockSize >= 0, function () { return "Negative dimension size caused by overflow when multiplying\n " + inputHeight + " and " + blockSize + " for depthToSpace with input shape\n " + $x.shape; });
    assert(inputWidth * blockSize >= 0, function () { return "Negative dimension size caused by overflow when multiplying\n " + inputWidth + " and " + blockSize + " for depthToSpace with input shape\n " + $x.shape; });
    // Each output pixel consumes blockSize^2 input channels.
    assert((inputDepth % (blockSize * blockSize) === 0), function () { return "Dimension size must be evenly divisible by " + blockSize * blockSize + " but is " + inputDepth + " for depthToSpace with input shape " + $x.shape; });
    var inputs = { x: $x };
    var attrs = { blockSize: blockSize, dataFormat: dataFormat };
    return ENGINE.runKernel(DepthToSpace, inputs, attrs);
}
var depthToSpace = op({ depthToSpace_: depthToSpace_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Depthwise 2D convolution.
+ *
+ * Given a 4D `input` array and a `filter` array of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing
+ * `inChannels` convolutional filters of depth 1, this op applies a
+ * different filter to each input channel (expanding from 1 channel to
+ * `channelMultiplier` channels for each), then concatenates the results
+ * together. The output has `inChannels * channelMultiplier` channels.
+ *
+ * See
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
+ * for more details.
+ *
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter tensor, rank 4, of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`. If strides is a single number, then `strideHeight ==
+ * strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels]. Only "NHWC" is currently supported.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
// Depthwise 2D convolution: applies a separate depth-1 filter to each input
// channel via the DepthwiseConv2dNative kernel; output has
// inChannels * channelMultiplier channels.
function depthwiseConv2d_(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode) {
    if (dataFormat === void 0) { dataFormat = 'NHWC'; }
    if (dilations === void 0) { dilations = [1, 1]; }
    var $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32');
    var $filter = convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32');
    var x4D = $x;
    var reshapedTo4D = false;
    // Promote a rank-3 input to rank 4 with a leading batch of 1.
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(x4D.rank === 4, function () { return "Error in depthwiseConv2d: input must be rank 4, but got " +
        ("rank " + x4D.rank + "."); });
    assert($filter.rank === 4, function () { return "Error in depthwiseConv2d: filter must be rank 4, but got rank " +
        ($filter.rank + "."); });
    // The filter's inChannels dimension must match the input's channel count.
    assert(x4D.shape[3] === $filter.shape[2], function () { return "Error in depthwiseConv2d: number of input channels " +
        ("(" + x4D.shape[3] + ") must match the inChannels dimension in ") +
        ("filter " + $filter.shape[2] + "."); });
    checkPadOnDimRoundingMode('depthwiseConv2d', pad, dimRoundingMode);
    var inputs = { x: x4D, filter: $filter };
    var attrs = { strides: strides, pad: pad, dataFormat: dataFormat, dilations: dilations, dimRoundingMode: dimRoundingMode };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(DepthwiseConv2dNative, inputs, attrs);
    // Strip the synthetic batch dimension when one was added above.
    if (reshapedTo4D) {
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var depthwiseConv2d$1 = op({ depthwiseConv2d_: depthwiseConv2d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns a diagonal tensor with a given diagonal values.
+ *
+ * Given a diagonal, this operation returns a tensor with the diagonal and
+ * everything else padded with zeros.
+ *
+ * Assume the input has dimensions `[D1,..., Dk]`, then the output is a tensor
+ * of rank 2k with dimensions `[D1,..., Dk, D1,..., Dk]`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * tf.diag(x).print()
+ * ```
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 6, 8], [4, 2])
+ *
+ * tf.diag(x).print()
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Builds a diagonal tensor from the input's values: a rank-k input of shape
 * [D1,...,Dk] yields a rank-2k output of shape [D1,...,Dk, D1,...,Dk].
 * Delegates to the Diag kernel.
 */
function diag_(x) {
    var input = convertToTensor(x, 'x', 'diag');
    return ENGINE.runKernel(Diag, { x: input });
}
var diag = op({ diag_: diag_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the grayscale dilation over the input `x`.
+ *
+ * @param x The input tensor, rank 3 or rank 4 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param filter The filter tensor, rank 3, of shape
+ * `[filterHeight, filterWidth, depth]`.
+ * @param strides The strides of the sliding window for each dimension of the
+ * input tensor: `[strideHeight, strideWidth]`.
+ * If `strides` is a single number,
+ * then `strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1*1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dataFormat Specify the data format of the input and output data.
+ * Defaults to 'NHWC'. Only 'NHWC' is currently supported. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * for atrous morphological dilation. Defaults to `[1, 1]`. If `dilations`
+ * is a single number, then `dilationHeight == dilationWidth`. If it is
+ * greater than 1, then all values of `strides` must be 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
+ function dilation2d_(x, filter, strides, pad, dilations, dataFormat) {
+ if (dilations === void 0) { dilations = [1, 1]; }
+ if (dataFormat === void 0) { dataFormat = 'NHWC'; }
+ var $x = convertToTensor(x, 'x', 'dilation2d');
+ var $filter = convertToTensor(filter, 'filter', 'dilation2d');
+ assert($x.rank === 3 || $x.rank === 4, function () { return "Error in dilation2d: input must be rank 3 or 4, but got rank " +
+ ($x.rank + "."); });
+ assert($filter.rank === 3, function () { return "Error in dilation2d: filter must be rank 3, but got rank " +
+ ($filter.rank + "."); });
+ assert(dataFormat === 'NHWC', function () { return "Error in dilation2d: Only NHWC is currently supported, " +
+ ("but got dataFormat of " + dataFormat); });
+ var x4D = $x;
+ var reshapedTo4D = false;
+ if ($x.rank === 3) {
+ x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
+ reshapedTo4D = true;
+ }
+ var inputs = { x: x4D, filter: $filter };
+ var attrs = { strides: strides, pad: pad, dilations: dilations };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ var res = ENGINE.runKernel(Dilation2D, inputs, attrs);
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ var dilation2d = op({ dilation2d_: dilation2d_ });
+
+ /**
+ * Returns the truth value of (a == b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.equal(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function equal_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'equal', 'string_or_numeric');
+ var $b = convertToTensor(b, 'b', 'equal', 'string_or_numeric');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Equal, inputs);
+ }
+ var equal = op({ equal_: equal_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the elements, either `a` or `b` depending on the `condition`.
+ *
+ * If the condition is true, select from `a`, otherwise select from `b`.
+ *
+ * ```js
+ * const cond = tf.tensor1d([false, false, true], 'bool');
+ * const a = tf.tensor1d([1 , 2, 3]);
+ * const b = tf.tensor1d([-1, -2, -3]);
+ *
+ * a.where(cond, b).print();
+ * ```
+ *
+ * @param condition The input condition. Must be of dtype bool.
+ * @param a If `condition` is rank 1, `a` may have a higher rank but
+ * its first dimension must match the size of `condition`.
+ * @param b A tensor with the same dtype as `a` and with shape that is
+ * compatible with `a`.
+ * @return A tensor with same dtype as `a` and `b`, and shape that is
+ * broadcastable from `a` and `b`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function where_(condition, a, b) {
+ var $a = convertToTensor(a, 'a', 'where');
+ var $b = convertToTensor(b, 'b', 'where');
+ var $condition = convertToTensor(condition, 'condition', 'where', 'bool');
+ // TODO: move this logic to forward function when the broadcastTo op is
+ // implemented in WASM.
+ // Find the broadcastable shape for $condition, $a, and $b.
+ var broadcastShape = assertAndGetBroadcastShape(assertAndGetBroadcastShape($condition.shape, $a.shape), $b.shape);
+ var $broadcastedCondition = broadcastTo($condition, broadcastShape);
+ var $broadcastedA = broadcastTo($a, broadcastShape);
+ var $broadcastedB = broadcastTo($b, broadcastShape);
+ var inputs = {
+ condition: $broadcastedCondition,
+ t: $broadcastedA,
+ e: $broadcastedB
+ };
+ return ENGINE.runKernel(Select, inputs);
+ }
+ var where = op({ where_: where_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with all elements set to 0 with the same shape as the
+ * given tensor.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2]);
+ * tf.zerosLike(x).print();
+ * ```
+ *
+ * @param x The tensor of required shape.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function zerosLike_(x) {
+ var $x = convertToTensor(x, 'x', 'zerosLike');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(ZerosLike, inputs);
+ }
+ var zerosLike = op({ zerosLike_: zerosLike_ });
+
+ /**
+ * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. Return 0
+ * if denominator is 0.
+ *
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 9, 16]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ * const c = tf.tensor1d([0, 0, 0, 0]);
+ *
+ * a.divNoNan(b).print(); // or tf.divNoNan(a, b)
+ * a.divNoNan(c).print(); // or tf.divNoNan(a, c)
+ * ```
+ *
+ * ```js
+ * // Broadcast div a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(2);
+ * const c = tf.scalar(0);
+ *
+ * a.divNoNan(b).print(); // or tf.divNoNan(a, b)
+ * a.divNoNan(c).print(); // or tf.divNoNan(a, c)
+ * ```
+ *
+ * @param a The first tensor as the numerator.
+ * @param b The second tensor as the denominator. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function divNoNan_(a, b) {
+ var _a;
+ // TODO: Make this into its own kernel.
+ var $a = convertToTensor(a, 'a', 'div');
+ var $b = convertToTensor(b, 'b', 'div');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var divResult = div($a, $b);
+ var zeros = zerosLike(divResult);
+ var bEqualsZero = equal($b, zeros);
+ return where(bEqualsZero, zeros, divResult);
+ }
+ var divNoNan = op({ divNoNan_: divNoNan_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the dot product of two matrices and/or vectors, `t1` and `t2`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor2d([[1, 2], [3, 4]]);
+ * const c = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);
+ *
+ * a.dot(b).print(); // or tf.dot(a, b)
+ * b.dot(a).print();
+ * b.dot(c).print();
+ * ```
+ * @param t1 The first tensor in the dot operation.
+ * @param t2 The second tensor in the dot operation.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
+ function dot_(t1, t2) {
+ var $t1 = convertToTensor(t1, 't1', 'dot');
+ var $t2 = convertToTensor(t2, 't2', 'dot');
+ assert(($t1.rank === 1 || $t1.rank === 2) && ($t2.rank === 1 || $t2.rank === 2), function () { return "Error in dot: inputs must all be rank 1 or 2, but got ranks " +
+ ($t1.rank + " and " + $t2.rank + "."); });
+ var t1Inner = ($t1.rank === 1 ? $t1.size : $t1.shape[1]);
+ var t2Inner = ($t2.rank === 1 ? $t2.size : $t2.shape[0]);
+ assert(t1Inner === t2Inner, function () { return "Error in dot: inner dimensions of inputs must match, but got " +
+ (t1Inner + " and " + t2Inner + "."); });
+ if ($t1.rank === 1 && $t2.rank === 1) {
+ var t12D = reshape($t1, [1, -1]);
+ var t22D = reshape($t2, [-1, 1]);
+ var t1t2 = matMul$1(t12D, t22D);
+ return reshape(t1t2, []);
+ }
+ else if ($t1.rank === 1 && $t2.rank === 2) {
+ var t12D = reshape($t1, [1, -1]);
+ var t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]);
+ var t1t2 = matMul$1(t12D, t22D);
+ return reshape(t1t2, [t1t2.size]);
+ }
+ else if ($t1.rank === 2 && $t2.rank === 1) {
+ var t22D = reshape($t2, [-1, 1]);
+ var t1t2 = matMul$1($t1, t22D);
+ return reshape(t1t2, [t1t2.size]);
+ }
+ else {
+ var t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]);
+ var t1t2 = matMul$1($t1, t22D);
+ return t1t2;
+ }
+ }
+ var dot = op({ dot_: dot_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Tensor contraction over specified indices and outer product.
+ *
+ * `einsum` allows defining Tensors by defining their element-wise computation.
+ * This computation is based on
+ * [Einstein summation](https://en.wikipedia.org/wiki/Einstein_notation).
+ *
+ * Some special cases include:
+ *
+ * Matrix multiplication:
+ * ```js
+ * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);
+ * const y = tf.tensor2d([[0, 1], [2, 3], [4, 5]]);
+ * x.print();
+ * y.print();
+ * tf.einsum('ij,jk->ik', x, y).print();
+ * ```
+ *
+ * Dot product:
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ * const y = tf.tensor1d([0, 1, 2]);
+ * x.print();
+ * y.print();
+ * tf.einsum('i,i->', x, y).print();
+ * ```
+ *
+ * Batch dot product:
+ * ```js
+ * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);
+ * const y = tf.tensor2d([[0, 1, 2], [3, 4, 5]]);
+ * x.print();
+ * y.print();
+ * tf.einsum('bi,bi->b', x, y).print();
+ * ```
+ *
+     * Outer product:
+ * ```js
+ * const x = tf.tensor1d([1, 3, 5]);
+ * const y = tf.tensor1d([2, 4, 6]);
+ * x.print();
+ * y.print();
+ * tf.einsum('i,j->ij', x, y).print();
+ * ```
+ *
+ * Matrix transpose:
+ * ```js
+ * const x = tf.tensor2d([[1, 2], [3, 4]]);
+ * x.print();
+ * tf.einsum('ij->ji', x).print();
+ * ```
+ *
+ * Batch matrix transpose:
+ * ```js
+ * const x = tf.tensor3d([[[1, 2], [3, 4]], [[-1, -2], [-3, -4]]]);
+ * x.print();
+ * tf.einsum('bij->bji', x).print();
+ * ```
+ *
+ * Limitations:
+ *
+ * This implementation of einsum has the following limitations:
+ *
+ * - Does not support >2 input tensors.
+ * - Does not support duplicate axes for any given input tensor. E.g., equation
+     *   'ii->' is not supported.
+ * - The `...` notation is not supported.
+ *
+ * @param equation a string describing the contraction, in the same format as
+ * [numpy.einsum](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html).
+ * @param tensors the input(s) to contract (each one a Tensor), whose shapes
+ * should be consistent with equation.
+ * @returns The output tensor.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Matrices'}
+ */
+ function einsum_(equation) {
+ var tensors = [];
+ for (var _i = 1; _i < arguments.length; _i++) {
+ tensors[_i - 1] = arguments[_i];
+ }
+ var $tensors = tensors.map(function (t, i) { return convertToTensor(t, "tensors" + i, 'einsum'); });
+ var attrs = { equation: equation };
+ return ENGINE.runKernel(Einsum, $tensors, attrs);
+ }
+ var einsum = op({ einsum_: einsum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes exponential linear element-wise: `x > 0 ? x : (e ^ x) - 1`.
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 1, -3, 2]);
+ *
+ * x.elu().print(); // or tf.elu(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function elu_(x) {
+ var $x = convertToTensor(x, 'x', 'elu', 'float32');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Elu, inputs);
+ }
+ var elu = op({ elu_: elu_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+     * Computes the Gauss error function of the input `tf.Tensor` element-wise:
+ * `erf(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, .1, -.1, .7]);
+ *
+ * x.erf().print(); // or tf.erf(x);
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function erf_(x) {
+ var $x = convertToTensor(x, 'x', 'erf');
+ assert($x.dtype === 'int32' || $x.dtype === 'float32', function () { return 'Input dtype must be `int32` or `float32`.'; });
+ if ($x.dtype === 'int32') {
+ $x = cast($x, 'float32');
+ }
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Erf, inputs);
+ }
+ var erf = op({ erf_: erf_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, -3]);
+ *
+ * x.exp().print(); // or tf.exp(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function exp_(x) {
+ var $x = convertToTensor(x, 'x', 'exp');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Exp, inputs);
+ }
+ var exp = op({ exp_: exp_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns a `tf.Tensor` that has expanded rank, by inserting a dimension
+ * into the tensor's shape.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const axis = 1;
+ * x.expandDims(axis).print();
+ * ```
+ *
+ * @param x The input tensor whose dimensions to be expanded.
+ * @param axis The dimension index at which to insert shape of `1`. Defaults
+ * to 0 (the first dimension).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
+ function expandDims_(x, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $x = convertToTensor(x, 'x', 'expandDims', 'string_or_numeric');
+ assert(axis <= $x.rank, function () { return 'Axis must be <= rank of the tensor'; });
+ var inputs = { input: $x };
+ var attrs = { dim: axis };
+ return ENGINE.runKernel(ExpandDims, inputs, attrs);
+ }
+ var expandDims = op({ expandDims_: expandDims_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes exponential of the input `tf.Tensor` minus one element-wise.
+ * `e ^ x - 1`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, -3]);
+ *
+ * x.expm1().print(); // or tf.expm1(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function expm1_(x) {
+ var $x = convertToTensor(x, 'x', 'expm1');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Expm1, inputs);
+ }
+ var expm1 = op({ expm1_: expm1_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Construct a tensor by repeating it the number of times given by reps.
+ *
+ * This operation creates a new tensor by replicating `input` `reps`
+ * times. The output tensor's i'th dimension has `input.shape[i] *
+ * reps[i]` elements, and the values of `input` are replicated
+ * `reps[i]` times along the i'th dimension. For example, tiling
+ * `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ *
+ * a.tile([2]).print(); // or a.tile([2])
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * a.tile([1, 2]).print(); // or a.tile([1, 2])
+ * ```
+ * @param x The tensor to tile.
+ * @param reps Determines the number of replications per dimension.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
+ function tile_(x, reps) {
+ var $x = convertToTensor(x, 'x', 'tile', 'string_or_numeric');
+ assert($x.rank === reps.length, function () { return "Error in transpose: rank of input " + $x.rank + " " +
+ ("must match length of reps " + reps + "."); });
+ var inputs = { x: $x };
+ var attrs = { reps: reps };
+ return ENGINE.runKernel(Tile, inputs, attrs);
+ }
+ var tile = op({ tile_: tile_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Create an identity matrix.
+ *
+ * @param numRows Number of rows.
+ * @param numColumns Number of columns. Defaults to `numRows`.
+ * @param batchShape If provided, will add the batch shape to the beginning
+ * of the shape of the returned `tf.Tensor` by repeating the identity
+ * matrix.
+ * @param dtype Data type.
+ * @returns Identity matrix of the specified size and data type, possibly
+ * with batch repetition if `batchShape` is specified.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function eye_(numRows, numColumns, batchShape, dtype) {
+ if (dtype === void 0) { dtype = 'float32'; }
+ if (numColumns == null) {
+ numColumns = numRows;
+ }
+ var buff = buffer([numRows, numColumns], dtype);
+ var n = numRows <= numColumns ? numRows : numColumns;
+ for (var i = 0; i < n; ++i) {
+ buff.set(1, i, i);
+ }
+ var out = reshape(buff.toTensor(), [numRows, numColumns]);
+ if (batchShape == null) {
+ return out;
+ }
+ else {
+ if (batchShape.length === 1) {
+ return tile(expandDims(out, 0), [batchShape[0], 1, 1]);
+ }
+ else if (batchShape.length === 2) {
+ // tslint:disable-next-line:no-unnecessary-type-assertion
+ return tile(expandDims(expandDims(out, 0), 0), [batchShape[0], batchShape[1], 1, 1]);
+ }
+ else if (batchShape.length === 3) {
+ // tslint:disable-next-line:no-unnecessary-type-assertion
+ return tile(expandDims(expandDims(expandDims(out, 0), 0), 0), [
+ batchShape[0], batchShape[1], batchShape[2], 1, 1
+ ]);
+ }
+ else {
+ throw new Error("eye() currently supports only 1D and 2D " +
+ (
+ // tslint:disable-next-line:no-any
+ "batchShapes, but received " + batchShape.length + "D."));
+ }
+ }
+ }
+ var eye = op({ eye_: eye_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` filled with a scalar value.
+ *
+ * ```js
+ * tf.fill([2, 2], 4).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param value The scalar value to fill the tensor with.
+ * @param dtype The type of an element in the resulting tensor. Defaults to
+ * 'float'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function fill(shape, value, dtype) {
+ var attrs = { shape: shape, value: value, dtype: dtype };
+ return ENGINE.runKernel(Fill, {}, attrs);
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes floor of input `tf.Tensor` element-wise: `floor(x)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
+ *
+ * x.floor().print(); // or tf.floor(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function floor_(x) {
+ var $x = convertToTensor(x, 'x', 'floor', 'float32');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Floor, inputs);
+ }
+ var floor = op({ floor_: floor_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Gather slices from tensor `x`'s axis `axis` according to `indices`.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const indices = tf.tensor1d([1, 3, 3], 'int32');
+ *
+ * x.gather(indices).print();
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const indices = tf.tensor1d([1, 1, 0], 'int32');
+ *
+ * x.gather(indices).print();
+ * ```
+ * @param x The input tensor whose slices to be gathered.
+ * @param indices The indices of the values to extract.
+ * @param axis The axis over which to select values. Defaults to 0.
+ * @param batchDims Optional. The number of batch dimensions. It must be less
+ * than or equal to rank(indices). Defaults to 0.
+ * The output tensor will have shape of
+ * `x.shape[:axis] + indices.shape[batchDims:] + x.shape[axis + 1:]`
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
+ function gather_(x, indices, axis, batchDims) {
+ if (axis === void 0) { axis = 0; }
+ if (batchDims === void 0) { batchDims = 0; }
+ var $x = convertToTensor(x, 'x', 'gather');
+ var $indices = convertToTensor(indices, 'indices', 'gather', 'int32');
+ var inputs = { x: $x, indices: $indices };
+ var attrs = { axis: axis, batchDims: batchDims };
+ return ENGINE.runKernel(GatherV2, inputs, attrs);
+ }
+ var gather = op({ gather_: gather_ });
+
+ /**
+ * Returns the truth value of (a > b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.greater(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function greater_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'greater', 'string_or_numeric');
+ var $b = convertToTensor(b, 'b', 'greater', 'string_or_numeric');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Greater, inputs);
+ }
+ var greater = op({ greater_: greater_ });
+
+ /**
+ * Returns the truth value of (a >= b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.greaterEqual(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function greaterEqual_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'greaterEqual', 'string_or_numeric');
+ var $b = convertToTensor(b, 'b', 'greaterEqual', 'string_or_numeric');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(GreaterEqual, inputs);
+ }
+ var greaterEqual = op({ greaterEqual_: greaterEqual_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the imaginary part of a complex (or real) tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of type float that is
+ * the imaginary part of each element in input considered as a complex number.
+ * If input is real, a tensor of all zeros is returned.
+ *
+ * ```js
+ * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);
+ * tf.imag(x).print();
+ * ```
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function imag_(input) {
+ var $input = convertToTensor(input, 'input', 'imag');
+ var inputs = { input: $input };
+ return ENGINE.runKernel(Imag, inputs);
+ }
+ var imag = op({ imag_: imag_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns which elements of x are finite.
+ *
+ * ```js
+ * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
+ *
+ * x.isFinite().print(); // or tf.isNaN(x)
+ * ```
+ * @param x The input Tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function isFinite_(x) {
+ var $x = convertToTensor(x, 'x', 'isFinite');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(IsFinite, inputs);
+ }
+ var isFinite$1 = op({ isFinite_: isFinite_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns which elements of x are Infinity or -Infinity.
+ *
+ * ```js
+ * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
+ *
+ * x.isInf().print(); // or tf.isNaN(x)
+ * ```
+ * @param x The input Tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function isInf_(x) {
+ var $x = convertToTensor(x, 'x', 'isInf');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(IsInf, inputs);
+ }
+ var isInf = op({ isInf_: isInf_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * RReturns which elements of x are NaN.
+ *
+ * ```js
+ * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
+ *
+ * x.isNaN().print(); // or tf.isNaN(x)
+ * ```
+ * @param x The input Tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function isNaN_(x) {
+ var $x = convertToTensor(x, 'x', 'isNaN');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(IsNan, inputs);
+ }
+ var isNaN$1 = op({ isNaN_: isNaN_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes leaky rectified linear element-wise.
+ *
+ * See
+ * [http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf](
+ * http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.leakyRelu(0.1).print(); // or tf.leakyRelu(x, 0.1)
+ * ```
+ * @param x The input tensor.
+ * @param alpha The scaling factor for negative values, defaults to 0.2.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function leakyRelu_(x, alpha) {
+ if (alpha === void 0) { alpha = 0.2; }
+ var $x = convertToTensor(x, 'x', 'leakyRelu');
+ var inputs = { x: $x };
+ var attrs = { alpha: alpha };
+ return ENGINE.runKernel(LeakyRelu, inputs, attrs);
+ }
+ var leakyRelu = op({ leakyRelu_: leakyRelu_ });
+
+ /**
+ * Returns the truth value of (a < b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.less(b).print();
+ * ```
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function less_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'less', 'string_or_numeric');
+ var $b = convertToTensor(b, 'b', 'less', 'string_or_numeric');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Less, inputs);
+ }
+ var less = op({ less_: less_ });
+
+ /**
+ * Returns the truth value of (a <= b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([2, 2, 2]);
+ *
+ * a.lessEqual(b).print();
+ * ```
+ *
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function lessEqual_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'lessEqual', 'string_or_numeric');
+ var $b = convertToTensor(b, 'b', 'lessEqual', 'string_or_numeric');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(LessEqual, inputs);
+ }
+ var lessEqual = op({ lessEqual_: lessEqual_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Return an evenly spaced sequence of numbers over the given interval.
+ *
+ * ```js
+ * tf.linspace(0, 9, 10).print();
+ * ```
+ * @param start The start value of the sequence.
+ * @param stop The end value of the sequence.
+ * @param num The number of values to generate.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function linspace(start, stop, num) {
+ if (num <= 0) {
+ throw new Error('The number of values should be positive.');
+ }
+ var attrs = { start: start, stop: stop, num: num };
+ return ENGINE.runKernel(LinSpace, {}, attrs);
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Normalizes the activation of a local neighborhood across or within
+ * channels.
+ *
+ * @param x The input tensor. The 4-D input tensor is treated as a 3-D array
+ * of 1D vectors (along the last dimension), and each vector is
+ * normalized independently.
+ * @param depthRadius The number of adjacent channels in the 1D normalization
+ * window.
+ * @param bias A constant bias term for the basis.
+ * @param alpha A scale factor, usually positive.
+ * @param beta An exponent.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
+ function localResponseNormalization_(x, depthRadius, bias, alpha, beta) {
+ if (depthRadius === void 0) { depthRadius = 5; }
+ if (bias === void 0) { bias = 1; }
+ if (alpha === void 0) { alpha = 1; }
+ if (beta === void 0) { beta = 0.5; }
+ var $x = convertToTensor(x, 'x', 'localResponseNormalization');
+ assert($x.rank === 4 || $x.rank === 3, function () { return "Error in localResponseNormalization: x must be rank 3 or 4 but got\n rank " + $x.rank + "."; });
+ assert(isInt(depthRadius), function () { return "Error in localResponseNormalization: depthRadius must be an " +
+ ("integer but got depthRadius " + depthRadius + "."); });
+ var x4D = $x;
+ var reshapedTo4D = false;
+ if ($x.rank === 3) {
+ reshapedTo4D = true;
+ x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
+ }
+ var inputs = { x: x4D };
+ var attrs = { depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ var res = ENGINE.runKernel(LRN, inputs, attrs);
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ else {
+ return res;
+ }
+ }
+ var localResponseNormalization = op({ localResponseNormalization_: localResponseNormalization_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes natural logarithm of the input `tf.Tensor` element-wise: `ln(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, Math.E]);
+ *
+ * x.log().print(); // or tf.log(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function log_(x) {
+ var $x = convertToTensor(x, 'x', 'log', 'float32');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Log, inputs);
+ }
+ var log = op({ log_: log_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes natural logarithm of the input `tf.Tensor` plus one
+ * element-wise: `ln(1 + x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, Math.E - 1]);
+ *
+ * x.log1p().print(); // or tf.log1p(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function log1p_(x) {
+ var $x = convertToTensor(x, 'x', 'log1p');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Log1p, inputs);
+ }
+ var log1p = op({ log1p_: log1p_ });
+
+ /**
+ * Provided `f(x)`, returns another function `g(x, dy?)`, which gives the
+ * gradient of `f(x)` with respect to `x`.
+ *
+ * If `dy` is provided, the gradient of `f(x).mul(dy).sum()` with respect to
+ * `x` is computed instead. `f(x)` must take a single tensor `x` and return a
+ * single tensor `y`. If `f()` takes multiple inputs, use `tf.grads` instead.
+ *
+ * ```js
+ * // f(x) = x ^ 2
+ * const f = x => x.square();
+ * // f'(x) = 2x
+ * const g = tf.grad(f);
+ *
+ * const x = tf.tensor1d([2, 3]);
+ * g(x).print();
+ * ```
+ *
+ * ```js
+ * // f(x) = x ^ 3
+ * const f = x => x.pow(tf.scalar(3, 'int32'));
+ * // f'(x) = 3x ^ 2
+ * const g = tf.grad(f);
+ * // f''(x) = 6x
+ * const gg = tf.grad(g);
+ *
+ * const x = tf.tensor1d([2, 3]);
+ * gg(x).print();
+ * ```
+ *
+ * @param f The function f(x), to compute gradient for.
+ *
+ * @doc {heading: 'Training', subheading: 'Gradients'}
+ */
+ function grad(f) {
+ assert(isFunction(f), function () { return 'The f passed in grad(f) must be a function'; });
+ return function (x, dy) {
+ // x can be of any dtype, thus null as the last argument.
+ var $x = convertToTensor(x, 'x', 'tf.grad', 'string_or_numeric');
+ var $dy = (dy != null) ? convertToTensor(dy, 'dy', 'tf.grad') : null;
+ return ENGINE.tidy(function () {
+ var _a = ENGINE.gradients(function () { return f($x); }, [$x], $dy), value = _a.value, grads = _a.grads;
+ if ($dy != null) {
+ assertShapesMatch(value.shape, $dy.shape, 'The shape of dy passed in grad(f)(x, dy) must match the shape ' +
+ 'returned by f(x)');
+ }
+ checkGrads(grads);
+ return grads[0];
+ });
+ };
+ }
+ /**
+ * Provided `f(x1, x2,...)`, returns another function `g([x1, x2,...], dy?)`,
+ * which gives an array of gradients of `f()` with respect to each input
+ * [`x1`,`x2`,...].
+ *
+ * If `dy` is passed when calling `g()`, the gradient of
+ * `f(x1,...).mul(dy).sum()` with respect to each input is computed instead.
+ * The provided `f` must take one or more tensors and return a single tensor
+ * `y`. If `f()` takes a single input, we recommend using `tf.grad` instead.
+ *
+ * ```js
+ * // f(a, b) = a * b
+ * const f = (a, b) => a.mul(b);
+ * // df / da = b, df / db = a
+ * const g = tf.grads(f);
+ *
+ * const a = tf.tensor1d([2, 3]);
+ * const b = tf.tensor1d([-2, -3]);
+ * const [da, db] = g([a, b]);
+ * console.log('da');
+ * da.print();
+ * console.log('db');
+ * db.print();
+ * ```
+ *
+ * @param f The function `f(x1, x2,...)` to compute gradients for.
+ *
+ * @doc {heading: 'Training', subheading: 'Gradients'}
+ */
+ function grads(f) {
+ assert(isFunction(f), function () { return 'The f passed in grads(f) must be a function'; });
+ return function (args, dy) {
+ assert(Array.isArray(args), function () { return 'The args passed in grads(f)(args) must be an array ' +
+ 'of `Tensor`s or `TensorLike`s'; });
+ // args can be of any dtype, thus null as the last argument.
+ var $args = convertToTensorArray(args, 'args', 'tf.grads', 'string_or_numeric');
+ var $dy = (dy != null) ? convertToTensor(dy, 'dy', 'tf.grads') : null;
+ return ENGINE.tidy(function () {
+ var _a = ENGINE.gradients(function () { return f.apply(void 0, __spread($args)); }, $args, $dy), value = _a.value, grads = _a.grads;
+ if ($dy != null) {
+ assertShapesMatch(value.shape, $dy.shape, 'The shape of dy passed in grads(f)([x1,...], dy) must ' +
+ 'match the shape returned by f([x1,...])');
+ }
+ checkGrads(grads);
+ return grads;
+ });
+ };
+ }
+ /**
+ * Like `tf.grad`, but also returns the value of `f()`. Useful when `f()`
+ * returns a metric you want to show.
+ *
+ * The result is a rich object with the following properties:
+ * - grad: The gradient of `f(x)` w.r.t `x` (result of `tf.grad`).
+ * - value: The value returned by `f(x)`.
+ *
+ * ```js
+ * // f(x) = x ^ 2
+ * const f = x => x.square();
+ * // f'(x) = 2x
+ * const g = tf.valueAndGrad(f);
+ *
+ * const x = tf.tensor1d([2, 3]);
+ * const {value, grad} = g(x);
+ *
+ * console.log('value');
+ * value.print();
+ * console.log('grad');
+ * grad.print();
+ * ```
+ *
+ * @doc {heading: 'Training', subheading: 'Gradients'}
+ */
+ function valueAndGrad(f) {
+ assert(isFunction(f), function () { return 'The f passed in valueAndGrad(f) must be a function'; });
+ return function (x, dy) {
+ assert(x instanceof Tensor, function () { return 'The x passed in valueAndGrad(f)(x) must be a tensor'; });
+ assert(dy == null || dy instanceof Tensor, function () { return 'The dy passed in valueAndGrad(f)(x, dy) must be a tensor'; });
+ var _a = ENGINE.gradients(function () { return f(x); }, [x], dy), grads = _a.grads, value = _a.value;
+ checkGrads(grads);
+ return { grad: grads[0], value: value };
+ };
+ }
+ /**
+ * Like `tf.grads`, but returns also the value of `f()`. Useful when `f()`
+ * returns a metric you want to show.
+ *
+ * The result is a rich object with the following properties:
+ * - grads: The gradients of `f()` w.r.t each input (result of `tf.grads`).
+ * - value: The value returned by `f(x)`.
+ *
+ * ```js
+ * // f(a, b) = a * b
+ * const f = (a, b) => a.mul(b);
+ * // df/da = b, df/db = a
+ * const g = tf.valueAndGrads(f);
+ *
+ * const a = tf.tensor1d([2, 3]);
+ * const b = tf.tensor1d([-2, -3]);
+ * const {value, grads} = g([a, b]);
+ *
+ * const [da, db] = grads;
+ *
+ * console.log('value');
+ * value.print();
+ *
+ * console.log('da');
+ * da.print();
+ * console.log('db');
+ * db.print();
+ * ```
+ *
+ * @doc {heading: 'Training', subheading: 'Gradients'}
+ */
+ function valueAndGrads(f) {
+ assert(isFunction(f), function () { return 'The f passed in valueAndGrads(f) must be a function'; });
+ return function (args, dy) {
+ assert(Array.isArray(args) && args.every(function (arg) { return arg instanceof Tensor; }), function () { return 'The args passed in valueAndGrads(f)(args) must be array of ' +
+ 'tensors'; });
+ assert(dy == null || dy instanceof Tensor, function () { return 'The dy passed in valueAndGrads(f)(args, dy) must be a tensor'; });
+ var res = ENGINE.gradients(function () { return f.apply(void 0, __spread(args)); }, args, dy);
+ if (dy != null) {
+ assertShapesMatch(res.value.shape, dy.shape, 'The shape of dy passed in valueAndGrads(f)([x1,...], dy) must ' +
+ 'match the shape returned by f([x1,...])');
+ }
+ checkGrads(res.grads);
+ return res;
+ };
+ }
+ /**
+ * Computes and returns the gradient of f(x) with respect to the list of
+ * trainable variables provided by `varList`. If no list is provided, it
+ * defaults to all trainable variables.
+ *
+ * ```js
+ * const a = tf.variable(tf.tensor1d([3, 4]));
+ * const b = tf.variable(tf.tensor1d([5, 6]));
+ * const x = tf.tensor1d([1, 2]);
+ *
+ * // f(a, b) = a * x ^ 2 + b * x
+ * const f = () => a.mul(x.square()).add(b.mul(x)).sum();
+ * // df/da = x ^ 2, df/db = x
+ * const {value, grads} = tf.variableGrads(f);
+ *
+ * Object.keys(grads).forEach(varName => grads[varName].print());
+ * ```
+ *
+ * @param f The function to execute. f() should return a scalar.
+ * @param varList The list of variables to compute the gradients with respect
+ * to. Defaults to all trainable variables.
+ * @returns An object with the following keys and values:
+ * - `value`: The value of the function `f`.
+ * - `grads`: A map from the names of the variables to the gradients.
+ * If the `varList` argument is provided explicitly and contains a subset of
+ * non-trainable variables, this map in the return value will contain keys
+ * that map the names of the non-trainable variables to `null`.
+ *
+ * @doc {heading: 'Training', subheading: 'Gradients'}
+ */
+ function variableGrads(f, varList) {
+ assert(isFunction(f), function () { return 'The f passed in variableGrads(f) must be a function'; });
+ assert(varList == null ||
+ Array.isArray(varList) && varList.every(function (v) { return v instanceof Variable; }), function () { return 'The varList passed in variableGrads(f, varList) must be an array ' +
+ 'of variables'; });
+ var specifiedVarList = varList != null;
+ if (!specifiedVarList) {
+ // Get all of the trainable variables.
+ varList = [];
+ for (var varName in ENGINE.registeredVariables) {
+ varList.push(ENGINE.registeredVariables[varName]);
+ }
+ }
+ var specifiedNonTrainable = specifiedVarList ? varList.filter(function (variable) { return !variable.trainable; }) : null;
+ // Prune non-trainable variables.
+ var originalVarCount = varList.length;
+ varList = varList.filter(function (variable) { return variable.trainable; });
+ assert(varList.length > 0, function () { return "variableGrads() expects at least one of the input variables to " +
+ ("be trainable, but none of the " + originalVarCount + " variables is ") +
+ "trainable."; });
+ var allowNoGradients = true;
+ var _a = ENGINE.gradients(f, varList, null, allowNoGradients), value = _a.value, grads = _a.grads;
+ assert(grads.some(function (g) { return g != null; }), function () { return 'Cannot find a connection between any variable and the result of ' +
+ 'the loss function y=f(x). Please make sure the operations that ' +
+ 'use variables are inside the function f passed to minimize().'; });
+ assert(value.rank === 0, function () { return "The f passed in variableGrads(f) must return a scalar, but it " +
+ ("returned a rank-" + value.rank + " tensor"); });
+ var namedGrads = {};
+ varList.forEach(function (v, i) {
+ if (grads[i] != null) {
+ namedGrads[v.name] = grads[i];
+ }
+ });
+ if (specifiedNonTrainable != null) {
+ // If varList is explicitly provided and contains non-trainable values,
+ // add them to the returned gradients with `null` values.
+ specifiedNonTrainable.forEach(function (v) { return namedGrads[v.name] = null; });
+ }
+ return { value: value, grads: namedGrads };
+ }
+ /**
+ * Overrides the gradient computation of a function `f`.
+ *
+ * Takes a function
+ * `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}`
+ * and returns another function `g(...inputs)` which takes the same inputs as
+ * `f`. When called, `g` returns `f().value`. In backward mode, custom gradients
+ * with respect to each input of `f` are computed using `f().gradFunc`.
+ *
+ * The `save` function passsed to `f` should be used for saving tensors needed
+ * in the gradient. And the `saved` passed to the `gradFunc` is a
+ * `NamedTensorMap`, which contains those saved tensor.
+ *
+ * ```js
+ * const customOp = tf.customGrad((x, save) => {
+ * // Save x to make sure it's available later for the gradient.
+ * save([x]);
+ * // Override gradient of our custom x ^ 2 op to be dy * abs(x);
+ * return {
+ * value: x.square(),
+ * // Note `saved.x` which points to the `x` we saved earlier.
+ * gradFunc: (dy, saved) => [dy.mul(saved[0].abs())]
+ * };
+ * });
+ *
+ * const x = tf.tensor1d([-1, -2, 3]);
+ * const dx = tf.grad(x => customOp(x));
+ *
+ * console.log(`f(x):`);
+ * customOp(x).print();
+ * console.log(`f'(x):`);
+ * dx(x).print();
+ * ```
+ *
+ * @param f The function to evaluate in forward mode, which should return
+ * `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc`
+ * returns the custom gradients of `f` with respect to its inputs.
+ *
+ * @doc {heading: 'Training', subheading: 'Gradients'}
+ */
+ function customGrad(f) {
+ return ENGINE.customGrad(f);
+ }
+ function checkGrads(grads) {
+ var numNullGradients = grads.filter(function (g) { return g == null; }).length;
+ if (numNullGradients > 0) {
+ throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that\n the f you passed encloses all operations that lead from x to y.");
+ }
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes `-1 * x` element-wise.
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, -2, 0], [2, 2]);
+ *
+ * x.neg().print(); // or tf.neg(x)
+ * ```
+ *
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function neg_(x) {
+ var $x = convertToTensor(x, 'x', 'neg');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Neg, inputs);
+ }
+ var neg = op({ neg_: neg_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes softplus of the input `tf.Tensor` element-wise: `log(exp(x) + 1)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.softplus().print(); // or tf.softplus(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function softplus_(x) {
+ var $x = convertToTensor(x, 'x', 'softplus');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Softplus, inputs);
+ }
+ var softplus = op({ softplus_: softplus_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes log sigmoid of the input `tf.Tensor` element-wise:
+ * `logSigmoid(x)`. For numerical stability, we use `-tf.softplus(-x)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.logSigmoid().print(); // or tf.logSigmoid(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function logSigmoid_(x) {
+ var $x = convertToTensor(x, 'x', 'logSigmoid');
+ // Use a custom gradient to maintain previous implementation.
+ // There is no LogSigmoid kernel in TF so we can't use engine.runKernel
+ // directly
+ var customOp = customGrad(function (x) {
+ // TODO(yassogba) we can remove the chained softplus call here only
+ // after backends have modualrized softplus at which point we can call
+ // engine runKernel(..., Sotfplus, ...) directly.
+ var value = neg(softplus(neg(x)));
+ var gradFunc = function (dy) {
+ var derX = mul(dy, sigmoid(neg(x)));
+ return derX;
+ };
+ return { value: value, gradFunc: gradFunc };
+ });
+ return customOp($x);
+ }
+ var logSigmoid = op({ logSigmoid_: logSigmoid_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the maximum of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and an
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.max().print(); // or tf.max(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.max(axis).print(); // or tf.max(x, axis)
+ * ```
+ *
+ * @param x The input tensor.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function max_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'max');
+ var inputs = { x: $x };
+ var attrs = { reductionIndices: axis, keepDims: keepDims };
+ return ENGINE.runKernel(Max, inputs, attrs);
+ }
+ var max = op({ max_: max_ });
+
+ /**
+ * Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * a.sub(b).print(); // or tf.sub(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast subtract a with b.
+ * const a = tf.tensor1d([10, 20, 30, 40]);
+ * const b = tf.scalar(5);
+ *
+ * a.sub(b).print(); // or tf.sub(a, b)
+ * ```
+ * @param a The first `tf.Tensor` to subtract from.
+ * @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as
+ * `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function sub_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'sub');
+ var $b = convertToTensor(b, 'b', 'sub');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Sub, inputs);
+ }
+ var sub = op({ sub_: sub_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the sum of elements of a `tf.Tensor` across dimensions.
 *
 * Reduces along the dimensions given in `axis`. Unless `keepDims` is true,
 * each reduced axis drops out of the result's rank; with `keepDims` the
 * reduced axes are retained with length 1. A null/empty `axis` reduces
 * every dimension, producing a single-element `tf.Tensor`.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3]);
 *
 * x.sum().print(); // or tf.sum(x)
 * ```
 *
 * ```js
 * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
 *
 * const axis = 1;
 * x.sum(axis).print(); // or tf.sum(x, axis)
 * ```
 *
 * @param x The input tensor to compute the sum over. If the dtype is `bool`
 *     it will be converted to `int32` and the output dtype will be `int32`.
 * @param axis The dimension(s) to reduce. By default it reduces
 *     all dimensions.
 * @param keepDims If true, retains reduced dimensions with size 1.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
function sum_(x, axis, keepDims) {
    if (axis === void 0) { axis = null; }
    if (keepDims === void 0) { keepDims = false; }
    var $x = convertToTensor(x, 'x', 'sum');
    // Summing booleans means counting true values, so promote to int32.
    if ($x.dtype === 'bool') {
        $x = cast($x, 'int32');
    }
    return ENGINE.runKernel(Sum, { x: $x }, { axis: axis, keepDims: keepDims });
}
var sum = op({ sum_: sum_ });
+
/**
 * Computes the log softmax.
 *
 * ```js
 * const a = tf.tensor1d([1, 2, 3]);
 *
 * a.logSoftmax().print(); // or tf.logSoftmax(a)
 * ```
 *
 * ```js
 * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);
 *
 * a.logSoftmax().print(); // or tf.logSoftmax(a)
 * ```
 *
 * @param logits The logits array.
 * @param axis The dimension softmax would be performed on. Defaults to `-1`
 *     which indicates the last dimension.
 *
 * @doc {heading: 'Operations', subheading: 'Normalization'}
 */
function logSoftmax_(logits, axis) {
    if (axis === void 0) { axis = -1; }
    var $logits = convertToTensor(logits, 'logits', 'logSoftmax');
    // Normalize the default -1 to the concrete last-dimension index.
    if (axis === -1) {
        axis = $logits.rank - 1;
    }
    // Only the last dimension is supported by this implementation.
    if (axis !== $logits.rank - 1) {
        throw Error('Log Softmax along a non-last dimension is not yet supported. ' +
            ("Logits was rank " + $logits.rank + " and axis was " + axis));
    }
    // const forward: ForwardFunc<Tensor> = (backend, save) => {
    //   const keepDims = true;
    //   const xMax = max(logits, axis, true);
    //   const shifted = sub(logits, xMax);
    //   const value =
    //       sub(cast(shifted, 'float32'), log(sum(exp(shifted), axis,
    //       keepDims)));
    //   save([value]);
    //   return value;
    // };
    // Use a custom gradient for numerical stability.
    var customOp = customGrad(function (logits, save) {
        var keepDims = true;
        // Subtract the per-axis max before exponentiating so exp() cannot
        // overflow: logSoftmax(x) = (x - max) - log(sum(exp(x - max))).
        var xMax = max(logits, axis, true);
        var shifted = sub(logits, xMax);
        var value = sub(cast(shifted, 'float32'), log(sum(exp(shifted), axis, keepDims)));
        // The forward result is saved so the gradient can recover softmax
        // as exp(logSoftmax) instead of recomputing it from the logits.
        save([value]);
        var gradFunc = function (dy, saved) {
            var _a = __read(saved, 1), value = _a[0];
            var keepDims = true;
            var softmax = exp(value);
            return sub(dy, mul(sum(dy, axis, keepDims), softmax));
        };
        return { value: value, gradFunc: gradFunc };
    });
    return customOp($logits);
    // TODO Use Engine.runKernel when CPU/WebGL/WASM backends implement this.
    // const inputs: LogSoftmaxInputs = {logits: $logits};
    // const attrs: LogSoftmaxAttrs = {axis};
    // return ENGINE.runKernel(
    //     LogSoftmax, inputs as {} as NamedTensorMap,
    //     attrs as {} as NamedAttrMap);
}
var logSoftmax = op({ logSoftmax_: logSoftmax_ });
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns true when `axes` lists exactly the inner-most (trailing)
 * dimensions of a rank-`rank` tensor, in order.
 */
function axesAreInnerMostDims(axes, rank) {
    var count = axes.length;
    // Walk from the last axis inward; each entry must match the
    // corresponding trailing dimension index.
    for (var offset = 1; offset <= count; ++offset) {
        if (axes[count - offset] !== rank - offset) {
            return false;
        }
    }
    return true;
}
/**
 * Interleaves an output-space location and a reduced-space location back
 * into a full-rank location: reduced axes take values from `reduceLoc`,
 * all other axes from `outputLoc`, in order.
 */
function combineLocations(outputLoc, reduceLoc, axes) {
    var totalRank = outputLoc.length + reduceLoc.length;
    var merged = [];
    var nextOut = 0;
    var nextReduce = 0;
    for (var dim = 0; dim < totalRank; ++dim) {
        merged.push(axes.indexOf(dim) === -1 ? outputLoc[nextOut++] :
            reduceLoc[nextReduce++]);
    }
    return merged;
}
/**
 * Splits `aShape` into the shape that survives a reduction over `axes`
 * (in original dimension order) and the shape of the reduced axes.
 * Returns `[outShape, reduceShape]`.
 */
function computeOutAndReduceShapes(aShape, axes) {
    // Kept dimensions: every axis not named in `axes`.
    var outShape = aShape.filter(function (_, dim) {
        return axes.indexOf(dim) === -1;
    });
    // Reduced dimensions, in the order the caller listed them.
    var reduceShape = axes.map(function (axis) { return aShape[axis]; });
    return [outShape, reduceShape];
}
/**
 * Re-inserts a length-1 entry at every reduced axis position, turning a
 * reduced shape back into a keepDims-style shape of the original rank.
 */
function expandShapeToKeepDim(shape, axes) {
    var rank = shape.length + axes.length;
    var expanded = [];
    var shapeIdx = 0;
    for (var dim = 0; dim < rank; ++dim) {
        expanded.push(axes.indexOf(dim) === -1 ? shape[shapeIdx++] : 1);
    }
    return expanded;
}
/**
 * Asserts (via the util `assert` helper) that `axes` are the inner-most
 * dimensions of a rank-`rank` tensor; `msg` names the calling op in the
 * error text.
 */
function assertAxesAreInnerMostDims(msg, axes, rank) {
    var buildMessage = function () {
        return msg + " supports only inner-most axes for now. " +
            ("Got axes " + axes + " and rank-" + rank + " input.");
    };
    assert(axesAreInnerMostDims(axes, rank), buildMessage);
}
/**
 * Returns the axes permutation to be used with `tf.transpose`, if such
 * permutation is necessary. Otherwise it returns null. This method is used by
 * operations that operate only on inner-most axes.
 */
function getAxesPermutation(axes, rank) {
    // Fast path: if the axes are already the trailing dimensions, no
    // transpose is required.
    var innerMost = true;
    for (var j = 0; j < axes.length; ++j) {
        if (axes[axes.length - 1 - j] !== rank - 1 - j) {
            innerMost = false;
            break;
        }
    }
    if (innerMost) {
        return null;
    }
    // Kept dimensions first (in order), then the reduced axes moved to the
    // back.
    var perm = [];
    for (var dim = 0; dim < rank; ++dim) {
        if (axes.indexOf(dim) === -1) {
            perm.push(dim);
        }
    }
    for (var k = 0; k < axes.length; ++k) {
        perm.push(axes[k]);
    }
    return perm;
}
/** Returns the axes permutation that undoes the original permutation. */
function getUndoAxesPermutation(axes) {
    // Tag each axis with its position, order by axis value, then read the
    // original positions back out.
    var tagged = axes.map(function (axis, i) { return { axis: axis, pos: i }; });
    tagged.sort(function (p, q) { return p.axis - q.axis; });
    return tagged.map(function (p) { return p.pos; });
}
/**
 * Returns the indices of the `numAxes` inner-most (trailing) dimensions of
 * a rank-`rank` tensor, in ascending order.
 */
function getInnerMostAxes(numAxes, rank) {
    var axes = new Array(numAxes);
    for (var i = 0; i < numAxes; ++i) {
        axes[i] = rank - numAxes + i;
    }
    return axes;
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes log(sum(exp(x))) across the reduction dimensions, using the
 * numerically stable max-shift formulation.
 *
 * Reduces the input along the dimensions given in `axis`. Unless `keepDims`
 * is true, the rank of the array is reduced by 1 for each entry in `axis`.
 * If `keepDims` is true, the reduced dimensions are retained with length 1.
 * If `axis` has no entries, all dimensions are reduced, and an array with a
 * single element is returned.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3]);
 *
 * x.logSumExp().print(); // or tf.logSumExp(x)
 * ```
 *
 * @param x The input tensor.
 * @param axis The dimension(s) to reduce. If null (the default),
 *     reduces all dimensions.
 * @param keepDims If true, retains reduced dimensions with length
 *     of 1. Defaults to false.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
function logSumExp_(x, axis, keepDims) {
    if (axis === void 0) { axis = null; }
    if (keepDims === void 0) { keepDims = false; }
    var $x = convertToTensor(x, 'x', 'logSumExp');
    var axes = parseAxisParam(axis, $x.shape);
    // Shift by the per-axis max so exp() cannot overflow:
    // logSumExp(x) = max(x) + log(sum(exp(x - max(x)))).
    var xMax = max($x, axes, true /* keepDims */);
    var shifted = sub($x, xMax);
    var logOfSum = log(sum(exp(shifted), axes));
    var result = add(reshape(xMax, logOfSum.shape), logOfSum);
    if (keepDims) {
        // Restore the reduced axes as length-1 dimensions.
        return reshape(result, expandShapeToKeepDim(result.shape, axes));
    }
    return result;
}
var logSumExp = op({ logSumExp_: logSumExp_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns the truth value of `a AND b` element-wise. Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([false, false, true, true], 'bool');
 * const b = tf.tensor1d([false, true, false, true], 'bool');
 *
 * a.logicalAnd(b).print();
 * ```
 *
 * @param a The first input tensor. Must be of dtype bool.
 * @param b The second input tensor. Must be of dtype bool.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function logicalAnd_(a, b) {
    var $a = convertToTensor(a, 'a', 'logicalAnd', 'bool');
    var $b = convertToTensor(b, 'b', 'logicalAnd', 'bool');
    // Only shape validation is needed here; the broadcast shape itself is
    // computed by the kernel.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(LogicalAnd, { a: $a, b: $b });
}
var logicalAnd = op({ logicalAnd_: logicalAnd_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns the truth value of `NOT x` element-wise.
 *
 * ```js
 * const a = tf.tensor1d([false, true], 'bool');
 *
 * a.logicalNot().print();
 * ```
 *
 * @param x The input tensor. Must be of dtype 'bool'.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function logicalNot_(x) {
    var $x = convertToTensor(x, 'x', 'logicalNot', 'bool');
    return ENGINE.runKernel(LogicalNot, { x: $x });
}
var logicalNot = op({ logicalNot_: logicalNot_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns the truth value of `a OR b` element-wise. Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([false, false, true, true], 'bool');
 * const b = tf.tensor1d([false, true, false, true], 'bool');
 *
 * a.logicalOr(b).print();
 * ```
 * @param a The first input tensor. Must be of dtype bool.
 * @param b The second input tensor. Must be of dtype bool.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function logicalOr_(a, b) {
    var $a = convertToTensor(a, 'a', 'logicalOr', 'bool');
    var $b = convertToTensor(b, 'b', 'logicalOr', 'bool');
    // Validate that the operand shapes are broadcast-compatible.
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(LogicalOr, { a: $a, b: $b });
}
var logicalOr = op({ logicalOr_: logicalOr_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns the truth value of `a XOR b` element-wise. Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([false, false, true, true], 'bool');
 * const b = tf.tensor1d([false, true, false, true], 'bool');
 *
 * a.logicalXor(b).print();
 * ```
 *
 * @param a The first input tensor. Must be of dtype bool.
 * @param b The second input tensor. Must be of dtype bool.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
function logicalXor_(a, b) {
    var $a = convertToTensor(a, 'a', 'logicalXor', 'bool');
    var $b = convertToTensor(b, 'b', 'logicalXor', 'bool');
    assertAndGetBroadcastShape($a.shape, $b.shape);
    // x ^ y = (x | y) & ~(x & y)
    // Use the already-converted $a/$b so the raw inputs are not re-run
    // through convertToTensor by each nested logical op (the original passed
    // `a`/`b`, re-converting and re-validating them three times).
    return logicalAnd(logicalOr($a, $b), logicalNot(logicalAnd($a, $b)));
}
var logicalXor = op({ logicalXor_: logicalXor_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the 2D max pooling of an image.
 *
 * @param x The input tensor, of rank 4 or rank 3 of shape
 *     `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
 * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
 *     `filterSize` is a single number, then `filterHeight == filterWidth`.
 * @param strides The strides of the pooling: `[strideHeight, strideWidth]`.
 *     If `strides` is a single number, then `strideHeight == strideWidth`.
 * @param pad The type of padding algorithm ('same', 'valid', or a number).
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 * @returns The pooled tensor, rank 3 if the input was rank 3, else rank 4.
 */
function maxPool_(x, filterSize, strides, pad, dimRoundingMode) {
    var $x = convertToTensor(x, 'x', 'maxPool');
    // maxPool does not expose dilations; they are fixed at 1.
    var dilations = 1;
    var reshapedTo4D = false;
    var x4D = $x;
    if ($x.rank === 3) {
        // Promote a single image to a batch of one.
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(x4D.rank === 4, function () {
        return "Error in maxPool: input must be rank 4 but got rank " + x4D.rank + ".";
    });
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () {
        return 'Error in maxPool: Either strides or dilations must be 1. ' +
            ("Got strides " + strides + " and dilations '" + dilations + "'");
    });
    checkPadOnDimRoundingMode('maxPool', pad, dimRoundingMode);
    var attrs = { filterSize: filterSize, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(MaxPool, { x: x4D }, attrs);
    if (reshapedTo4D) {
        // Strip the synthetic batch dimension added above.
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
    }
    return res;
}
var maxPool = op({ maxPool_: maxPool_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the 3D max pooling.
 *
 * ```js
 * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]);
 * const result = tf.maxPool3d(x, 2, 1, 'valid');
 * result.print();
 * ```
 *
 * @param x The input tensor, of rank 5 or rank 4 of shape
 *     `[batch, depth, height, width, inChannels]`. If rank 4, batch of 1 is
 *     assumed.
 * @param filterSize The filter size:
 *     `[filterDepth, filterHeight, filterWidth]`; a single number applies to
 *     all three. Defaults to `[1, 1, 1]`.
 * @param strides The strides of the pooling:
 *     `[strideDepth, strideHeight, strideWidth]`; a single number applies to
 *     all three.
 * @param pad The type of padding algorithm ('same', 'valid', or a number).
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 * @param dataFormat Only "NDHWC" is currently supported. Defaults to "NDHWC".
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function maxPool3d_(x, filterSize, strides, pad, dimRoundingMode, dataFormat) {
    if (filterSize === void 0) { filterSize = [1, 1, 1]; }
    if (dataFormat === void 0) { dataFormat = 'NDHWC'; }
    var $x = convertToTensor(x, 'x', 'maxPool3d');
    var reshapedTo5D = false;
    var x5D = $x;
    if ($x.rank === 4) {
        // Promote a single volume to a batch of one.
        reshapedTo5D = true;
        x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]);
    }
    assert(x5D.rank === 5, function () {
        return "Error in maxPool3d: x must be rank 5 but got rank " + x5D.rank + ".";
    });
    assert(dataFormat === 'NDHWC', function () {
        return "Error in maxPool3d: Only NDHWC is currently supported, " +
            ("but got dataFormat of " + dataFormat);
    });
    checkPadOnDimRoundingMode('maxPool3d', pad, dimRoundingMode);
    var attrs = { filterSize: filterSize, strides: strides, pad: pad, dimRoundingMode: dimRoundingMode, dataFormat: dataFormat };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(MaxPool3D, { x: x5D }, attrs);
    if (reshapedTo5D) {
        // Strip the synthetic batch dimension again.
        return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
    }
    return res;
}
var maxPool3d = op({ maxPool3d_: maxPool3d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the 2D max pooling of an image with Argmax index.
 * The indices in argmax are flattened, so that a maximum value at position
 * `[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c`
 * if include_batch_in_index is False;
 * `((b * height + y) * width + x) * channels + c` if include_batch_in_index
 * is True.
 *
 * The indices returned are always in `[0, height) x [0, width)` before
 * flattening.
 *
 * @param x The input tensor, of rank 4 or rank 3 of shape
 *     `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
 * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
 *     `filterSize` is a single number, then `filterHeight == filterWidth`.
 * @param strides The strides of the pooling: `[strideHeight, strideWidth]`.
 *     If `strides` is a single number, then `strideHeight == strideWidth`.
 * @param pad The type of padding algorithm ('same', 'valid', or a number).
 * @param includeBatchInIndex Defaults to false. Whether to include the batch
 *     dimension in the flattened argmax index.
 * @returns `{result, indexes}`: the pooled values and the flattened argmax
 *     indices.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
function maxPoolWithArgmax_(x, filterSize, strides, pad, includeBatchInIndex) {
    if (includeBatchInIndex === void 0) { includeBatchInIndex = false; }
    var $x = convertToTensor(x, 'x', 'maxPoolWithArgmax');
    var attrs = { filterSize: filterSize, strides: strides, pad: pad, includeBatchInIndex: includeBatchInIndex };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var outputs = ENGINE.runKernel(MaxPoolWithArgmax, { x: $x }, attrs);
    // The kernel returns a pair: [pooled values, argmax indices].
    return { result: outputs[0], indexes: outputs[1] };
}
var maxPoolWithArgmax = op({ maxPoolWithArgmax_: maxPoolWithArgmax_ });
+
/**
 * Returns the max of a and b (`a > b ? a : b`) element-wise.
 * Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([1, 4, 3, 16]);
 * const b = tf.tensor1d([1, 2, 9, 4]);
 *
 * a.maximum(b).print(); // or tf.maximum(a, b)
 * ```
 *
 * ```js
 * // Broadcast maximum a with b.
 * const a = tf.tensor1d([2, 4, 6, 8]);
 * const b = tf.scalar(5);
 *
 * a.maximum(b).print(); // or tf.maximum(a, b)
 * ```
 *
 * @param a The first tensor.
 * @param b The second tensor. Must have the same type as `a`.
 *
 * @doc {heading: 'Operations', subheading: 'Arithmetic'}
 */
function maximum_(a, b) {
    var matched;
    var $a = convertToTensor(a, 'a', 'maximum');
    var $b = convertToTensor(b, 'b', 'maximum');
    matched = __read(makeTypesMatch($a, $b), 2);
    $a = matched[0];
    $b = matched[1];
    // The kernel has no bool variant; run bool inputs in int32 space.
    if ($a.dtype === 'bool') {
        $a = cast($a, 'int32');
        $b = cast($b, 'int32');
    }
    assertAndGetBroadcastShape($a.shape, $b.shape);
    return ENGINE.runKernel(Maximum, { a: $a, b: $b });
}
var maximum = op({ maximum_: maximum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Computes the mean of elements across dimensions of a `tf.Tensor`.
 *
 * Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is
 * true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
 * `axis`. If `keepDims` is true, the reduced dimensions are retained with
 * length 1. If `axis` has no entries, all dimensions are reduced, and a
 * `tf.Tensor` with a single element is returned.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3]);
 *
 * x.mean().print(); // or tf.mean(a)
 * ```
 *
 * @param x The input tensor.
 * @param axis The dimension(s) to reduce. By default it reduces
 *     all dimensions.
 * @param keepDims If true, retains reduced dimensions with size 1.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
function mean_(x, axis, keepDims) {
    if (axis === void 0) { axis = null; }
    if (keepDims === void 0) { keepDims = false; }
    var $x = convertToTensor(x, 'x', 'mean');
    return ENGINE.runKernel(Mean, { x: $x }, { axis: axis, keepDims: keepDims });
}
var mean = op({ mean_: mean_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Creates a `tf.Tensor` with all elements set to 0.
 *
 * ```js
 * tf.zeros([2, 2]).print();
 * ```
 *
 * @param shape An array of integers defining the output tensor shape.
 * @param dtype The type of an element in the resulting tensor. Can
 *     be 'float32', 'int32' or 'bool'. Defaults to 'float'.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function zeros(shape, dtype) {
    if (dtype === void 0) { dtype = 'float32'; }
    // A complex zero tensor is built from zeroed real and imaginary parts.
    if (dtype === 'complex64') {
        return complex(zeros(shape, 'float32'), zeros(shape, 'float32'));
    }
    var values = makeZerosTypedArray(sizeFromShape(shape), dtype);
    return ENGINE.makeTensor(values, shape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Creates a `tf.Tensor` with all elements set to 1.
 *
 * ```js
 * tf.ones([2, 2]).print();
 * ```
 *
 * @param shape An array of integers defining the output tensor shape.
 * @param dtype The type of an element in the resulting tensor. Defaults to
 *     'float'.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function ones(shape, dtype) {
    if (dtype === void 0) { dtype = 'float32'; }
    // Complex one is 1 + 0i: ones for the real part, zeros for imaginary.
    if (dtype === 'complex64') {
        return complex(ones(shape, 'float32'), zeros(shape, 'float32'));
    }
    var values = makeOnesTypedArray(sizeFromShape(shape), dtype);
    return ENGINE.makeTensor(values, shape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Broadcasts parameters for evaluation on an N-D grid.
 *
 * Given one-dimensional coordinate tensors, returns a list of N-D
 * coordinate tensors for evaluating expressions on an N-D grid.
 *
 * Supports cartesian ('xy', the default) and matrix ('ij') indexing
 * conventions; with 'xy' the broadcasting instructions for the first two
 * dimensions are swapped.
 *
 * ```javascript
 * const x = [1, 2, 3];
 * const y = [4, 5, 6];
 * const [X, Y] = tf.meshgrid(x, y);
 * // X = [[1, 2, 3],
 * //      [1, 2, 3],
 * //      [1, 2, 3]]
 * // Y = [[4, 4, 4],
 * //      [5, 5, 5],
 * //      [6, 6, 6]]
 * ```
 *
 * @param x Tensor with rank geq 1.
 * @param y Tensor with rank geq 1.
 * @param indexing Either 'xy' or 'ij'; anything else throws a TypeError.
 * @returns [] if `x` is undefined, [$x] if `y` is undefined, else [X, Y].
 *
 * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
 */
function meshgrid(x, y, _a) {
    var _b = (_a === void 0 ? {} : _a).indexing, indexing = _b === void 0 ? 'xy' : _b;
    if (indexing !== 'xy' && indexing !== 'ij') {
        throw new TypeError(indexing + " is not a valid third argument to meshgrid");
    }
    if (x === undefined) {
        return [];
    }
    var $x = convertToTensor(x, 'x', 'meshgrid', x instanceof Tensor ? x.dtype : 'float32');
    if (y === undefined) {
        return [$x];
    }
    var $y = convertToTensor(y, 'y', 'meshgrid', y instanceof Tensor ? y.dtype : 'float32');
    var w = sizeFromShape($x.shape);
    var h = sizeFromShape($y.shape);
    if (indexing === 'xy') {
        // Cartesian indexing: X varies along columns, Y along rows.
        var row = reshape($x, [1, -1]);
        var col = reshape($y, [-1, 1]);
        return [
            matMul$1(ones([h, 1], row.dtype), row),
            matMul$1(col, ones([1, w], col.dtype)),
        ];
    }
    // Matrix ('ij') indexing: first axis follows x, second follows y.
    var colX = reshape($x, [-1, 1]);
    var rowY = reshape($y, [1, -1]);
    return [
        matMul$1(colX, ones([1, h], colX.dtype)),
        matMul$1(ones([w, 1], rowY.dtype), rowY),
    ];
}
+
+ /**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the minimum value from the input.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the array is reduced by 1 for each entry in `axes`.
+ * If `keepDims` is true, the reduced dimensions are retained with length 1.
+ * If `axes` has no entries, all dimensions are reduced, and an array with a
+ * single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.min().print(); // or tf.min(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.min(axis).print(); // or tf.min(x, axis)
+ * ```
+ *
+ * @param x The input Tensor.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function min_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'min');
+ var inputs = { x: $x };
+ var attrs = { axis: axis, keepDims: keepDims };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ return ENGINE.runKernel(Min, inputs, attrs);
+ }
+ var min = op({ min_: min_ });
+
+ /**
+ * Returns the min of a and b (`a < b ? a : b`) element-wise.
+ * Supports broadcasting.
+ *
+ * We also expose `minimumStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
+ *
+ * a.minimum(b).print(); // or tf.minimum(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast minimum a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
+ *
+ * a.minimum(b).print(); // or tf.minimum(a, b)
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function minimum_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'minimum');
+ var $b = convertToTensor(b, 'b', 'minimum');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ if ($a.dtype === 'bool') {
+ $a = cast($a, 'int32');
+ $b = cast($b, 'int32');
+ }
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Minimum, inputs);
+ }
+ var minimum = op({ minimum_: minimum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Pads a `tf.Tensor` using mirror padding.
+ *
+ * This operation implements the `REFLECT` and `SYMMETRIC` modes of pad.
+ *
+ * ```js
+ * const x = tf.range(0, 9).reshape([1, 1, 3, 3]);
+ * x.mirrorPad([[0, 0], [0, 0], [2, 2], [2, 2]], 'reflect').print();
+ * ```
+ * @param x The tensor to pad.
+ * @param paddings An array of length `R` (the rank of the tensor), where
+ * each element is a length-2 tuple of ints `[padBefore, padAfter]`,
+ * specifying how much to pad along each dimension of the tensor.
+ * In "reflect" mode, the padded regions do not include the borders,
+ * while in "symmetric" mode the padded regions do include the borders.
+ * For example, if the input is `[1, 2, 3]` and paddings is `[0, 2]`,
+ * then the output is `[1, 2, 3, 2, 1]` in "reflect" mode, and
+ * `[1, 2, 3, 3, 2]` in "symmetric" mode.
+ * If `mode` is "reflect" then both `paddings[D, 0]` and `paddings[D, 1]`
+ * must be no greater than `x.shape[D] - 1`. If mode is "symmetric"
+ * then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than
+ * `x.shape[D]`
+ * @param mode String to specify padding mode. Can be `'reflect' | 'symmetric'`
+ */
+ /** @doc {heading: 'Tensors', subheading: 'Transformations'} */
+ function mirrorPad_(x, paddings, mode) {
+ assert(mode === 'reflect' || mode === 'symmetric', function () { return "Invalid mode. Mode must be either reflect or symmetric. " +
+ ("Got " + mode + "."); });
+ var $x = convertToTensor(x, 'x', 'mirrorPad');
+ if ($x.rank === 0) {
+ throw new Error('mirrorPad(scalar) is not defined. ' +
+ 'Pass non-scalar to mirrorPad');
+ }
+ assert(paddings.length === $x.rank, function () { return "Padding doesn't match input. Must be " + $x.rank + ". " +
+ ("Got " + paddings.length + "."); });
+ var shapeOffset = mode === 'reflect' ? 1 : 0;
+ var _loop_1 = function (i) {
+ assert(paddings[i].length === 2, function () { return "Invalid number of paddings. Must be length of 2 each."; });
+ assert(paddings[i][0] >= 0 && paddings[i][0] <= $x.shape[i] - shapeOffset &&
+ paddings[i][1] >= 0 && paddings[i][1] <= $x.shape[i] - shapeOffset, function () { return "Padding in dimension " + i + " cannot be greater than or equal " +
+ ("to " + ($x.shape[i] - shapeOffset) + " or less than 0 for input of ") +
+ ("shape " + $x.shape); });
+ };
+ for (var i = 0; i < $x.rank; i++) {
+ _loop_1(i);
+ }
+ var attrs = { paddings: paddings, mode: mode };
+ var inputs = { x: $x };
+ return ENGINE.runKernel(MirrorPad, inputs, attrs);
+ }
+ var mirrorPad = op({ mirrorPad_: mirrorPad_ });
+
+ /**
+ * Returns the mod of a and b element-wise.
+ * `floor(x / y) * y + mod(x, y) = x`
+ * Supports broadcasting.
+ *
+ * We also expose `tf.modStrict` which has the same signature as this op and
+ * asserts that `a` and `b` are the same shape (does not broadcast).
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
+ *
+ * a.mod(b).print(); // or tf.mod(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast a mod b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
+ *
+ * a.mod(b).print(); // or tf.mod(a, b)
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function mod_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'mod');
+ var $b = convertToTensor(b, 'b', 'mod');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(Mod, inputs);
+ }
+ var mod = op({ mod_: mod_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes square of `x` element-wise: `x ^ 2`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]);
+ *
+ * x.square().print(); // or tf.square(x)
+ * ```
+ * @param x The input Tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function square_(x) {
+ var $x = convertToTensor(x, 'x', 'square');
+ var attrs = {};
+ return ENGINE.runKernel('Square', { x: $x }, attrs);
+ }
+ var square = op({ square_: square_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Calculates the mean and variance of `x`. The mean and variance are
+ * calculated by aggregating the contents of `x` across `axes`. If `x` is
+ * 1-D and `axes = [0]` this is just the mean and variance of a vector.
+ *
+ * @param x The input tensor.
+ * @param axis The dimension(s) along with to compute mean and
+ * variance. By default it reduces all dimensions.
+ * @param keepDims If true, the moments have the same dimensionality as the
+ * input.
+ * @return An object with two keys: `mean` and `variance`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
+ function moments_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ x = convertToTensor(x, 'x', 'moments');
+ var axes = parseAxisParam(axis, x.shape);
+ var xMean = mean(x, axes, keepDims);
+ var keepDimsShape = xMean.shape;
+ if (!keepDims) {
+ keepDimsShape = expandShapeToKeepDim(xMean.shape, axes);
+ }
+ var devSquared = square(sub(cast(x, 'float32'), reshape(xMean, keepDimsShape)));
+ var variance = mean(devSquared, axes, keepDims);
+ return { mean: xMean, variance: variance };
+ }
+ var moments = op({ moments_: moments_ });
+
+ /**
+ * Computes the next states and outputs of a stack of LSTMCells.
+ *
+ * Each cell output is used as input to the next cell.
+ *
+ * Returns `[cellState, cellOutput]`.
+ *
+ * Derived from tf.contrib.rn.MultiRNNCell.
+ *
+ * @param lstmCells Array of LSTMCell functions.
+ * @param data The input to the cell.
+ * @param c Array of previous cell states.
+ * @param h Array of previous cell outputs.
+ *
+ * @doc {heading: 'Operations', subheading: 'RNN'}
+ */
+ function multiRNNCell_(lstmCells, data, c, h) {
+ var $data = convertToTensor(data, 'data', 'multiRNNCell');
+ var $c = convertToTensorArray(c, 'c', 'multiRNNCell');
+ var $h = convertToTensorArray(h, 'h', 'multiRNNCell');
+ var input = $data;
+ var newStates = [];
+ for (var i = 0; i < lstmCells.length; i++) {
+ var output = lstmCells[i](input, $c[i], $h[i]);
+ newStates.push(output[0]);
+ newStates.push(output[1]);
+ input = output[1];
+ }
+ var newC = [];
+ var newH = [];
+ for (var i = 0; i < newStates.length; i += 2) {
+ newC.push(newStates[i]);
+ newH.push(newStates[i + 1]);
+ }
+ return [newC, newH];
+ }
+ var multiRNNCell = op({ multiRNNCell_: multiRNNCell_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values drawn from a multinomial distribution.
+ *
+ * ```js
+ * const probs = tf.tensor([.75, .25]);
+ * tf.multinomial(probs, 3).print();
+ * ```
+ *
+ * @param logits 1D array with unnormalized log-probabilities, or
+ * 2D array of shape `[batchSize, numOutcomes]`. See the `normalized`
+ * parameter.
+ * @param numSamples Number of samples to draw for each row slice.
+ * @param seed The seed number.
+ * @param normalized Whether the provided `logits` are normalized true
+ * probabilities (sum to 1). Defaults to false.
+ * @return 1D array of shape `[numSamples]`, or 2D array of shape
+ * `[batchSize, numSamples]`, depending on the rank of the input.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
+ function multinomial_(logits, numSamples, seed, normalized) {
+ if (normalized === void 0) { normalized = false; }
+ var $logits = convertToTensor(logits, 'logits', 'multinomial');
+ var numOutcomes = $logits.size;
+ var origRank = $logits.rank;
+ if (numOutcomes < 2) {
+ throw new Error("Error in multinomial: you need at least 2 outcomes, but got " +
+ (numOutcomes + "."));
+ }
+ if (origRank > 2) {
+ throw new Error("Rank of probabilities must be 1 or 2, but is " + origRank);
+ }
+ // TODO(lina128): Investigate correct seed behavior. The code seems not allow
+ // setting see to 0.
+ seed = seed || Math.random();
+ // The kernel only accepts (and returns) rank 2 tensors.
+ var logits2D = origRank === 1 ? reshape($logits, [1, -1]) : $logits;
+ var inputs = { logits: logits2D };
+ var attrs = { numSamples: numSamples, seed: seed, normalized: normalized };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ var res = ENGINE.runKernel(Multinomial, inputs, attrs);
+ // tslint:disable-next-line:no-unnecessary-type-assertion
+ return origRank === 1 ? reshape(res, [res.size]) : res;
+ }
+ var multinomial = op({ multinomial_: multinomial_ });
+
+ /**
+ * Returns the truth value of (a != b) element-wise. Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([0, 2, 3]);
+ *
+ * a.notEqual(b).print();
+ * ```
+ * @param a The first input tensor.
+ * @param b The second input tensor. Must have the same dtype as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
+ function notEqual_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'notEqual', 'string_or_numeric');
+ var $b = convertToTensor(b, 'b', 'notEqual', 'string_or_numeric');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ return ENGINE.runKernel(NotEqual, inputs);
+ }
+ var notEqual = op({ notEqual_: notEqual_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with all elements set to 1 with the same shape as the
+ * given tensor.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2]);
+ * tf.onesLike(x).print();
+ * ```
+ * @param x A tensor.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
+ function onesLike_(x) {
+ var $x = convertToTensor(x, 'x', 'onesLike');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(OnesLike, inputs);
+ }
+ var onesLike = op({ onesLike_: onesLike_ });
+
+ /**
+ * Computes the outer product of two vectors, `v1` and `v2`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ * const b = tf.tensor1d([3, 4, 5]);
+ *
+ * tf.outerProduct(a, b).print();
+ * ```
+ * @param v1 The first vector in the outer product operation.
+ * @param v2 The second vector in the outer product operation.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
+ function outerProduct_(v1, v2) {
+ var $v1 = convertToTensor(v1, 'v1', 'outerProduct');
+ var $v2 = convertToTensor(v2, 'v2', 'outerProduct');
+ assert($v1.rank === 1 && $v2.rank === 1, function () { return "Error in outerProduct: inputs must be rank 1, but got ranks " +
+ ($v1.rank + " and " + $v2.rank + "."); });
+ var v12D = reshape($v1, [-1, 1]);
+ var v22D = reshape($v2, [1, -1]);
+ return matMul$1(v12D, v22D);
+ }
+ var outerProduct = op({ outerProduct_: outerProduct_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Pads a `tf.Tensor` with a given value and paddings.
+ *
+ * This operation implements `CONSTANT` mode. For `REFLECT` and `SYMMETRIC`,
+ * refer to `tf.mirrorPad`
+ *
+ * Also available are stricter rank-specific methods with the same signature
+ * as this method that assert that `paddings` is of given length.
+ * - `tf.pad1d`
+ * - `tf.pad2d`
+ * - `tf.pad3d`
+ * - `tf.pad4d`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * x.pad([[1, 2]]).print();
+ * ```
+ * @param x The tensor to pad.
+ * @param paddings An array of length `R` (the rank of the tensor), where
+ * each element is a length-2 tuple of ints `[padBefore, padAfter]`,
+ * specifying how much to pad along each dimension of the tensor.
+ * @param constantValue The pad value to use. Defaults to 0.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
+ function pad_(x, paddings, constantValue) {
+ if (constantValue === void 0) { constantValue = 0; }
+ var $x = convertToTensor(x, 'x', 'pad');
+ if ($x.rank === 0) {
+ throw new Error('pad(scalar) is not defined. Pass non-scalar to pad');
+ }
+ var attrs = { paddings: paddings, constantValue: constantValue };
+ var inputs = { x: $x };
+ return ENGINE.runKernel(PadV2, inputs, attrs);
+ }
+ var pad = op({ pad_: pad_ });
+
+ /**
+ * Pads a `tf.Tensor1D` with a given value and paddings. See `pad` for details.
+ */
+ function pad1d_(x, paddings, constantValue) {
+ if (constantValue === void 0) { constantValue = 0; }
+ assert(paddings.length === 2, function () { return 'Invalid number of paddings. Must be length of 2.'; });
+ return pad(x, [paddings], constantValue);
+ }
+ var pad1d = op({ pad1d_: pad1d_ });
+
+ /**
+ * Pads a `tf.Tensor2D` with a given value and paddings. See `pad` for details.
+ */
+ function pad2d_(x, paddings, constantValue) {
+ if (constantValue === void 0) { constantValue = 0; }
+ assert(paddings.length === 2 && paddings[0].length === 2 &&
+ paddings[1].length === 2, function () { return 'Invalid number of paddings. Must be length of 2 each.'; });
+ return pad(x, paddings, constantValue);
+ }
+ var pad2d = op({ pad2d_: pad2d_ });
+
+ /**
+ * Pads a `tf.Tensor3D` with a given value and paddings. See `pad` for details.
+ */
+ function pad3d_(x, paddings, constantValue) {
+ if (constantValue === void 0) { constantValue = 0; }
+ assert(paddings.length === 3 && paddings[0].length === 2 &&
+ paddings[1].length === 2 && paddings[2].length === 2, function () { return 'Invalid number of paddings. Must be length of 2 each.'; });
+ return pad(x, paddings, constantValue);
+ }
+ var pad3d = op({ pad3d_: pad3d_ });
+
+ /**
+ * Pads a `tf.Tensor4D` with a given value and paddings. See `pad` for details.
+ */
+ function pad4d_(x, paddings, constantValue) {
+ if (constantValue === void 0) { constantValue = 0; }
+ assert(paddings.length === 4 && paddings[0].length === 2 &&
+ paddings[1].length === 2 && paddings[2].length === 2 &&
+ paddings[3].length === 2, function () { return 'Invalid number of paddings. Must be length of 2 each.'; });
+ return pad(x, paddings, constantValue);
+ }
+ var pad4d = op({ pad4d_: pad4d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * This operation divides "spatial" dimensions `[1, ..., M]` of the input into
+ * a grid of blocks of shape `blockShape`, and interleaves these blocks with
+ * the "batch" dimension (0) such that in the output, the spatial
+ * dimensions `[1, ..., M]` correspond to the position within the grid,
+ * and the batch dimension combines both the position within a spatial block
+ * and the original batch position. Prior to division into blocks,
+ * the spatial dimensions of the input are optionally zero padded
+ * according to `paddings`. See below for a precise description.
+ *
+ * ```js
+ * const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
+ * const blockShape = [2, 2];
+ * const paddings = [[0, 0], [0, 0]];
+ *
+ * x.spaceToBatchND(blockShape, paddings).print();
+ * ```
+ *
+ * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
+ * remainingShape`, where spatialShape has `M` dimensions.
+ * @param blockShape A 1-D array. Must have shape `[M]`, all values must
+ * be >= 1.
+ * @param paddings A 2-D array. Must have shape `[M, 2]`, all values must be >=
+ * 0. `paddings[i] = [padStart, padEnd]` specifies the amount to zero-pad
+ * from input dimension `i + 1`, which corresponds to spatial dimension `i`. It
+ * is required that
+ * `(inputShape[i + 1] + padStart + padEnd) % blockShape[i] === 0`
+ *
+ * This operation is equivalent to the following steps:
+ *
+ * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input
+ * according to `paddings` to produce `padded` of shape paddedShape.
+ *
+ * 2. Reshape `padded` to `reshapedPadded` of shape:
+ * `[batch] + [paddedShape[1] / blockShape[0], blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1], blockShape[M-1]] + remainingShape`
+ *
+ * 3. Permute dimensions of `reshapedPadded` to produce `permutedReshapedPadded`
+ * of shape: `blockShape + [batch] + [paddedShape[1] / blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1]] + remainingShape`
+ *
+ * 4. Reshape `permutedReshapedPadded` to flatten `blockShape` into the
+ * batch dimension, producing an output tensor of shape:
+ * `[batch * prod(blockShape)] + [paddedShape[1] / blockShape[0], ...,
+ * paddedShape[M] / blockShape[M-1]] + remainingShape`
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
+ function spaceToBatchND_(x, blockShape, paddings) {
+ var $x = convertToTensor(x, 'x', 'spaceToBatchND');
+ assert($x.rank >= 1 + blockShape.length, function () { return "input rank " + $x.rank + " should be > than [blockShape] " + blockShape.length; });
+ assert(paddings.length === blockShape.length, function () { return "paddings.shape[0] " + paddings.length + " must be equal to [blockShape] " + blockShape.length; });
+ assert($x.shape.reduce(function (a, b, i) {
+ if (i > 0 && i <= blockShape.length) {
+ return a &&
+ ((b + paddings[i - 1][0] + paddings[i - 1][1]) %
+ blockShape[i - 1] ===
+ 0);
+ }
+ return a;
+ }, true), function () { return "input spatial dimensions " + $x.shape.slice(1) + " with paddings " + paddings.toString() + " must be divisible by blockShapes " + blockShape.toString(); });
+ var inputs = { x: $x };
+ var attrs = { blockShape: blockShape, paddings: paddings };
+ return ENGINE.runKernel(SpaceToBatchND, inputs, attrs);
+ }
+ var spaceToBatchND = op({ spaceToBatchND_: spaceToBatchND_ });
+
+ /**
+ * Performs an N-D pooling operation
+ *
+ * @param input The input tensor, of rank 4 or rank 3 of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param windowShape The filter size: `[filterHeight, filterWidth]`. If
+ * `filterSize` is a single number, then `filterHeight == filterWidth`.
+ * @param poolingType The type of pooling, either 'max' or 'avg'.
+ * @param pad The type of padding algorithm:
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_guides/python/nn#Convolution](
+ * https://www.tensorflow.org/api_guides/python/nn#Convolution)
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in dilated pooling. Defaults to `[1, 1]`. If `dilationRate` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
+ * `strides` is a single number, then `strideHeight == strideWidth`.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
    // N-D pooling entry point. The statement order below (normalize args ->
    // reshape to 4D -> compute conv info -> space-to-batch workaround) is
    // load-bearing; code kept byte-identical, comments only.
    function pool_(input, windowShape, poolingType, pad, dilations, strides, dimRoundingMode) {
        if (dilations == null) {
            dilations = [1, 1];
        }
        if (strides == null) {
            strides = 1;
        }
        // NOTE(review): numeric pad 0 is coerced to 'valid'; other numeric
        // pads pass through to computePool2DInfo unchanged.
        if (pad === 0) {
            pad = 'valid';
        }
        var $x = convertToTensor(input, 'x', 'maxPool');
        var x4D = $x;
        var reshapedTo4D = false;
        // Rank-3 input is treated as a single-image batch of 1.
        if ($x.rank === 3) {
            reshapedTo4D = true;
            x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
        }
        assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in pool: Either strides or dilations must be 1. ' +
            ("Got strides " + strides + " and dilations '" + dilations + "'"); });
        var convInfo = computePool2DInfo(x4D.shape, windowShape, strides, dilations, pad);
        var dilation = [convInfo.dilationHeight, convInfo.dilationWidth];
        // The following implementation does batchToSpace(pool(spaceToBatch(x)))
        // whenever dilation > 1 since the TF kernels do not support dilation > 1.
        // tslint:disable-next-line:max-line-length
        // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L1037
        var basePadding;
        if (pad === 'same') {
            basePadding = withSpaceToBatchBasePaddings([convInfo.filterHeight, convInfo.filterWidth], dilation);
        }
        else {
            basePadding = [[0, 0], [0, 0]];
        }
        var isDilationOne = dilation[0] === 1 && dilation[1] === 1;
        // adjustedPadding feeds spaceToBatchND; adjustedCrops undoes the extra
        // end-padding after the pool via batchToSpaceND.
        var _a = __read(requiredSpaceToBatchPaddings([convInfo.inHeight, convInfo.inWidth], dilation, basePadding), 2), adjustedPadding = _a[0], adjustedCrops = _a[1];
        // When dilated, padding was already applied by spaceToBatch, so the
        // inner pool runs in 'valid' mode on the transformed input.
        var convertedPad = isDilationOne ? pad : 'valid';
        var convertedX = isDilationOne ? x4D : spaceToBatchND(x4D, dilation, adjustedPadding);
        var forwardOp = poolingType === 'avg' ?
            function () { return avgPool(convertedX, windowShape, strides, convertedPad, dimRoundingMode); } :
            function () { return maxPool(convertedX, windowShape, strides, convertedPad, dimRoundingMode); };
        var y = forwardOp();
        var res = isDilationOne ? y : batchToSpaceND(y, dilation, adjustedCrops);
        // Undo the synthetic batch dimension for rank-3 callers.
        if (reshapedTo4D) {
            return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
        }
        return res;
    }
+ // Helper function to compute crops and paddings for pool with dilation > 1.
+ // tslint:disable-next-line:max-line-length
+ // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/array_ops.py#L2184
+ function requiredSpaceToBatchPaddings(inputShape, blockShape, basePadding) {
+ var padStart = basePadding.map(function (b) { return b[0]; });
+ var origPadEnd = basePadding.map(function (b) { return b[1]; });
+ var fullInputShape = inputShape.concat(padStart, origPadEnd);
+ var padEndExtra = blockShape.map(function (b, i) { return (b - fullInputShape[i] % b) % b; });
+ var padEnd = origPadEnd.map(function (s, i) { return s + padEndExtra[i]; });
+ var paddings = blockShape.map(function (_, i) { return [padStart[i], padEnd[i]]; });
+ var crops = blockShape.map(function (_, i) { return [0, padEndExtra[i]]; });
+ return [paddings, crops];
+ }
+ // Helper function to compute base paddings for pool with dilation > 1.
+ // tslint:disable-next-line:max-line-length
+ // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L524
+ function withSpaceToBatchBasePaddings(filterShape, dilation) {
+ // Spatial dimensions of the filters and the upsampled filters in which we
+ // introduce (rate - 1) zeros between consecutive filter values.
+ var dilatedFilterShape = filterShape.map(function (s, i) {
+ return s + (s - 1) * (dilation[i] - 1);
+ });
+ var padExtraShape = dilatedFilterShape.map(function (s) { return s - 1; });
+ // When padding is odd, we pad more at end, following the same
+ // convention as conv2d.
+ var padExtraStart = padExtraShape.map(function (s) { return Math.floor(s / 2); });
+ var padExtraEnd = padExtraShape.map(function (s, i) { return s - padExtraStart[i]; });
+ return padExtraShape.map(function (_, i) {
+ return [padExtraStart[i], padExtraEnd[i]];
+ });
+ }
+ var pool = op({ pool_: pool_ });
+
+ /**
+ * Computes the power of one `tf.Tensor` to another. Supports broadcasting.
+ *
+ * Given a `tf.Tensor` x and a `tf.Tensor` y, this operation computes x^y for
+ * corresponding elements in x and y. The result's dtype will be the upcasted
+ * type of the `base` and `exp` dtypes.
+ *
+ * ```js
+ * const a = tf.tensor([[2, 3], [4, 5]])
+ * const b = tf.tensor([[1, 2], [3, 0]]).toInt();
+ *
+ * a.pow(b).print(); // or tf.pow(a, b)
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor([[1, 2], [3, 4]])
+ * const b = tf.tensor(2).toInt();
+ *
+ * a.pow(b).print(); // or tf.pow(a, b)
+ * ```
+ * We also expose `powStrict` which has the same signature as this op and
+ * asserts that `base` and `exp` are the same shape (does not broadcast).
+ *
+ * @param base The base `tf.Tensor` to pow element-wise.
+ * @param exp The exponent `tf.Tensor` to pow element-wise.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function pow_(base, exp) {
+ var _a;
+ var $base = convertToTensor(base, 'base', 'pow');
+ var $exp = convertToTensor(exp, 'exp', 'pow');
+ _a = __read(makeTypesMatch($base, $exp), 2), $base = _a[0], $exp = _a[1];
+ var inputs = { a: $base, b: $exp };
+ return ENGINE.runKernel(Pow, inputs);
+ }
+ var pow = op({ pow_: pow_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes leaky rectified linear element-wise with parametric alphas.
+ *
+ * `x < 0 ? alpha * x : f(x) = x`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ * const alpha = tf.scalar(0.1);
+ *
+ * x.prelu(alpha).print(); // or tf.prelu(x, alpha)
+ * ```
+ * @param x The input tensor.
+ * @param alpha Scaling factor for negative values.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function prelu_(x, alpha) {
+ var $x = convertToTensor(x, 'x', 'prelu');
+ var $alpha = convertToTensor(alpha, 'alpha', 'prelu');
+ var inputs = { x: $x, alpha: $alpha };
+ return ENGINE.runKernel(Prelu, inputs);
+ }
+ var prelu = op({ prelu_: prelu_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the product of elements across dimensions of a `tf.Tensor`.
+ *
+ * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
+ * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
+ * `axes`. If `keepDims` is true, the reduced dimensions are retained with
+ * length 1. If `axes` has no entries, all dimensions are reduced, and a
+ * `tf.Tensor` with a single element is returned.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3]);
+ *
+ * x.prod().print(); // or tf.prod(x)
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.prod(axis).print(); // or tf.prod(x, axis)
+ * ```
+ *
+ * @param x The input tensor to compute the product over. If the dtype is `bool`
+ * it will be converted to `int32` and the output dtype will be `int32`.
+ * @param axis The dimension(s) to reduce. By default it reduces
+ * all dimensions.
+ * @param keepDims If true, retains reduced dimensions with size 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Reduction'}
+ */
+ function prod_(x, axis, keepDims) {
+ if (axis === void 0) { axis = null; }
+ if (keepDims === void 0) { keepDims = false; }
+ var $x = convertToTensor(x, 'x', 'prod');
+ if ($x.dtype === 'bool') {
+ // bool is not an allowed type for the underlying kernel.
+ $x = cast($x, 'int32');
+ }
+ var inputs = { x: $x };
+ var attrs = { axis: axis, keepDims: keepDims };
+ return ENGINE.runKernel(Prod, inputs, attrs);
+ }
+ var prod = op({ prod_: prod_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a random number generator
+ * function defined by the user.
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param randFunction A random number generator function which is called
+ * for each element in the output tensor.
+ * @param dtype The data type of the output tensor. Defaults to 'float32'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
+ function rand_(shape, randFunction, dtype) {
+ var size = sizeFromShape(shape);
+ var values = null;
+ if (dtype == null || dtype === 'float32') {
+ values = new Float32Array(size);
+ }
+ else if (dtype === 'int32') {
+ values = new Int32Array(size);
+ }
+ else if (dtype === 'bool') {
+ values = new Uint8Array(size);
+ }
+ else {
+ throw new Error("Unknown data type " + dtype);
+ }
+ for (var i = 0; i < size; i++) {
+ values[i] = randFunction();
+ }
+ return ENGINE.makeTensor(values, shape, dtype);
+ }
+ var rand = op({ rand_: rand_ });
+
// Best available global object: modern globalThis, then the environment
// specific fallbacks (browser window, Node global, worker self).
var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};
// Runs a CommonJS-style factory against a fresh module object and returns
// whatever the factory left on module.exports.
function createCommonjsModule(fn) {
    var module = { exports: {} };
    fn(module, module.exports);
    return module.exports;
}
+
// Bundled "alea" seedable PRNG from the seedrandom package. Exposes
// impl(seed, opts) via module.exports; the returned prng() yields floats
// in [0, 1) and carries .int32/.double/.quick/.state helpers.
var alea = createCommonjsModule(function (module) {
    // A port of an algorithm by Johannes Baagøe <[email protected]>, 2010
    // http://baagoe.com/en/RandomMusings/javascript/
    // https://github.com/nquinlan/better-random-numbers-for-javascript-mirror
    // Original work is under MIT license -
    // Copyright (C) 2010 by Johannes Baagøe <[email protected]>
    //
    // Permission is hereby granted, free of charge, to any person obtaining a copy
    // of this software and associated documentation files (the "Software"), to deal
    // in the Software without restriction, including without limitation the rights
    // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    // copies of the Software, and to permit persons to whom the Software is
    // furnished to do so, subject to the following conditions:
    //
    // The above copyright notice and this permission notice shall be included in
    // all copies or substantial portions of the Software.
    //
    // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    // THE SOFTWARE.
    (function (global, module, define) {
        // Generator state: three fractional values s0..s2 in [0, 1) and an
        // integer carry c (a multiply-with-carry construction).
        function Alea(seed) {
            var me = this, mash = Mash();
            // Advance one step; returns the new fractional part as the output.
            me.next = function () {
                var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32
                me.s0 = me.s1;
                me.s1 = me.s2;
                return me.s2 = t - (me.c = t | 0);
            };
            // Apply the seeding algorithm from Baagoe.
            me.c = 1;
            me.s0 = mash(' ');
            me.s1 = mash(' ');
            me.s2 = mash(' ');
            me.s0 -= mash(seed);
            if (me.s0 < 0) {
                me.s0 += 1;
            }
            me.s1 -= mash(seed);
            if (me.s1 < 0) {
                me.s1 += 1;
            }
            me.s2 -= mash(seed);
            if (me.s2 < 0) {
                me.s2 += 1;
            }
            // Drop the hasher so it can be garbage collected.
            mash = null;
        }
        // Copies generator state between an Alea instance and a plain object.
        function copy(f, t) {
            t.c = f.c;
            t.s0 = f.s0;
            t.s1 = f.s1;
            t.s2 = f.s2;
            return t;
        }
        // Public factory: builds a seeded prng, optionally restoring state
        // from opts.state and exposing a .state() snapshot method.
        function impl(seed, opts) {
            var xg = new Alea(seed), state = opts && opts.state, prng = xg.next;
            prng.int32 = function () { return (xg.next() * 0x100000000) | 0; };
            prng.double = function () {
                return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53
            };
            prng.quick = prng;
            if (state) {
                if (typeof (state) == 'object')
                    copy(state, xg);
                prng.state = function () { return copy(xg, {}); };
            }
            return prng;
        }
        // String hasher used only during seeding.
        function Mash() {
            var n = 0xefc8249d;
            var mash = function (data) {
                data = String(data);
                for (var i = 0; i < data.length; i++) {
                    n += data.charCodeAt(i);
                    var h = 0.02519603282416938 * n;
                    n = h >>> 0;
                    h -= n;
                    h *= n;
                    n = h >>> 0;
                    h -= n;
                    n += h * 0x100000000; // 2^32
                }
                return (n >>> 0) * 2.3283064365386963e-10; // 2^-32
            };
            return mash;
        }
        if (module && module.exports) {
            module.exports = impl;
        }
        else if (define && define.amd) {
            define(function () { return impl; });
        }
        else {
            this.alea = impl;
        }
    })(commonjsGlobal, module, // present in node.js
    // NOTE(review): upstream passes `typeof define == 'function' && define`;
    // the bundler appears to have rewritten `define` to `undefined`, so this
    // is always false and the AMD branch above is dead code here.
    (typeof undefined) == 'function' // present with an AMD loader
    );
});
+
// Bundled "xor128" seedable PRNG from the seedrandom package.
var xor128 = createCommonjsModule(function (module) {
    // A Javascript implementation of the "xor128" prng algorithm by
    // George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper
    (function (global, module, define) {
        // 128-bit xor-shift generator state: four 32-bit words x, y, z, w.
        function XorGen(seed) {
            var me = this, strseed = '';
            me.x = 0;
            me.y = 0;
            me.z = 0;
            me.w = 0;
            // Set up generator function.
            me.next = function () {
                var t = me.x ^ (me.x << 11);
                me.x = me.y;
                me.y = me.z;
                me.z = me.w;
                return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8);
            };
            if (seed === (seed | 0)) {
                // Integer seed.
                me.x = seed;
            }
            else {
                // String seed.
                strseed += seed;
            }
            // Mix in string seed, then discard an initial batch of 64 values.
            for (var k = 0; k < strseed.length + 64; k++) {
                me.x ^= strseed.charCodeAt(k) | 0;
                me.next();
            }
        }
        // Copies generator state between a XorGen instance and a plain object.
        function copy(f, t) {
            t.x = f.x;
            t.y = f.y;
            t.z = f.z;
            t.w = f.w;
            return t;
        }
        // Public factory: returns a prng in [0, 1) with the standard
        // seedrandom helper methods attached.
        function impl(seed, opts) {
            var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
            // Combine two 32-bit draws into a full-precision 53-bit double.
            prng.double = function () {
                do {
                    var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                } while (result === 0);
                return result;
            };
            prng.int32 = xg.next;
            prng.quick = prng;
            if (state) {
                if (typeof (state) == 'object')
                    copy(state, xg);
                prng.state = function () { return copy(xg, {}); };
            }
            return prng;
        }
        if (module && module.exports) {
            module.exports = impl;
        }
        else if (define && define.amd) {
            define(function () { return impl; });
        }
        else {
            this.xor128 = impl;
        }
    })(commonjsGlobal, module, // present in node.js
    // NOTE(review): always false — the bundler appears to have replaced the
    // original `typeof define` with `typeof undefined`; AMD branch is dead.
    (typeof undefined) == 'function' // present with an AMD loader
    );
});
+
// Bundled "xorwow" seedable PRNG from the seedrandom package.
var xorwow = createCommonjsModule(function (module) {
    // A Javascript implementation of the "xorwow" prng algorithm by
    // George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper
    (function (global, module, define) {
        // 160-bit xor-shift state (x, y, z, w, v) combined with a Weyl
        // sequence counter d.
        function XorGen(seed) {
            var me = this, strseed = '';
            // Set up generator function.
            me.next = function () {
                var t = (me.x ^ (me.x >>> 2));
                me.x = me.y;
                me.y = me.z;
                me.z = me.w;
                me.w = me.v;
                return (me.d = (me.d + 362437 | 0)) +
                    (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0;
            };
            me.x = 0;
            me.y = 0;
            me.z = 0;
            me.w = 0;
            me.v = 0;
            if (seed === (seed | 0)) {
                // Integer seed.
                me.x = seed;
            }
            else {
                // String seed.
                strseed += seed;
            }
            // Mix in string seed, then discard an initial batch of 64 values.
            for (var k = 0; k < strseed.length + 64; k++) {
                me.x ^= strseed.charCodeAt(k) | 0;
                // Once the whole seed has been mixed in, derive the Weyl
                // counter's starting value from the current x word.
                if (k == strseed.length) {
                    me.d = me.x << 10 ^ me.x >>> 4;
                }
                me.next();
            }
        }
        // Copies generator state between a XorGen instance and a plain object.
        function copy(f, t) {
            t.x = f.x;
            t.y = f.y;
            t.z = f.z;
            t.w = f.w;
            t.v = f.v;
            t.d = f.d;
            return t;
        }
        // Public factory: returns a prng in [0, 1) with the standard
        // seedrandom helper methods attached.
        function impl(seed, opts) {
            var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
            // Combine two 32-bit draws into a full-precision 53-bit double.
            prng.double = function () {
                do {
                    var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                } while (result === 0);
                return result;
            };
            prng.int32 = xg.next;
            prng.quick = prng;
            if (state) {
                if (typeof (state) == 'object')
                    copy(state, xg);
                prng.state = function () { return copy(xg, {}); };
            }
            return prng;
        }
        if (module && module.exports) {
            module.exports = impl;
        }
        else if (define && define.amd) {
            define(function () { return impl; });
        }
        else {
            this.xorwow = impl;
        }
    })(commonjsGlobal, module, // present in node.js
    // NOTE(review): always false — the bundler appears to have replaced the
    // original `typeof define` with `typeof undefined`; AMD branch is dead.
    (typeof undefined) == 'function' // present with an AMD loader
    );
});
+
// Bundled "xorshift7" seedable PRNG from the seedrandom package.
var xorshift7 = createCommonjsModule(function (module) {
    // A Javascript implementation of the "xorshift7" algorithm by
    // François Panneton and Pierre L'ecuyer:
    // "On the Xorshift Random Number Generators"
    // http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf
    (function (global, module, define) {
        // 256-bit state: a circular buffer X of eight 32-bit words plus the
        // current index i.
        function XorGen(seed) {
            var me = this;
            // Set up generator function.
            me.next = function () {
                // Update xor generator.
                var X = me.x, i = me.i, t, v;
                t = X[i];
                t ^= (t >>> 7);
                v = t ^ (t << 24);
                t = X[(i + 1) & 7];
                v ^= t ^ (t >>> 10);
                t = X[(i + 3) & 7];
                v ^= t ^ (t >>> 3);
                t = X[(i + 4) & 7];
                v ^= t ^ (t << 7);
                t = X[(i + 7) & 7];
                t = t ^ (t << 13);
                v ^= t ^ (t << 9);
                X[i] = v;
                me.i = (i + 1) & 7;
                return v;
            };
            function init(me, seed) {
                var j, X = [];
                if (seed === (seed | 0)) {
                    // Seed state array using a 32-bit integer.
                    X[0] = seed;
                }
                else {
                    // Seed state using a string.
                    seed = '' + seed;
                    for (j = 0; j < seed.length; ++j) {
                        X[j & 7] = (X[j & 7] << 15) ^
                            (seed.charCodeAt(j) + X[(j + 1) & 7] << 13);
                    }
                }
                // Enforce an array length of 8, not all zeroes.
                while (X.length < 8)
                    X.push(0);
                for (j = 0; j < 8 && X[j] === 0; ++j)
                    ;
                if (j == 8)
                    X[7] = -1;
                me.x = X;
                me.i = 0;
                // Discard an initial 256 values.
                for (j = 256; j > 0; --j) {
                    me.next();
                }
            }
            init(me, seed);
        }
        // Copies generator state between a XorGen instance and a plain object.
        function copy(f, t) {
            t.x = f.x.slice();
            t.i = f.i;
            return t;
        }
        // Public factory. A null/undefined seed falls back to the clock.
        function impl(seed, opts) {
            if (seed == null)
                seed = +(new Date);
            var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
            // Combine two 32-bit draws into a full-precision 53-bit double.
            prng.double = function () {
                do {
                    var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                } while (result === 0);
                return result;
            };
            prng.int32 = xg.next;
            prng.quick = prng;
            if (state) {
                // Restore only when the saved state carries the X buffer.
                if (state.x)
                    copy(state, xg);
                prng.state = function () { return copy(xg, {}); };
            }
            return prng;
        }
        if (module && module.exports) {
            module.exports = impl;
        }
        else if (define && define.amd) {
            define(function () { return impl; });
        }
        else {
            this.xorshift7 = impl;
        }
    })(commonjsGlobal, module, // present in node.js
    // NOTE(review): always false — the bundler appears to have replaced the
    // original `typeof define` with `typeof undefined`; AMD branch is dead.
    (typeof undefined) == 'function' // present with an AMD loader
    );
});
+
// Bundled "xor4096" seedable PRNG from the seedrandom package.
var xor4096 = createCommonjsModule(function (module) {
    // A Javascript implementation of Richard Brent's Xorgens xor4096 algorithm.
    //
    // This fast non-cryptographic random number generator is designed for
    // use in Monte-Carlo algorithms. It combines a long-period xorshift
    // generator with a Weyl generator, and it passes all common batteries
    // of statistical tests for randomness while consuming only a few nanoseconds
    // for each prng generated. For background on the generator, see Brent's
    // paper: "Some long-period random number generators using shifts and xors."
    // http://arxiv.org/pdf/1004.3115v1.pdf
    //
    // Usage:
    //
    // var xor4096 = require('xor4096');
    // random = xor4096(1); // Seed with int32 or string.
    // assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits.
    // assert.equal(random.int32(), 1806534897); // signed int32, 32 bits.
    //
    // For nonzero numeric keys, this implementation provides a sequence
    // identical to that by Brent's xorgens 3 implementation in C. This
    // implementation also provides for initializing the generator with
    // string seeds, or for saving and restoring the state of the generator.
    //
    // On Chrome, this prng benchmarks about 2.1 times slower than
    // Javascript's built-in Math.random().
    (function (global, module, define) {
        // State: 128-word circular xor-shift buffer X, index i, and Weyl
        // counter w.
        function XorGen(seed) {
            var me = this;
            // Set up generator function.
            me.next = function () {
                var w = me.w, X = me.X, i = me.i, t, v;
                // Update Weyl generator.
                me.w = w = (w + 0x61c88647) | 0;
                // Update xor generator.
                v = X[(i + 34) & 127];
                t = X[i = ((i + 1) & 127)];
                v ^= v << 13;
                t ^= t << 17;
                v ^= v >>> 15;
                t ^= t >>> 12;
                // Update Xor generator array state.
                v = X[i] = v ^ t;
                me.i = i;
                // Result is the combination.
                return (v + (w ^ (w >>> 16))) | 0;
            };
            function init(me, seed) {
                var t, v, i, j, w, X = [], limit = 128;
                if (seed === (seed | 0)) {
                    // Numeric seeds initialize v, which is used to generates X.
                    v = seed;
                    seed = null;
                }
                else {
                    // String seeds are mixed into v and X one character at a time.
                    seed = seed + '\0';
                    v = 0;
                    limit = Math.max(limit, seed.length);
                }
                // Initialize circular array and weyl value.
                for (i = 0, j = -32; j < limit; ++j) {
                    // Put the unicode characters into the array, and shuffle them.
                    if (seed)
                        v ^= seed.charCodeAt((j + 32) % seed.length);
                    // After 32 shuffles, take v as the starting w value.
                    if (j === 0)
                        w = v;
                    v ^= v << 10;
                    v ^= v >>> 15;
                    v ^= v << 4;
                    v ^= v >>> 13;
                    if (j >= 0) {
                        w = (w + 0x61c88647) | 0; // Weyl.
                        t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array.
                        i = (0 == t) ? i + 1 : 0; // Count zeroes.
                    }
                }
                // We have detected all zeroes; make the key nonzero.
                if (i >= 128) {
                    X[(seed && seed.length || 0) & 127] = -1;
                }
                // Run the generator 512 times to further mix the state before using it.
                // Factoring this as a function slows the main generator, so it is just
                // unrolled here. The weyl generator is not advanced while warming up.
                i = 127;
                for (j = 4 * 128; j > 0; --j) {
                    v = X[(i + 34) & 127];
                    t = X[i = ((i + 1) & 127)];
                    v ^= v << 13;
                    t ^= t << 17;
                    v ^= v >>> 15;
                    t ^= t >>> 12;
                    X[i] = v ^ t;
                }
                // Storing state as object members is faster than using closure variables.
                me.w = w;
                me.X = X;
                me.i = i;
            }
            init(me, seed);
        }
        // Copies generator state between a XorGen instance and a plain object.
        function copy(f, t) {
            t.i = f.i;
            t.w = f.w;
            t.X = f.X.slice();
            return t;
        }
        // Public factory. A null/undefined seed falls back to the clock.
        function impl(seed, opts) {
            if (seed == null)
                seed = +(new Date);
            var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
            // Combine two 32-bit draws into a full-precision 53-bit double.
            prng.double = function () {
                do {
                    var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                } while (result === 0);
                return result;
            };
            prng.int32 = xg.next;
            prng.quick = prng;
            if (state) {
                // Restore only when the saved state carries the X buffer.
                if (state.X)
                    copy(state, xg);
                prng.state = function () { return copy(xg, {}); };
            }
            return prng;
        }
        if (module && module.exports) {
            module.exports = impl;
        }
        else if (define && define.amd) {
            define(function () { return impl; });
        }
        else {
            this.xor4096 = impl;
        }
    })(commonjsGlobal, // window object or global
    module, // present in node.js
    // NOTE(review): always false — the bundler appears to have replaced the
    // original `typeof define` with `typeof undefined`; AMD branch is dead.
    (typeof undefined) == 'function' // present with an AMD loader
    );
});
+
// Bundled "Tyche-i" seedable PRNG from the seedrandom package.
var tychei = createCommonjsModule(function (module) {
    // A Javascript implementation of the "Tyche-i" prng algorithm by
    // Samuel Neves and Filipe Araujo.
    // See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf
    (function (global, module, define) {
        // 128-bit state: four 32-bit words a, b, c, d.
        function XorGen(seed) {
            var me = this, strseed = '';
            // Set up generator function.
            me.next = function () {
                var b = me.b, c = me.c, d = me.d, a = me.a;
                b = (b << 25) ^ (b >>> 7) ^ c;
                c = (c - d) | 0;
                d = (d << 24) ^ (d >>> 8) ^ a;
                a = (a - b) | 0;
                me.b = b = (b << 20) ^ (b >>> 12) ^ c;
                me.c = c = (c - d) | 0;
                me.d = (d << 16) ^ (c >>> 16) ^ a;
                return me.a = (a - b) | 0;
            };
            /* The following is non-inverted tyche, which has better internal
             * bit diffusion, but which is about 25% slower than tyche-i in JS.
            me.next = function() {
              var a = me.a, b = me.b, c = me.c, d = me.d;
              a = (me.a + me.b | 0) >>> 0;
              d = me.d ^ a; d = d << 16 ^ d >>> 16;
              c = me.c + d | 0;
              b = me.b ^ c; b = b << 12 ^ d >>> 20;
              me.a = a = a + b | 0;
              d = d ^ a; me.d = d = d << 8 ^ d >>> 24;
              me.c = c = c + d | 0;
              b = b ^ c;
              return me.b = (b << 7 ^ b >>> 25);
            }
            */
            me.a = 0;
            me.b = 0;
            me.c = 2654435769 | 0;
            me.d = 1367130551;
            if (seed === Math.floor(seed)) {
                // Integer seed: split across the a (high) and b (low) words.
                me.a = (seed / 0x100000000) | 0;
                me.b = seed | 0;
            }
            else {
                // String seed.
                strseed += seed;
            }
            // Mix in string seed, then discard an initial batch of 64 values.
            for (var k = 0; k < strseed.length + 20; k++) {
                me.b ^= strseed.charCodeAt(k) | 0;
                me.next();
            }
        }
        // Copies generator state between a XorGen instance and a plain object.
        function copy(f, t) {
            t.a = f.a;
            t.b = f.b;
            t.c = f.c;
            t.d = f.d;
            return t;
        }
        // Public factory: returns a prng in [0, 1) with the standard
        // seedrandom helper methods attached.
        function impl(seed, opts) {
            var xg = new XorGen(seed), state = opts && opts.state, prng = function () { return (xg.next() >>> 0) / 0x100000000; };
            // Combine two 32-bit draws into a full-precision 53-bit double.
            prng.double = function () {
                do {
                    var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 0x100000000, result = (top + bot) / (1 << 21);
                } while (result === 0);
                return result;
            };
            prng.int32 = xg.next;
            prng.quick = prng;
            if (state) {
                if (typeof (state) == 'object')
                    copy(state, xg);
                prng.state = function () { return copy(xg, {}); };
            }
            return prng;
        }
        if (module && module.exports) {
            module.exports = impl;
        }
        else if (define && define.amd) {
            define(function () { return impl; });
        }
        else {
            this.tychei = impl;
        }
    })(commonjsGlobal, module, // present in node.js
    // NOTE(review): always false — the bundler appears to have replaced the
    // original `typeof define` with `typeof undefined`; AMD branch is dead.
    (typeof undefined) == 'function' // present with an AMD loader
    );
});
+
+ /*
+ Copyright 2019 David Bau.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ */
// Bundled primary seedrandom export: the original ARC4-based seedable PRNG
// by David Bau. module.exports is seedrandom(seed, options, callback).
var seedrandom$1 = createCommonjsModule(function (module) {
    (function (global, pool, math) {
        //
        // The following constants are related to IEEE 754 limits.
        //
        var width = 256, // each RC4 output is 0 <= x < 256
        chunks = 6, // at least six RC4 outputs for each double
        digits = 52, // there are 52 significant digits in a double
        rngname = 'random', // rngname: name for Math.random and Math.seedrandom
        startdenom = math.pow(width, chunks), significance = math.pow(2, digits), overflow = significance * 2, mask = width - 1, nodecrypto; // node.js crypto module, initialized at the bottom.
        //
        // seedrandom()
        // This is the seedrandom function described above.
        //
        function seedrandom(seed, options, callback) {
            var key = [];
            options = (options == true) ? { entropy: true } : (options || {});
            // Flatten the seed string or build one from local entropy if needed.
            var shortseed = mixkey(flatten(options.entropy ? [seed, tostring(pool)] :
                (seed == null) ? autoseed() : seed, 3), key);
            // Use the seed to initialize an ARC4 generator.
            var arc4 = new ARC4(key);
            // This function returns a random double in [0, 1) that contains
            // randomness in every bit of the mantissa of the IEEE 754 value.
            var prng = function () {
                var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48
                d = startdenom, // and denominator d = 2 ^ 48.
                x = 0; // and no 'extra last byte'.
                while (n < significance) { // Fill up all significant digits by
                    n = (n + x) * width; // shifting numerator and
                    d *= width; // denominator and generating a
                    x = arc4.g(1); // new least-significant-byte.
                }
                while (n >= overflow) { // To avoid rounding up, before adding
                    n /= 2; // last byte, shift everything
                    d /= 2; // right using integer math until
                    x >>>= 1; // we have exactly the desired bits.
                }
                return (n + x) / d; // Form the number within [0, 1).
            };
            prng.int32 = function () { return arc4.g(4) | 0; };
            prng.quick = function () { return arc4.g(4) / 0x100000000; };
            prng.double = prng;
            // Mix the randomness into accumulated entropy.
            mixkey(tostring(arc4.S), pool);
            // Calling convention: what to return as a function of prng, seed, is_math.
            return (options.pass || callback ||
                function (prng, seed, is_math_call, state) {
                    if (state) {
                        // Load the arc4 state from the given state if it has an S array.
                        if (state.S) {
                            copy(state, arc4);
                        }
                        // Only provide the .state method if requested via options.state.
                        prng.state = function () { return copy(arc4, {}); };
                    }
                    // If called as a method of Math (Math.seedrandom()), mutate
                    // Math.random because that is how seedrandom.js has worked since v1.0.
                    if (is_math_call) {
                        math[rngname] = prng;
                        return seed;
                    }
                    // Otherwise, it is a newer calling convention, so return the
                    // prng directly.
                    else
                        return prng;
                })(prng, shortseed, 'global' in options ? options.global : (this == math), options.state);
        }
        //
        // ARC4
        //
        // An ARC4 implementation. The constructor takes a key in the form of
        // an array of at most (width) integers that should be 0 <= x < (width).
        //
        // The g(count) method returns a pseudorandom integer that concatenates
        // the next (count) outputs from ARC4. Its return value is a number x
        // that is in the range 0 <= x < (width ^ count).
        //
        function ARC4(key) {
            var t, keylen = key.length, me = this, i = 0, j = me.i = me.j = 0, s = me.S = [];
            // The empty key [] is treated as [0].
            if (!keylen) {
                key = [keylen++];
            }
            // Set up S using the standard key scheduling algorithm.
            while (i < width) {
                s[i] = i++;
            }
            for (i = 0; i < width; i++) {
                s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))];
                s[j] = t;
            }
            // The "g" method returns the next (count) outputs as one number.
            (me.g = function (count) {
                // Using instance members instead of closure state nearly doubles speed.
                var t, r = 0, i = me.i, j = me.j, s = me.S;
                while (count--) {
                    t = s[i = mask & (i + 1)];
                    r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))];
                }
                me.i = i;
                me.j = j;
                return r;
                // For robust unpredictability, the function call below automatically
                // discards an initial batch of values. This is called RC4-drop[256].
                // See http://google.com/search?q=rsa+fluhrer+response&btnI
            })(width);
        }
        //
        // copy()
        // Copies internal state of ARC4 to or from a plain object.
        //
        function copy(f, t) {
            t.i = f.i;
            t.j = f.j;
            t.S = f.S.slice();
            return t;
        }
        //
        // flatten()
        // Converts an object tree to nested arrays of strings.
        //
        function flatten(obj, depth) {
            var result = [], typ = (typeof obj), prop;
            if (depth && typ == 'object') {
                for (prop in obj) {
                    try {
                        result.push(flatten(obj[prop], depth - 1));
                    }
                    catch (e) { }
                }
            }
            return (result.length ? result : typ == 'string' ? obj : obj + '\0');
        }
        //
        // mixkey()
        // Mixes a string seed into a key that is an array of integers, and
        // returns a shortened string seed that is equivalent to the result key.
        //
        function mixkey(seed, key) {
            var stringseed = seed + '', smear, j = 0;
            while (j < stringseed.length) {
                key[mask & j] =
                    mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++));
            }
            return tostring(key);
        }
        //
        // autoseed()
        // Returns an object for autoseeding, using window.crypto and Node crypto
        // module if available.
        //
        function autoseed() {
            try {
                var out;
                if (nodecrypto && (out = nodecrypto.randomBytes)) {
                    // The use of 'out' to remember randomBytes makes tight minified code.
                    out = out(width);
                }
                else {
                    out = new Uint8Array(width);
                    (global.crypto || global.msCrypto).getRandomValues(out);
                }
                return tostring(out);
            }
            catch (e) {
                // Weak fallback entropy when no crypto source is available.
                var browser = global.navigator, plugins = browser && browser.plugins;
                return [+new Date, global, plugins, global.screen, tostring(pool)];
            }
        }
        //
        // tostring()
        // Converts an array of charcodes to a string
        //
        function tostring(a) {
            return String.fromCharCode.apply(0, a);
        }
        //
        // When seedrandom.js is loaded, we immediately mix a few bits
        // from the built-in RNG into the entropy pool. Because we do
        // not want to interfere with deterministic PRNG state later,
        // seedrandom will not call math.random on its own again after
        // initialization.
        //
        mixkey(math.random(), pool);
        //
        // Nodejs and AMD support: export the implementation as a module using
        // either convention.
        //
        if (module.exports) {
            module.exports = seedrandom;
            // When in node.js, try using crypto package for autoseeding.
            try {
                // NOTE(review): require$$0__default is a bundler-generated
                // binding (presumably the Node 'crypto' module) declared
                // elsewhere in this file — verify it exists in this build.
                nodecrypto = require$$0__default['default'];
            }
            catch (ex) { }
        }
        else {
            // When included as a plain script, set up Math.seedrandom global.
            math['seed' + rngname] = seedrandom;
        }
        // End anonymous scope, and pass initial values.
    })(
    // global: `self` in browsers (including strict mode and web workers),
    // otherwise `this` in Node and other environments
    (typeof self !== 'undefined') ? self : commonjsGlobal, [], // pool: entropy pool starts empty
    Math // math: package containing random, pow, and seedrandom
    );
});
+
// A library of seedable RNGs implemented in Javascript.
//
// Usage:
//
// var seedrandom = require('seedrandom');
// var random = seedrandom(1); // or any seed.
// var x = random(); // 0 <= x < 1. Every bit is random.
// var x = random.quick(); // 0 <= x < 1. 32 bits of randomness.
// alea, a 53-bit multiply-with-carry generator by Johannes Baagøe.
// Period: ~2^116
// Reported to pass all BigCrush tests.
// xor128, a pure xor-shift generator by George Marsaglia.
// Period: 2^128-1.
// Reported to fail: MatrixRank and LinearComp.
// xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl.
// Period: 2^192-2^32
// Reported to fail: CollisionOver, SimpPoker, and LinearComp.
// xorshift7, by François Panneton and Pierre L'ecuyer, takes
// a different approach: it adds robustness by allowing more shifts
// than Marsaglia's original three. It is a 7-shift generator
// with 256 bits, that passes BigCrush with no systematic failures.
// Period 2^256-1.
// No systematic BigCrush failures reported.
// xor4096, by Richard Brent, is a 4096-bit xor-shift with a
// very long period that also adds a Weyl generator. It also passes
// BigCrush with no systematic failures. Its long period may
// be useful if you have many generators and need to avoid
// collisions.
// Period: 2^4128-2^32.
// No systematic BigCrush failures reported.
// Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random
// number generator derived from ChaCha, a modern stream cipher.
// https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf
// Period: ~2^127
// No systematic BigCrush failures reported.
// The original ARC4-based prng included in this library.
// Period: ~2^1600
// Attach the alternative generators as properties of the primary
// ARC4-based export, mirroring the upstream seedrandom package API.
seedrandom$1.alea = alea;
seedrandom$1.xor128 = xor128;
seedrandom$1.xorwow = xorwow;
seedrandom$1.xorshift7 = xorshift7;
seedrandom$1.xor4096 = xor4096;
seedrandom$1.tychei = tychei;
// Alias used by the rest of this bundle.
var seedrandom = seedrandom$1;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// https://en.wikipedia.org/wiki/Marsaglia_polar_method
// Generator of pseudo-random values drawn from a Gaussian (normal)
// distribution via the Marsaglia polar method. Samples are produced in
// pairs; the second sample of each pair is cached in `nextVal`, so every
// other call to nextValue() is nearly free.
var MPRandGauss = /** @class */ (function () {
    /**
     * @param mean Mean of the distribution.
     * @param stdDeviation Standard deviation of the distribution.
     * @param dtype Output dtype; anything other than null/'float32' is
     *     rounded to the nearest integer (see convertValue).
     * @param truncated When true, samples outside two standard deviations
     *     of the mean are rejected and redrawn.
     * @param seed Optional numeric seed for the alea PRNG.
     *     NOTE(review): a seed of 0 is falsy and silently falls back to
     *     Math.random() — confirm whether 0 should be a valid seed.
     */
    function MPRandGauss(mean, stdDeviation, dtype, truncated, seed) {
        this.mean = mean;
        this.stdDev = stdDeviation;
        this.dtype = dtype;
        this.nextVal = NaN; // NaN marks "no cached second sample".
        this.truncated = truncated;
        if (this.truncated) {
            // Truncation window: two standard deviations around the mean.
            this.upper = this.mean + this.stdDev * 2;
            this.lower = this.mean - this.stdDev * 2;
        }
        var seedValue = seed ? seed : Math.random();
        // alea is the seedable PRNG exported by the seedrandom bundle above.
        this.random = seedrandom.alea(seedValue.toString());
    }
    /** Returns next sample from a Gaussian distribution. */
    MPRandGauss.prototype.nextValue = function () {
        // Serve the cached second sample from the previous pair, if any.
        if (!isNaN(this.nextVal)) {
            var value = this.nextVal;
            this.nextVal = NaN;
            return value;
        }
        var resultX, resultY;
        var isValid = false;
        while (!isValid) {
            var v1 = void 0, v2 = void 0, s = void 0;
            do {
                // Draw a point uniformly from [-1,1)^2 until it falls
                // strictly inside the unit circle (excluding the origin).
                v1 = 2 * this.random() - 1;
                v2 = 2 * this.random() - 1;
                s = v1 * v1 + v2 * v2;
            } while (s >= 1 || s === 0);
            // Polar-method transform turning (v1, v2) into two independent
            // standard-normal deviates, then scale/shift to (mean, stdDev).
            var mul = Math.sqrt(-2.0 * Math.log(s) / s);
            resultX = this.mean + this.stdDev * v1 * mul;
            resultY = this.mean + this.stdDev * v2 * mul;
            if (!this.truncated || this.isValidTruncated(resultX)) {
                isValid = true;
            }
        }
        // Cache the second sample only if it also satisfies truncation;
        // otherwise it is simply discarded.
        if (!this.truncated || this.isValidTruncated(resultY)) {
            this.nextVal = this.convertValue(resultY);
        }
        return this.convertValue(resultX);
    };
    /** Handles proper rounding for non-floating-point numbers. */
    MPRandGauss.prototype.convertValue = function (value) {
        if (this.dtype == null || this.dtype === 'float32') {
            return value;
        }
        return Math.round(value);
    };
    /** Returns true if less than 2-standard-deviations from the mean. */
    MPRandGauss.prototype.isValidTruncated = function (value) {
        return value <= this.upper && value >= this.lower;
    };
    return MPRandGauss;
}());
// Marsaglia, George, and Wai Wan Tsang. 2000. "A Simple Method for Generating
// Gamma Variables."
// Generator of gamma-distributed samples using the Marsaglia-Tsang
// rejection method, driven by one Gaussian source and one uniform source.
var RandGamma = /** @class */ (function () {
    /**
     * @param alpha Shape parameter of the gamma distribution.
     * @param beta Rate (inverse scale) parameter, per the comment below.
     * @param dtype Output dtype; anything other than 'float32' is rounded.
     * @param seed Optional numeric seed; a falsy seed (including 0) falls
     *     back to Math.random().
     */
    function RandGamma(alpha, beta, dtype, seed) {
        this.alpha = alpha;
        this.beta = 1 / beta; // convert rate to scale parameter
        this.dtype = dtype;
        var seedValue = seed ? seed : Math.random();
        this.randu = seedrandom.alea(seedValue.toString());
        // Seed the Gaussian source from the uniform source so both streams
        // derive from the single user-provided seed.
        this.randn = new MPRandGauss(0, 1, dtype, false, this.randu());
        // Marsaglia-Tsang constants: d = alpha - 1/3; the alpha < 1 case is
        // handled by sampling Gamma(alpha + 1) (so d = (alpha + 1) - 1/3 =
        // alpha + 2/3) plus the u^(1/alpha) boost in nextValue().
        if (alpha < 1) {
            this.d = alpha + (2 / 3);
        }
        else {
            this.d = alpha - (1 / 3);
        }
        this.c = 1 / Math.sqrt(9 * this.d);
    }
    /** Returns next sample from a gamma distribution. */
    RandGamma.prototype.nextValue = function () {
        var x2, v0, v1, x, u, v;
        while (true) {
            // Draw x ~ N(0,1) until v = (1 + c*x)^3 is positive.
            do {
                x = this.randn.nextValue();
                v = 1 + (this.c * x);
            } while (v <= 0);
            v *= v * v;
            x2 = x * x;
            // Cheap squeeze test first (v0); full log acceptance test (v1)
            // only when the squeeze fails.
            v0 = 1 - (0.331 * x2 * x2);
            v1 = (0.5 * x2) + (this.d * (1 - v + Math.log(v)));
            u = this.randu();
            if (u < v0 || Math.log(u) < v1) {
                break;
            }
        }
        // NOTE(review): this.beta was already converted to the scale
        // (1 / rate) in the constructor, so (1 / this.beta) multiplies by
        // the rate again. Verify against the intended Gamma(alpha, beta)
        // parameterization — this double inversion looks suspicious.
        v = (1 / this.beta) * this.d * v;
        if (this.alpha < 1) {
            // Boost for alpha < 1: Gamma(alpha) = Gamma(alpha + 1) * U^(1/alpha).
            v *= Math.pow(this.randu(), 1 / this.alpha);
        }
        return this.convertValue(v);
    };
    /** Handles proper rounding for non-floating-point numbers. */
    RandGamma.prototype.convertValue = function (value) {
        if (this.dtype === 'float32') {
            return value;
        }
        return Math.round(value);
    };
    return RandGamma;
}());
// Generator of uniformly distributed values in the half-open interval
// [min, max).
var UniformRandom = /** @class */ (function () {
    /**
     * @param min Inclusive lower bound (default 0).
     * @param max Exclusive upper bound (default 1).
     * @param dtype Output dtype; null/'float32' pass values through,
     *     anything else is rounded to the nearest integer.
     * @param seed Optional seed (number or string) for the alea PRNG.
     */
    function UniformRandom(min, max, dtype, seed) {
        var self = this;
        if (min === void 0) { min = 0; }
        if (max === void 0) { max = 1; }
        /** Handles proper rounding for non floating point numbers. */
        this.canReturnFloat = function () {
            return (self.dtype == null || self.dtype === 'float32');
        };
        this.min = min;
        this.range = max - min;
        this.dtype = dtype;
        // Normalize the seed to a string; absent seeds use Math.random().
        var seedString;
        if (seed == null) {
            seedString = Math.random().toString();
        }
        else if (typeof seed === 'number') {
            seedString = seed.toString();
        }
        else {
            seedString = seed;
        }
        // Integer dtypes need a range wide enough to round into.
        if (!this.canReturnFloat() && this.range <= 1) {
            throw new Error("The difference between " + min + " - " + max + " <= 1 and dtype is not float");
        }
        this.random = seedrandom.alea(seedString);
    }
    /** Rounds when the requested dtype is an integer type. */
    UniformRandom.prototype.convertValue = function (value) {
        return this.canReturnFloat() ? value : Math.round(value);
    };
    /** Maps the next [0,1) draw into [min, max). */
    UniformRandom.prototype.nextValue = function () {
        return this.convertValue(this.min + this.range * this.random());
    };
    return UniformRandom;
}());
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a gamma distribution.
+ *
+ * ```js
+ * tf.randomGamma([2, 2], 1).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param alpha The shape parameter of the gamma distribution.
+ * @param beta The inverse scale parameter of the gamma distribution. Defaults
+ * to 1.
+ * @param dtype The data type of the output. Defaults to float32.
+ * @param seed The seed for the random number generator.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
/**
 * Fills a buffer of the given shape with Gamma(alpha, beta) samples and
 * returns it as a tensor. Only 'float32' and 'int32' outputs are allowed.
 */
function randomGamma_(shape, alpha, beta, dtype, seed) {
    // `== null` covers both omitted arguments and explicit nulls.
    if (beta == null) {
        beta = 1;
    }
    if (dtype == null) {
        dtype = 'float32';
    }
    if (dtype !== 'float32' && dtype !== 'int32') {
        throw new Error("Unsupported data type " + dtype);
    }
    var sampler = new RandGamma(alpha, beta, dtype, seed);
    var out = buffer(shape, dtype);
    var size = out.values.length;
    for (var i = 0; i < size; i++) {
        out.values[i] = sampler.nextValue();
    }
    return out.toTensor();
}
var randomGamma = op({ randomGamma_: randomGamma_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a normal distribution.
+ *
+ * ```js
+ * tf.randomNormal([2, 2]).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param mean The mean of the normal distribution.
+ * @param stdDev The standard deviation of the normal distribution.
+ * @param dtype The data type of the output.
+ * @param seed The seed for the random number generator.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
/**
 * Fills a buffer of the given shape with N(mean, stdDev^2) samples and
 * returns it as a tensor. 'bool' outputs are rejected.
 */
function randomNormal_(shape, mean, stdDev, dtype, seed) {
    if (mean === void 0) { mean = 0; }
    if (stdDev === void 0) { stdDev = 1; }
    if (dtype != null && dtype === 'bool') {
        throw new Error("Unsupported data type " + dtype);
    }
    var gauss = new MPRandGauss(mean, stdDev, dtype, false /* truncated */, seed);
    var out = buffer(shape, dtype);
    var size = out.values.length;
    for (var i = 0; i < size; i++) {
        out.values[i] = gauss.nextValue();
    }
    return out.toTensor();
}
var randomNormal = op({ randomNormal_: randomNormal_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a uniform distribution.
+ *
+ * The generated values follow a uniform distribution in the range [minval,
+ * maxval). The lower bound minval is included in the range, while the upper
+ * bound maxval is excluded.
+ *
+ * ```js
+ * tf.randomUniform([2, 2]).print();
+ * ```
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param minval The lower bound on the range of random values to generate.
+ * Defaults to 0.
+ * @param maxval The upper bound on the range of random values to generate.
+ * Defaults to 1.
+ * @param dtype The data type of the output tensor. Defaults to 'float32'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Random'}
+ */
/**
 * Fills a buffer of the given shape with uniform samples from
 * [minval, maxval) and returns it as a tensor. The sampler is created with
 * a null dtype (so it never rounds); the buffer's dtype governs storage.
 */
function randomUniform_(shape, minval, maxval, dtype, seed) {
    if (minval === void 0) { minval = 0; }
    if (maxval === void 0) { maxval = 1; }
    if (dtype === void 0) { dtype = 'float32'; }
    var out = buffer(shape, dtype);
    var rand = new UniformRandom(minval, maxval, null, seed);
    var size = out.values.length;
    for (var i = 0; i < size; i++) {
        out.values[i] = rand.nextValue();
    }
    return out.toTensor();
}
var randomUniform = op({ randomUniform_: randomUniform_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new `tf.Tensor1D` filled with the numbers in the range provided.
+ *
+ * The tensor is a half-open interval, meaning it includes start but
+ * excludes stop. Decrementing ranges and negative step values are also
+ * supported.
+ *
+ *
+ * ```js
+ * tf.range(0, 9, 2).print();
+ * ```
+ *
+ * @param start An integer start value
+ * @param stop An integer stop value
+ * @param step An integer increment (will default to 1 or -1)
+ * @param dtype The data type of the output tensor. Defaults to 'float32'.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Builds a 1-D tensor of evenly spaced values in [start, stop) via the
 * Range kernel. A step of zero is rejected up front.
 */
function range(start, stop, step, dtype) {
    if (step === void 0) { step = 1; }
    if (dtype === void 0) { dtype = 'float32'; }
    if (step === 0) {
        throw new Error('Cannot have a step of zero');
    }
    return ENGINE.runKernel(Range, {} /* inputs */, { start: start, stop: stop, step: step, dtype: dtype });
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns the real part of a complex (or real) tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of type float that is
+ * the real part of each element in input considered as a complex number.
+ *
+ * If the input is real, it simply makes a clone.
+ *
+ * ```js
+ * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);
+ * tf.real(x).print();
+ * ```
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Extracts the real component of a (possibly complex) tensor via the Real
 * kernel.
 */
function real_(input) {
    var inputTensor = convertToTensor(input, 'input', 'real');
    return ENGINE.runKernel(Real, { input: inputTensor });
}
var real = op({ real_: real_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes reciprocal of x element-wise: `1 / x`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, 2]);
+ *
+ * x.reciprocal().print(); // or tf.reciprocal(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/** Element-wise reciprocal (1 / x), delegated to the Reciprocal kernel. */
function reciprocal_(x) {
    var xTensor = convertToTensor(x, 'x', 'reciprocal');
    return ENGINE.runKernel(Reciprocal, { x: xTensor });
}
var reciprocal = op({ reciprocal_: reciprocal_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes rectified linear element-wise: `max(x, 0)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.relu().print(); // or tf.relu(x)
+ * ```
+ * @param x The input tensor. If the dtype is `bool`, the output dtype will be
+ * `int32`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/** Element-wise rectified linear unit, delegated to the Relu kernel. */
function relu_(x) {
    var xTensor = convertToTensor(x, 'x', 'relu');
    return ENGINE.runKernel(Relu, { x: xTensor });
}
var relu = op({ relu_: relu_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes rectified linear 6 element-wise: `min(max(x, 0), 6)`.
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 8]);
+ *
+ * x.relu6().print(); // or tf.relu6(x)
+ * ```
+ * @param x The input tensor. If the dtype is `bool`, the output dtype will be
+ * `int32`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/** Element-wise ReLU6 (clamped at 6), delegated to the Relu6 kernel. */
function relu6_(x) {
    var xTensor = convertToTensor(x, 'x', 'relu6');
    return ENGINE.runKernel(Relu6, { x: xTensor });
}
var relu6 = op({ relu6_: relu6_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor` along a specified axis.
+ *
+ * Also available are stricter rank-specific methods that assert that `x` is
+ * of the given rank:
+ * - `tf.reverse1d`
+ * - `tf.reverse2d`
+ * - `tf.reverse3d`
+ * - `tf.reverse4d`
+ *
+ * Except `tf.reverse1d` (which does not have axis param), all methods have
+ * same signature as this method.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * x.reverse().print();
+ * ```
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * const axis = 1;
+ * x.reverse(axis).print();
+ * ```
+ * @param x The input tensor to be reversed.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
/**
 * Reverses a tensor along the given axes (all axes when omitted),
 * delegated to the Reverse kernel.
 */
function reverse_(x, axis) {
    var xTensor = convertToTensor(x, 'x', 'reverse');
    return ENGINE.runKernel(Reverse, { x: xTensor }, { dims: axis });
}
var reverse = op({ reverse_: reverse_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor1D`.
+ *
+ * @param x The input tensor.
+ */
/** Rank-checked wrapper: reverses a rank-1 tensor along its only axis. */
function reverse1d_(x) {
    var xTensor = convertToTensor(x, 'x', 'reverse');
    assert(xTensor.rank === 1, function () {
        return "Error in reverse1D: x must be rank 1 but got rank " + xTensor.rank + ".";
    });
    return reverse(xTensor, 0);
}
var reverse1d = op({ reverse1d_: reverse1d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor2D` along a specified axis.
+ *
+ * @param x The input tensor.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ */
/** Rank-checked wrapper: reverses a rank-2 tensor along the given axes. */
function reverse2d_(x, axis) {
    var xTensor = convertToTensor(x, 'x', 'reverse');
    assert(xTensor.rank === 2, function () {
        return "Error in reverse2D: x must be rank 2 but got rank " + xTensor.rank + ".";
    });
    return reverse(xTensor, axis);
}
var reverse2d = op({ reverse2d_: reverse2d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor3D` along a specified axis.
+ *
+ * @param x The input tensor.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ */
/** Rank-checked wrapper: reverses a rank-3 tensor along the given axes. */
function reverse3d_(x, axis) {
    var xTensor = convertToTensor(x, 'x', 'reverse');
    assert(xTensor.rank === 3, function () {
        return "Error in reverse3D: x must be rank 3 but got rank " + xTensor.rank + ".";
    });
    return reverse(xTensor, axis);
}
var reverse3d = op({ reverse3d_: reverse3d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Reverses a `tf.Tensor4D` along a specified axis.
+ *
+ * @param x The input tensor.
+ * @param axis The set of dimensions to reverse. Must be in the
+ * range [-rank(x), rank(x)). Defaults to all axes.
+ */
/** Rank-checked wrapper: reverses a rank-4 tensor along the given axes. */
function reverse4d_(x, axis) {
    var xTensor = convertToTensor(x, 'x', 'reverse');
    assert(xTensor.rank === 4, function () {
        return "Error in reverse4D: x must be rank 4 but got rank " + xTensor.rank + ".";
    });
    return reverse(xTensor, axis);
}
var reverse4d = op({ reverse4d_: reverse4d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes round of input `tf.Tensor` element-wise: `round(x)`.
+ * It implements banker's rounding.
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3]);
+ *
+ * x.round().print(); // or tf.round(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/** Element-wise rounding, delegated to the Round kernel. */
function round_(x) {
    var xTensor = convertToTensor(x, 'x', 'round');
    return ENGINE.runKernel(Round, { x: xTensor });
}
var round = op({ round_: round_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes reciprocal of square root of the input `tf.Tensor` element-wise:
+ * `y = 1 / sqrt(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 4, -1]);
+ *
+ * x.rsqrt().print(); // or tf.rsqrt(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/**
 * Element-wise reciprocal square root (1 / sqrt(x)); the input is coerced
 * to float32 before dispatching to the Rsqrt kernel.
 */
function rsqrt_(x) {
    var xTensor = convertToTensor(x, 'x', 'rsqrt', 'float32');
    return ENGINE.runKernel(Rsqrt, { x: xTensor });
}
var rsqrt = op({ rsqrt_: rsqrt_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-0 `tf.Tensor` (scalar) with the provided value and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.scalar` as it makes the code more readable.
+ *
+ * ```js
+ * tf.scalar(3.14).print();
+ * ```
+ *
+ * @param value The value of the scalar.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Wraps a primitive value (or, for dtype 'string', an encoded Uint8Array)
 * in a rank-0 tensor. Arrays and non-string typed arrays are rejected,
 * except for the complex64 pair representation.
 */
function scalar(value, dtype) {
    var looksLikeArray = (isTypedArray(value) && dtype !== 'string') || Array.isArray(value);
    if (looksLikeArray && dtype !== 'complex64') {
        throw new Error('Error creating a new Scalar: value must be a primitive ' +
            '(number|boolean|string)');
    }
    if (dtype === 'string' && isTypedArray(value) &&
        !(value instanceof Uint8Array)) {
        throw new Error('When making a scalar from encoded string, ' +
            'the value must be `Uint8Array`.');
    }
    // Rank 0: both the requested and inferred shapes are empty.
    return makeTensor(value, [], [], dtype);
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes scaled exponential linear element-wise.
+ *
+ * `x < 0 ? scale * alpha * (exp(x) - 1) : x`
+ *
+ * ```js
+ * const x = tf.tensor1d([-1, 2, -3, 4]);
+ *
+ * x.selu().print(); // or tf.selu(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
/** Element-wise scaled exponential linear unit, via the Selu kernel. */
function selu_(x) {
    var xTensor = convertToTensor(x, 'x', 'selu');
    return ENGINE.runKernel(Selu, { x: xTensor });
}
var selu = op({ selu_: selu_ });
+
+ /**
+ * 2-D convolution with separable filters.
+ *
+ * Performs a depthwise convolution that acts separately on channels followed
+ * by a pointwise convolution that mixes channels. Note that this is
+ * separability between dimensions [1, 2] and 3, not spatial separability
+ * between dimensions 1 and 2.
+ *
+ * See
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d)
+ * for more details.
+ *
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param depthwiseFilter The depthwise filter tensor, rank 4, of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]`. This is
+ * the filter used in the first step.
+ * @param pointwiseFilter The pointwise filter tensor, rank 4, of shape
+ * `[1, 1, inChannels * channelMultiplier, outChannels]`. This is
+ * the filter used in the second step.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`. If strides is a single number, then `strideHeight ==
+ * strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels]. Only "NHWC" is currently supported.
+ *
+ * @doc {heading: 'Operations', subheading: 'Convolution'}
+ */
+ function separableConv2d_(x, depthwiseFilter, pointwiseFilter, strides, pad, dilation, dataFormat) {
+ if (dilation === void 0) { dilation = [1, 1]; }
+ if (dataFormat === void 0) { dataFormat = 'NHWC'; }
+ var $x = convertToTensor(x, 'x', 'separableConv2d');
+ var $depthwiseFilter = convertToTensor(depthwiseFilter, 'depthwiseFilter', 'separableConv2d');
+ var $pointwiseFilter = convertToTensor(pointwiseFilter, 'pointwiseFilter', 'separableConv2d');
+ var x4D = $x;
+ var reshapedTo4D = false;
+ if ($x.rank === 3) {
+ reshapedTo4D = true;
+ x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
+ }
+ if (dataFormat === 'NCHW') {
+ throw new Error('separableConv2d currently does not support dataFormat NCHW; only ' +
+ 'NHWC is supported');
+ }
+ assert(x4D.rank === 4, function () { return "Error in separableConv2d: input must be rank 4, but got " +
+ ("rank " + x4D.rank + "."); });
+ assert($depthwiseFilter.rank === 4, function () { return "Error in separableConv2d: depthwise filter must be rank 4, but " +
+ ("got rank " + $depthwiseFilter.rank + "."); });
+ assert($pointwiseFilter.rank === 4, function () { return "Error in separableConv2d: pointwise filter must be rank 4, but " +
+ ("got rank " + $depthwiseFilter.rank + "."); });
+ assert($pointwiseFilter.shape[0] === 1, function () { return "Error in separableConv2d: the first dimension of pointwise filter " +
+ (" must be 1, but got " + $pointwiseFilter.shape[0] + "."); });
+ assert($pointwiseFilter.shape[1] === 1, function () { return "Error in separableConv2d: the second dimension of pointwise " +
+ ("filter must be 1, but got " + $pointwiseFilter.shape[1] + "."); });
+ var inChannels = $depthwiseFilter.shape[2];
+ var channelMultiplier = $depthwiseFilter.shape[3];
+ assert($pointwiseFilter.shape[2] === inChannels * channelMultiplier, function () { return "Error in separableConv2d: the third dimension of pointwise filter " +
+ ("must be " + inChannels * channelMultiplier + ", ") +
+ ("but got " + $pointwiseFilter.shape[2] + "."); });
+ var depthwise = depthwiseConv2d$1(x4D, $depthwiseFilter, strides, pad, dataFormat, dilation);
+ var pointwiseStride = 1;
+ var res = conv2d$1(depthwise, $pointwiseFilter, pointwiseStride, 'valid', dataFormat);
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ var separableConv2d = op({ separableConv2d_: separableConv2d_ });
+
+ /**
+ * Computes the difference between two lists of numbers.
+ *
+ * Given a Tensor `x` and a Tensor `y`, this operation returns a Tensor `out`
+ * that represents all values that are in `x` but not in `y`. The returned
+ * Tensor `out` is sorted in the same order that the numbers appear in `x`
+ * (duplicates are preserved). This operation also returns a Tensor indices that
+ * represents the position of each out element in `x`. In other words:
+ *
+ * `out[i] = x[idx[i]] for i in [0, 1, ..., out.length - 1]`
+ *
+ * ```js
+ * const x = [1, 2, 3, 4, 5, 6];
+ * const y = [1, 3, 5];
+ *
+ * const [out, indices] = await tf.setdiff1dAsync(x, y);
+ * out.print(); // [2, 4, 6]
+ * indices.print(); // [1, 3, 5]
+ * ```
+ *
+ * @param x 1-D Tensor. Values to keep.
+ * @param y 1-D Tensor. Must have the same type as x. Values to exclude in the
+ * output.
+ * @returns Promise of Tensor tuple [out, indices].
+ * out: Tensor with the same type as x.
+ * indices: A Tensor of type int32.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
    // Computes the set difference x \ y for two 1-D tensors of the same dtype.
    // Returns a promise of [out, indices] where `out` holds the values of `x`
    // not present in `y` (order and duplicates preserved) and `indices` (int32)
    // holds each kept value's position in `x`.
    // NOTE: this is TypeScript async/await transpiled to an __awaiter/__generator
    // state machine; case labels 0/1/2 correspond to the code between `await`s.
    function setdiff1dAsync_(x, y) {
        return __awaiter(this, void 0, void 0, function () {
            // `i` is intentionally declared twice by the transpiler (one per loop).
            var $x, $y, xVals, yVals, ySet, outputSize, i, buffer, indices, i, p;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        // Validate inputs before touching any data.
                        $x = convertToTensor(x, 'x', 'setdiff1d');
                        $y = convertToTensor(y, 'y', 'setdiff1d');
                        assert($x.dtype === $y.dtype, function () { return "x and y should have the same dtype, but got x (" + $x.dtype + ") and y (" + $y.dtype + ")."; });
                        assert($x.rank === 1, function () { return "x should be 1D tensor, but got x (" + $x.shape + ")."; });
                        assert($y.rank === 1, function () { return "y should be 1D tensor, but got y (" + $y.shape + ")."; });
                        // await $x.data()
                        return [4 /*yield*/, $x.data()];
                    case 1:
                        xVals = _a.sent();
                        // await $y.data()
                        return [4 /*yield*/, $y.data()];
                    case 2:
                        yVals = _a.sent();
                        // Set gives O(1) membership tests for the exclusion values.
                        ySet = new Set(yVals);
                        // First pass: count survivors so output buffers can be
                        // allocated exactly.
                        outputSize = 0;
                        for (i = 0; i < xVals.length; i++) {
                            if (!ySet.has(xVals[i])) {
                                outputSize++;
                            }
                        }
                        buffer = new TensorBuffer([outputSize], $x.dtype);
                        indices = new TensorBuffer([outputSize], 'int32');
                        // Second pass: copy surviving values and their source
                        // positions; p tracks the write cursor.
                        for (i = 0, p = 0; i < xVals.length; i++) {
                            if (!ySet.has(xVals[i])) {
                                buffer.values[p] = xVals[i];
                                indices.values[p] = i;
                                p++;
                            }
                        }
                        return [2 /*return*/, [buffer.toTensor(), indices.toTensor()]];
                }
            });
        });
    }
    var setdiff1dAsync = setdiff1dAsync_;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Returns an element-wise indication of the sign of a number.
+ *
+ * ```js
+ * const x = tf.tensor1d([.6, 1.1, -3.3, NaN, 0]);
+ *
+ * x.sign().print(); // or tf.sign(x)
+ * ```
+ * @param x The input Tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function sign_(x) {
+ var $x = convertToTensor(x, 'x', 'sign');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Sign, inputs);
+ }
+ var sign = op({ sign_: sign_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes sin of the input Tensor element-wise: `sin(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ *
+ * x.sin().print(); // or tf.sin(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function sin_(x) {
+ var $x = convertToTensor(x, 'x', 'sin', 'float32');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Sin, inputs);
+ }
+ var sin = op({ sin_: sin_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes hyperbolic sin of the input `tf.Tensor` element-wise: `sinh(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 1, -1, .7]);
+ *
+ * x.sinh().print(); // or tf.sinh(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function sinh_(x) {
+ var $x = convertToTensor(x, 'x', 'sinh');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Sinh, inputs);
+ }
+ var sinh = op({ sinh_: sinh_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a 1D slice from 1D array starting at coordinates `begin` and is
+ * of length `size`. See `slice` for details.
+ */
+ function slice1d_(x, begin, size) {
+ var $x = convertToTensor(x, 'x', 'slice1d');
+ assert($x.rank === 1, function () { return "slice1d expects a rank-1 tensor, but got a rank-" + $x.rank + " tensor"; });
+ return slice($x, [begin], [size]);
+ }
+ var slice1d = op({ slice1d_: slice1d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a 2D slice from a 2D array starting at coordinates `begin` and
+ * is of size `size`. See `slice` for details.
+ */
+ function slice2d_(x, begin, size) {
+ var $x = convertToTensor(x, 'x', 'slice2d');
+ assert($x.rank === 2, function () { return "slice2d expects a rank-2 tensor, but got a rank-" + $x.rank + " tensor"; });
+ return slice($x, begin, size);
+ }
+ var slice2d = op({ slice2d_: slice2d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a 3D slice from a 3D array starting at coordinates `begin` and
+ * is of size `size`. See `slice` for details.
+ */
+ function slice3d_(x, begin, size) {
+ var $x = convertToTensor(x, 'x', 'slice3d');
+ assert($x.rank === 3, function () { return "slice3d expects a rank-3 tensor, but got a rank-" + $x.rank + " tensor"; });
+ return slice($x, begin, size);
+ }
+ var slice3d = op({ slice3d_: slice3d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a 4D slice from a 4D array starting at coordinates `begin` and
+ * is of size `size`. See `slice` for details.
+ */
+ function slice4d_(x, begin, size) {
+ var $x = convertToTensor(x, 'x', 'slice4d');
+ assert($x.rank === 4, function () { return "slice4d expects a rank-4 tensor, but got a rank-" + $x.rank + " tensor"; });
+ return slice($x, begin, size);
+ }
+ var slice4d = op({ slice4d_: slice4d_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the softmax normalized vector given the logits.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2, 3]);
+ *
+ * a.softmax().print(); // or tf.softmax(a)
+ * ```
+ *
+ * ```js
+ * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);
+ *
+ * a.softmax().print(); // or tf.softmax(a)
+ * ```
+ *
+ * @param logits The logits array.
+ * @param dim The dimension softmax would be performed on. Defaults to `-1`
+ * which indicates the last dimension.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
+ function softmax_(logits, dim) {
+ if (dim === void 0) { dim = -1; }
+ var $logits = convertToTensor(logits, 'logits', 'softmax', 'float32');
+ if (dim === -1) {
+ dim = $logits.rank - 1;
+ }
+ if (dim !== $logits.rank - 1) {
+ throw Error('Softmax along a non-last dimension is not yet supported. ' +
+ ("Logits was rank " + $logits.rank + " and dim was " + dim));
+ }
+ var inputs = { logits: $logits };
+ var attrs = { dim: dim };
+ return ENGINE.runKernel(Softmax, inputs, attrs);
+ }
+ var softmax = op({ softmax_: softmax_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Fast Fourier transform.
+ *
+ * Computes the 1-dimensional discrete Fourier transform over the inner-most
+ * dimension of input.
+ *
+ * ```js
+ * const real = tf.tensor1d([1, 2, 3]);
+ * const imag = tf.tensor1d([1, 2, 3]);
+ * const x = tf.complex(real, imag);
+ *
+ * x.fft().print(); // tf.spectral.fft(x).print();
+ * ```
+ * @param input The complex input to compute an fft over.
+ *
+ * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
+ */
+ function fft_(input) {
+ assert(input.dtype === 'complex64', function () { return "The dtype for tf.spectral.fft() must be complex64 " +
+ ("but got " + input.dtype + "."); });
+ var inputs = { input: input };
+ return ENGINE.runKernel(FFT, inputs);
+ }
+ var fft = op({ fft_: fft_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Inverse fast Fourier transform.
+ *
+ * Computes the inverse 1-dimensional discrete Fourier transform over the
+ * inner-most dimension of input.
+ *
+ * ```js
+ * const real = tf.tensor1d([1, 2, 3]);
+ * const imag = tf.tensor1d([1, 2, 3]);
+ * const x = tf.complex(real, imag);
+ *
+ * x.ifft().print(); // tf.spectral.ifft(x).print();
+ * ```
+ * @param input The complex input to compute an ifft over.
+ *
+ * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
+ */
+ function ifft_(input) {
+ assert(input.dtype === 'complex64', function () { return "The dtype for tf.spectral.ifft() must be complex64 " +
+ ("but got " + input.dtype + "."); });
+ var inputs = { input: input };
+ return ENGINE.runKernel(IFFT, inputs);
+ }
+ var ifft = op({ ifft_: ifft_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Inversed real value input fast Fourier transform.
+ *
+ * Computes the 1-dimensional inversed discrete Fourier transform over the
+ * inner-most dimension of the real input.
+ *
+ * ```js
+ * const real = tf.tensor1d([1, 2, 3]);
+ * const imag = tf.tensor1d([0, 0, 0]);
+ * const x = tf.complex(real, imag);
+ *
+ * x.irfft().print();
+ * ```
+ * @param input The real value input to compute an irfft over.
+ *
+ * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
+ */
    // Inverse FFT of a real-valued signal's half-spectrum: reconstructs the
    // missing conjugate-symmetric half of the spectrum, runs ifft, and keeps
    // only the real part of the result.
    function irfft_(input) {
        var innerDimensionSize = input.shape[input.shape.length - 1];
        // Flatten all leading dimensions into a single batch dimension.
        var batch = input.size / innerDimensionSize;
        var ret;
        if (innerDimensionSize <= 2) {
            // Spectra of length 1 or 2 have no conjugate half to reconstruct.
            var complexInput = reshape(input, [batch, innerDimensionSize]);
            ret = ifft(complexInput);
        }
        else {
            // The length of unique components of the DFT of a real-valued signal
            // is 2 * (input_len - 1)
            var outputShape = [batch, 2 * (innerDimensionSize - 1)];
            var realInput = reshape(real(input), [batch, innerDimensionSize]);
            var imagInput = reshape(imag(input), [batch, innerDimensionSize]);
            // Mirror the interior bins (excluding DC and Nyquist) to rebuild the
            // conjugate half: real part mirrored, imaginary part mirrored and
            // negated.
            var realConjugate = reverse(slice(realInput, [0, 1], [batch, innerDimensionSize - 2]), 1);
            var imagConjugate = mul(reverse(slice(imagInput, [0, 1], [batch, innerDimensionSize - 2]), 1), scalar(-1));
            var r = concat([realInput, realConjugate], 1);
            var i = concat([imagInput, imagConjugate], 1);
            var complexInput = reshape(complex(r, i), [outputShape[0], outputShape[1]]);
            ret = ifft(complexInput);
        }
        // The imaginary part of the inverse transform of a conjugate-symmetric
        // spectrum is discarded.
        ret = real(ret);
        // reshape the result if the input is 3D tensor.
        if (input.rank === 3 && input.shape[0] !== 0) {
            var temp = ret;
            var batch_1 = input.shape[0];
            ret = reshape(ret, [batch_1, ret.shape[0] / batch_1, ret.shape[1]]);
            // Dispose the pre-reshape tensor to avoid leaking it.
            temp.dispose();
        }
        return ret;
    }
    var irfft = op({ irfft_: irfft_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Splits a `tf.Tensor` into sub tensors.
+ *
+ * If `numOrSizeSplits` is a number, splits `x` along dimension `axis`
+ * into `numOrSizeSplits` smaller tensors.
+ * Requires that `numOrSizeSplits` evenly divides `x.shape[axis]`.
+ *
+ * If `numOrSizeSplits` is a number array, splits `x` into
+ * `numOrSizeSplits.length` pieces. The shape of the `i`-th piece has the
+ * same size as `x` except along dimension `axis` where the size is
+ * `numOrSizeSplits[i]`.
+ *
+ * ```js
+ * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [2, 4]);
+ * const [a, b] = tf.split(x, 2, 1);
+ * a.print();
+ * b.print();
+ *
+ * const [c, d, e] = tf.split(x, [1, 2, 1], 1);
+ * c.print();
+ * d.print();
+ * e.print();
+ * ```
+ *
+ * @param x The input tensor to split.
+ * @param numOrSizeSplits Either an integer indicating the number of
+ * splits along the axis or an array of integers containing the sizes of
+ * each output tensor along the axis. If a number then it must evenly divide
+ * `x.shape[axis]`; otherwise the sum of sizes must match `x.shape[axis]`.
+ * Can contain one -1 indicating that dimension is to be inferred.
+ * @param axis The dimension along which to split. Defaults to 0 (the first
+ * dim).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
+ function split_(x, numOrSizeSplits, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $x = convertToTensor(x, 'x', 'split');
+ var inputs = { x: $x };
+ var attr = { numOrSizeSplits: numOrSizeSplits, axis: axis };
+ return ENGINE.runKernel(SplitV, inputs, attr);
+ }
+ var split = op({ split_: split_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Real value input fast Fourier transform.
+ *
+ * Computes the 1-dimensional discrete Fourier transform over the
+ * inner-most dimension of the real input.
+ *
+ * ```js
+ * const real = tf.tensor1d([1, 2, 3]);
+ *
+ * real.rfft().print();
+ * ```
+ * @param input The real value input to compute an rfft over.
+ *
+ * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}
+ */
    // FFT of a real-valued input: crops or zero-pads the innermost dimension to
    // fftLength (when given), runs a complex fft with a zero imaginary part, and
    // returns only the first floor(n/2)+1 bins (the rest are conjugates).
    function rfft_(input, fftLength) {
        assert(input.dtype === 'float32', function () { return "The dtype for rfft() must be real value but got " + input.dtype; });
        var innerDimensionSize = input.shape[input.shape.length - 1];
        // Flatten all leading dimensions into a single batch dimension.
        var batch = input.size / innerDimensionSize;
        var adjustedInput;
        if (fftLength != null && fftLength < innerDimensionSize) {
            // Need to crop
            var begin = input.shape.map(function (v) { return 0; });
            var size = input.shape.map(function (v) { return v; });
            size[input.shape.length - 1] = fftLength;
            adjustedInput = slice(input, begin, size);
            innerDimensionSize = fftLength;
        }
        else if (fftLength != null && fftLength > innerDimensionSize) {
            // Need to pad with zeros
            var zerosShape = input.shape.map(function (v) { return v; });
            zerosShape[input.shape.length - 1] = fftLength - innerDimensionSize;
            adjustedInput = concat([input, zeros(zerosShape)], input.shape.length - 1);
            innerDimensionSize = fftLength;
        }
        else {
            // fftLength absent or equal to the inner dimension: use input as-is.
            adjustedInput = input;
        }
        // Complement the input with zero imaginary numbers.
        var zerosInput = zerosLike(adjustedInput);
        var complexInput = reshape(complex(adjustedInput, zerosInput), [batch, innerDimensionSize]);
        var ret = fft(complexInput);
        // Exclude complex conjugations. These conjugations are put symmetrically.
        var half = Math.floor(innerDimensionSize / 2) + 1;
        var realValues = real(ret);
        var imagValues = imag(ret);
        // split returns [kept, conjugate-half]; only index 0 of each is used.
        var realComplexConjugate = split(realValues, [half, innerDimensionSize - half], realValues.shape.length - 1);
        var imagComplexConjugate = split(imagValues, [half, innerDimensionSize - half], imagValues.shape.length - 1);
        // Restore the original leading dimensions with the trimmed inner size.
        var outputShape = adjustedInput.shape.slice();
        outputShape[adjustedInput.shape.length - 1] = half;
        return reshape(complex(realComplexConjugate[0], imagComplexConjugate[0]), outputShape);
    }
    var rfft = op({ rfft_: rfft_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes square root of the input `tf.Tensor` element-wise: `y = sqrt(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 4, -1]);
+ *
+ * x.sqrt().print(); // or tf.sqrt(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
+ function sqrt_(x) {
+ var $x = convertToTensor(x, 'x', 'sqrt', 'float32');
+ var inputs = { x: $x };
+ return ENGINE.runKernel(Sqrt, inputs);
+ }
+ var sqrt = op({ sqrt_: sqrt_ });
+
+ /**
+ * Returns (a - b) * (a - b) element-wise.
+ * Supports broadcasting.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 4, 3, 16]);
+ * const b = tf.tensor1d([1, 2, 9, 4]);
+ *
+ * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b)
+ * ```
+ *
+ * ```js
+ * // Broadcast squared difference a with b.
+ * const a = tf.tensor1d([2, 4, 6, 8]);
+ * const b = tf.scalar(5);
+ *
+ * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b)
+ * ```
+ *
+ * @param a The first tensor.
+ * @param b The second tensor. Must have the same type as `a`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'}
+ */
+ function squaredDifference_(a, b) {
+ var _a;
+ var $a = convertToTensor(a, 'a', 'squaredDifference');
+ var $b = convertToTensor(b, 'b', 'squaredDifference');
+ _a = __read(makeTypesMatch($a, $b), 2), $a = _a[0], $b = _a[1];
+ assertAndGetBroadcastShape($a.shape, $b.shape);
+ var inputs = { a: $a, b: $b };
+ var attrs = {};
+ return ENGINE.runKernel(SquaredDifference, inputs, attrs);
+ }
+ var squaredDifference = op({ squaredDifference_: squaredDifference_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Removes dimensions of size 1 from the shape of a `tf.Tensor`.
+ *
+ * ```js
+ * const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]);
+ * x.squeeze().print();
+ * ```
+ *
+ * @param x The input tensor to be squeezed.
+ * @param axis An optional list of numbers. If specified, only
+ * squeezes the dimensions listed. The dimension index starts at 0. It
+ * is an error to squeeze a dimension that is not 1.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Transformations'}
+ */
+ function squeeze_(x, axis) {
+ var $x = convertToTensor(x, 'x', 'squeeze');
+ return reshape($x, squeezeShape($x.shape, axis).newShape);
+ }
+ var squeeze = op({ squeeze_: squeeze_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Stacks a list of rank-`R` `tf.Tensor`s into one rank-`(R+1)` `tf.Tensor`.
+ *
+ * ```js
+ * const a = tf.tensor1d([1, 2]);
+ * const b = tf.tensor1d([3, 4]);
+ * const c = tf.tensor1d([5, 6]);
+ * tf.stack([a, b, c]).print();
+ * ```
+ *
+ * @param tensors A list of tensor objects with the same shape and dtype.
+ * @param axis The axis to stack along. Defaults to 0 (the first dim).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
+ function stack_(tensors, axis) {
+ if (axis === void 0) { axis = 0; }
+ var $tensors = convertToTensorArray(tensors, 'tensors', 'stack', 'string_or_numeric');
+ assert($tensors.length >= 1, function () { return 'Pass at least one tensor to tf.stack'; });
+ if ($tensors.length > 0) {
+ assert(axis <= $tensors[0].rank, function () { return 'Axis must be <= rank of the tensor'; });
+ }
+ var inputs = $tensors;
+ var attrs = { axis: axis };
+ return ENGINE.runKernel(Pack, inputs, attrs);
+ }
+ var stack = op({ stack_: stack_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes step of the input `tf.Tensor` element-wise: `x > 0 ? 1 : alpha * x`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, 2, -1, -3]);
+ *
+ * x.step(.5).print(); // or tf.step(x, .5)
+ * ```
+ * @param x The input tensor.
+ * @param alpha The gradient when input is negative.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function step_(x, alpha) {
    // alpha is the value emitted for negative inputs (default 0).
    if (alpha === void 0) { alpha = 0.0; }
    var input = convertToTensor(x, 'x', 'step');
    return ENGINE.runKernel(Step, { x: input }, { alpha: alpha });
}
var step = op({ step_: step_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts a strided slice of a tensor.
+ *
+ * Roughly speaking, this op extracts a slice of size (end-begin)/stride from
+ * the given input tensor (x). Starting at the location specified by begin the
+ * slice continues by adding stride to the index until all dimensions are not
+ * less than end. Note that a stride can be negative, which causes a reverse
+ * slice.
+ *
+ * ```js
+ * const t = tf.tensor3d([1, 1, 1 ,2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+ * [3, 2, 3]);
+ * t.stridedSlice([1, 0, 0], [2, 1, 3], [1, 1, 1]).print() // [[[3, 3, 3]]]
+ * t.stridedSlice([1, 0, 0], [2, 2, 3], [1, 1, 1]).print() // [[[3, 3, 3],
+ * // [4, 4, 4]]]
+ * t.stridedSlice([1, -1, 0], [2, -3, 3], [1, -1, 1]).print() // [[[4, 4, 4],
+ * // [3, 3, 3]]]
+ * ```
+ *
+ * @param x The tensor to stride slice.
+ * @param begin The coordinates to start the slice from.
+ * @param end: The coordinates to end the slice at.
+ * @param strides: The size of the slice.
+ * @param beginMask: If the ith bit of beginMask is set, begin[i] is ignored
+ * and the fullest possible range in that dimension is used instead.
+ * @param endMask: If the ith bit of endMask is set, end[i] is ignored
+ * and the fullest possible range in that dimension is used instead.
+ * @param shrinkAxisMask: a bitmask where bit i implies that
+ * the ith specification should shrink the dimensionality. begin and end must
+ * imply a slice of size 1 in the dimension.
+ *
+ * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
+ */
function stridedSlice_(x, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask) {
    // All bitmask parameters default to 0, i.e. no dimension gets special
    // treatment unless the caller opts in.
    if (beginMask === void 0) { beginMask = 0; }
    if (endMask === void 0) { endMask = 0; }
    if (ellipsisMask === void 0) { ellipsisMask = 0; }
    if (newAxisMask === void 0) { newAxisMask = 0; }
    if (shrinkAxisMask === void 0) { shrinkAxisMask = 0; }
    var input = convertToTensor(x, 'x', 'stridedSlice', 'string_or_numeric');
    var kernelAttrs = {
        begin: begin,
        end: end,
        strides: strides,
        beginMask: beginMask,
        endMask: endMask,
        ellipsisMask: ellipsisMask,
        newAxisMask: newAxisMask,
        shrinkAxisMask: shrinkAxisMask
    };
    return ENGINE.runKernel(StridedSlice, { x: input }, kernelAttrs);
}
var stridedSlice = op({ stridedSlice_: stridedSlice_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes tan of the input `tf.Tensor` element-wise, `tan(x)`
+ *
+ * ```js
+ * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);
+ *
+ * x.tan().print(); // or tf.tan(x)
+ * ```
+ * @param x The input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Basic math'}
+ */
function tan_(x) {
    // Tan is only defined for float inputs; convertToTensor upcasts as needed.
    var input = convertToTensor(x, 'x', 'tan', 'float32');
    return ENGINE.runKernel(Tan, { x: input });
}
var tan = op({ tan_: tan_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-1 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor1d` as it makes the code more readable.
+ *
+ * ```js
+ * tf.tensor1d([1, 2, 3]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be array of numbers,
+ * or a `TypedArray`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function tensor1d(values, dtype) {
    assertNonNull(values);
    var detectedShape = inferShape(values, dtype);
    if (detectedShape.length !== 1) {
        throw new Error('tensor1d() requires values to be a flat/TypedArray');
    }
    // No user-supplied shape for rank-1 construction; the inferred one is used.
    return makeTensor(values, null, detectedShape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-2 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor2d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor2d([[1, 2], [3, 4]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor2d([1, 2, 3, 4], [2, 2]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. If not provided, it is inferred from
+ * `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function tensor2d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 2) {
        throw new Error('tensor2d() requires shape to have two numbers');
    }
    var detectedShape = inferShape(values, dtype);
    var detectedRank = detectedShape.length;
    // Accept either a nested number[][] (rank 2) or a flat array (rank 1).
    if (detectedRank !== 2 && detectedRank !== 1) {
        throw new Error('tensor2d() requires values to be number[][] or flat/TypedArray');
    }
    // A flat array alone is ambiguous; the target shape must be given.
    if (detectedRank === 1 && shape == null) {
        throw new Error('tensor2d() requires shape to be provided when `values` ' +
            'are a flat/TypedArray');
    }
    return makeTensor(values, shape, detectedShape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-4 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor4d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function tensor4d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 4) {
        throw new Error('tensor4d() requires shape to have four numbers');
    }
    var detectedShape = inferShape(values, dtype);
    var detectedRank = detectedShape.length;
    // Accept either a fully nested number[][][][] or a flat array.
    if (detectedRank !== 4 && detectedRank !== 1) {
        throw new Error('tensor4d() requires values to be number[][][][] or flat/TypedArray');
    }
    // A flat array alone is ambiguous; the target shape must be given.
    if (detectedRank === 1 && shape == null) {
        throw new Error('tensor4d() requires shape to be provided when `values` ' +
            'are a flat array');
    }
    return makeTensor(values, shape, detectedShape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-5 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor5d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor5d([[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function tensor5d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 5) {
        throw new Error('tensor5d() requires shape to have five numbers');
    }
    var detectedShape = inferShape(values, dtype);
    var detectedRank = detectedShape.length;
    // Accept either a fully nested number[][][][][] or a flat array.
    if (detectedRank !== 5 && detectedRank !== 1) {
        throw new Error('tensor5d() requires values to be ' +
            'number[][][][][] or flat/TypedArray');
    }
    // A flat array alone is ambiguous; the target shape must be given.
    if (detectedRank === 1 && shape == null) {
        throw new Error('tensor5d() requires shape to be provided when `values` ' +
            'are a flat array');
    }
    return makeTensor(values, shape, detectedShape, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates rank-6 `tf.Tensor` with the provided values, shape and dtype.
+ *
+ * The same functionality can be achieved with `tf.tensor`, but in general
+ * we recommend using `tf.tensor6d` as it makes the code more readable.
+ *
+ * ```js
+ * // Pass a nested array.
+ * tf.tensor6d([[[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]]).print();
+ * ```
+ * ```js
+ * // Pass a flat array and specify a shape.
+ * tf.tensor6d([1, 2, 3, 4, 5, 6, 7, 8], [1, 1, 2, 2, 2, 1]).print();
+ * ```
+ *
+ * @param values The values of the tensor. Can be nested array of numbers,
+ * or a flat array, or a `TypedArray`.
+ * @param shape The shape of the tensor. Optional. If not provided,
+ * it is inferred from `values`.
+ * @param dtype The data type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
function tensor6d(values, shape, dtype) {
    assertNonNull(values);
    if (shape != null && shape.length !== 6) {
        throw new Error('tensor6d() requires shape to have six numbers');
    }
    var detectedShape = inferShape(values, dtype);
    var detectedRank = detectedShape.length;
    // Accept either a fully nested number[][][][][][] or a flat array.
    if (detectedRank !== 6 && detectedRank !== 1) {
        throw new Error('tensor6d() requires values to be number[][][][][][] or ' +
            'flat/TypedArray');
    }
    // A flat array alone is ambiguous; the target shape must be given.
    if (detectedRank === 1 && shape == null) {
        throw new Error('tensor6d() requires shape to be provided when `values` ' +
            'are a flat array');
    }
    // Unlike the lower-rank factories, this one always passes a concrete shape.
    return makeTensor(values, shape || detectedShape, detectedShape, dtype);
}
+
+ /**
+ * Finds the values and indices of the `k` largest entries along the last
+ * dimension.
+ *
+ * If the input is a vector (rank=1), finds the k largest entries in the vector
+ * and outputs their values and indices as vectors. Thus values[j] is the j-th
+ * largest entry in input, and its index is indices[j].
+ * For higher rank inputs, computes the top k entries along the last dimension.
+ *
+ * If two elements are equal, the lower-index element appears first.
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 5], [4, 3]]);
+ * const {values, indices} = tf.topk(a);
+ * values.print();
+ * indices.print();
+ * ```
+ * @param x 1-D or higher `tf.Tensor` with last dimension being at least `k`.
+ * @param k Number of top elements to look for along the last dimension.
+ * @param sorted If true, the resulting `k` elements will be sorted by the
+ * values in descending order.
+ *
+ * @doc {heading: 'Operations', subheading: 'Evaluation'}
+ */
function topk_(x, k, sorted) {
    if (k === void 0) { k = 1; }
    if (sorted === void 0) { sorted = true; }
    var input = convertToTensor(x, 'x', 'topk');
    if (input.rank === 0) {
        throw new Error('topk() expects the input to be of rank 1 or higher');
    }
    // k is bounded by the size of the last (reduction) dimension.
    var lastDim = input.shape[input.shape.length - 1];
    if (k < 0) {
        throw new Error("'k' passed to topk() must be >= 0 but got " + k);
    }
    if (k > lastDim) {
        throw new Error("'k' passed to topk() must be <= the last dimension (" + lastDim + ") " +
            ("but got " + k));
    }
    // The kernel yields a [values, indices] pair.
    var outputs = __read(ENGINE.runKernel(TopK, { x: input }, { k: k, sorted: sorted }), 2);
    return { values: outputs[0], indices: outputs[1] };
}
var topk = op({ topk_: topk_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a `tf.Tensor` with values sampled from a truncated normal
+ * distribution.
+ *
+ * ```js
+ * tf.truncatedNormal([2, 2]).print();
+ * ```
+ *
+ * The generated values follow a normal distribution with specified mean and
+ * standard deviation, except that values whose magnitude is more than 2
+ * standard deviations from the mean are dropped and re-picked.
+ *
+ * @param shape An array of integers defining the output tensor shape.
+ * @param mean The mean of the normal distribution.
+ * @param stdDev The standard deviation of the normal distribution.
+ * @param dtype The data type of the output tensor.
+ * @param seed The seed for the random number generator.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
/**
 * Fills a buffer of the requested shape with samples from a truncated
 * normal distribution (values beyond 2 stddev are re-drawn by MPRandGauss).
 *
 * @param shape Output tensor shape.
 * @param mean Mean of the distribution (default 0).
 * @param stdDev Standard deviation (default 1).
 * @param dtype Output dtype; 'bool' is rejected.
 * @param seed Optional RNG seed for reproducibility.
 */
function truncatedNormal_(shape, mean, stdDev, dtype, seed) {
    if (mean === void 0) { mean = 0; }
    if (stdDev === void 0) { stdDev = 1; }
    if (dtype === 'bool') {
        // Fixed: the message was the literal text "$ { dtype }" (a broken
        // template literal) and never interpolated the actual dtype.
        throw new Error('Unsupported data type ' + dtype);
    }
    var randGauss = new MPRandGauss(mean, stdDev, dtype, true /* truncated */, seed);
    var res = buffer(shape, dtype);
    for (var i = 0; i < res.values.length; i++) {
        res.values[i] = randGauss.nextValue();
    }
    return res.toTensor();
}
var truncatedNormal = op({ truncatedNormal_: truncatedNormal_ });
+
+ /**
+ * Finds unique elements along an axis of a tensor.
+ *
+ * It returns a tensor `values` containing all of the unique elements along the
+ * `axis` of the given tensor `x` in the same order that they occur along the
+ * `axis` in `x`; `x` does not need to be sorted. It also returns a tensor
+ * `indices` the same size as the number of the elements in `x` along the `axis`
+ * dimension. It contains the index in the unique output `values`.
+ *
+ * ```js
+ * // A 1-D tensor
+ * const a = tf.tensor1d([1, 1, 2, 4, 4, 4, 7, 8, 8]);
+ * const {values, indices} = tf.unique(a);
+ * values.print(); // [1, 2, 4, 7, 8,]
+ * indices.print(); // [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ * ```
+ *
+ * ```js
+ * // A 2-D tensor with axis=0
+ * //
+ * // 'a' is: [[1, 0, 0],
+ * // [1, 0, 0],
+ * // [2, 0, 0]]
+ * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]);
+ * const {values, indices} = tf.unique(a, 0)
+ * values.print(); // [[1, 0, 0],
+ * // [2, 0, 0]]
+ * indices.print(); // [0, 0, 1]
+ * ```
+ *
+ * ```js
+ * // A 2-D tensor with axis=1
+ * //
+ * // 'a' is: [[1, 0, 0],
+ * // [1, 0, 0],
+ * // [2, 0, 0]]
+ * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]);
+ * const {values, indices} = tf.unique(a, 1)
+ * values.print(); // [[1, 0],
+ * // [1, 0],
+ * // [2, 0]]
+ * indices.print(); // [0, 1, 1]
+ * ```
+ * @param x A tensor (int32, string, bool).
+ * @param axis The axis of the tensor to find the unique elements.
+ * @returns [uniqueElements, indices] (see above for details)
+ *
+ * @doc {heading: 'Operations', subheading: 'Evaluation'}
+ */
function unique_(x, axis) {
    if (axis === void 0) { axis = 0; }
    var input = convertToTensor(x, 'x', 'unique', 'string_or_numeric');
    assert(input.rank > 0, function () { return 'The input tensor must be at least 1D'; });
    // The kernel yields a [uniqueValues, indices] pair.
    var outputs = __read(ENGINE.runKernel(Unique, { x: input }, { axis: axis }), 2);
    return { values: outputs[0], indices: outputs[1] };
}
var unique = op({ unique_: unique_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the sum along segments of a `tf.Tensor`.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ * const segmentIds = tf.tensor1d([1, 2, 0, 1], 'int32');
+ * const numSegments = 3;
+ *
+ * x.unsortedSegmentSum(segmentIds, numSegments).print()
+ * //or tf.unsortedSegmentSum(x, segmentIds, numSegments)
+ * ```
+ * @param x The `tf.Tensor` that will be summed along its segments.
+ * @param segmentIds A `tf.Tensor1D` whose rank is equal to the rank of `x`'s
+ * dimension along the `axis`. Maps each element of `x` to a segment.
+ * @param numSegments The number of distinct `segmentIds`.
+ *
+ * @doc {heading: 'Operations', subheading: 'Segment'}
+ */
function unsortedSegmentSum_(x, segmentIds, numSegments) {
    var input = convertToTensor(x, 'x', 'unsortedSegmentSum');
    // Segment ids must be int32 to index segments.
    var ids = convertToTensor(segmentIds, 'segmentIds', 'unsortedSegmentSum', 'int32');
    assert(isInt(numSegments), function () { return 'numSegments must be of dtype int'; });
    return ENGINE.runKernel(UnsortedSegmentSum, { x: input, segmentIds: ids }, { numSegments: numSegments });
}
var unsortedSegmentSum = op({ unsortedSegmentSum_: unsortedSegmentSum_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Unstacks a `tf.Tensor` of rank-`R` into a list of rank-`(R-1)` `tf.Tensor`s.
+ *
+ * ```js
+ * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ *
+ * tf.unstack(a).forEach(tensor => tensor.print());
+ * ```
+ *
+ * @param x A tensor object.
+ * @param axis The axis to unstack along. Defaults to 0 (the first dim).
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
function unstack_(x, axis) {
    if (axis === void 0) { axis = 0; }
    var input = convertToTensor(x, 'x', 'unstack', 'string_or_numeric');
    var rank = input.shape.length;
    // Negative axes count from the end, so the valid interval is [-rank, rank).
    assert(axis >= -rank && axis < rank, function () {
        return "Axis = " + axis + " is not in [-" + rank + ", " + rank + ")";
    });
    return ENGINE.runKernel(Unpack, { value: input }, { axis: axis });
}
var unstack = op({ unstack_: unstack_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new variable with the provided initial value.
+ * ```js
+ * const x = tf.variable(tf.tensor([1, 2, 3]));
+ * x.assign(tf.tensor([4, 5, 6]));
+ *
+ * x.print();
+ * ```
+ *
+ * @param initialValue Initial value for the tensor.
+ * @param trainable If true, optimizers are allowed to update it.
+ * @param name Name of the variable. Defaults to a unique id.
+ * @param dtype If set, initialValue will be converted to the given type.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Creation'}
+ */
// Thin wrapper: creates a Variable via the global engine. `trainable`
// defaults to true so optimizers may update it unless explicitly frozen.
function variable(initialValue, trainable, name, dtype) {
    if (trainable === void 0) { trainable = true; }
    // Allocation, naming and registration are handled by the engine.
    return ENGINE.makeVariable(initialValue, trainable, name, dtype);
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
function whereImpl(condShape, condVals) {
    // Flat indices of every truthy condition value, in row-major order.
    var trueIndices = [];
    for (var i = 0; i < condVals.length; i++) {
        if (condVals[i]) {
            trueIndices.push(i);
        }
    }
    // A buffer of the condition's shape is only used to translate flat
    // indices back into multi-dimensional coordinates.
    var locator = buffer(condShape, 'int32');
    var out = buffer([trueIndices.length, condShape.length], 'int32');
    for (var row = 0; row < trueIndices.length; row++) {
        var coords = locator.indexToLoc(trueIndices[row]);
        // Each output row holds one coordinate tuple.
        out.values.set(coords, row * condShape.length);
    }
    return out.toTensor();
}
+
+ /**
+ * Returns the coordinates of true elements of condition.
+ *
+ * The coordinates are returned in a 2-D tensor where the first dimension (rows)
+ * represents the number of true elements, and the second dimension (columns)
+ * represents the coordinates of the true elements. Keep in mind, the shape of
+ * the output tensor can vary depending on how many true values there are in
+ * input. Indices are output in row-major order. The resulting tensor has the
+ * shape `[numTrueElems, condition.rank]`.
+ *
+ * This is analogous to calling the python `tf.where(cond)` without an x or y.
+ *
+ * ```js
+ * const cond = tf.tensor1d([false, false, true], 'bool');
+ * const result = await tf.whereAsync(cond);
+ * result.print();
+ * ```
+ *
+ * @doc {heading: 'Operations', subheading: 'Logical'}
+ */
// Async counterpart of where(): downloads the condition values to the CPU,
// then computes the coordinates of true elements with whereImpl.
// (__awaiter/__generator below are down-leveled async/await machinery;
// case 0 runs up to the first await, case 1 resumes after it.)
function whereAsync_(condition) {
    return __awaiter(this, void 0, void 0, function () {
        var $condition, vals, res;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    // Coerce the input to a bool tensor.
                    $condition = convertToTensor(condition, 'condition', 'whereAsync', 'bool');
                    // await $condition.data() — asynchronous readback.
                    return [4 /*yield*/, $condition.data()];
                case 1:
                    vals = _a.sent();
                    res = whereImpl($condition.shape, vals);
                    // Dispose the intermediate tensor if conversion created one.
                    if (condition !== $condition) {
                        $condition.dispose();
                    }
                    return [2 /*return*/, res];
            }
        });
    });
}
var whereAsync = whereAsync_;
+
+ /**
+ * Apply boolean mask to tensor.
+ *
+ * ```js
+ * const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
+ * const mask = tf.tensor1d([1, 0, 1], 'bool');
+ * const result = await tf.booleanMaskAsync(tensor, mask);
+ * result.print();
+ * ```
+ *
+ * @param tensor N-D tensor.
+ * @param mask K-D boolean tensor, K <= N and K must be known statically.
+ * @param axis A 0-D int Tensor representing the axis in tensor to mask from.
+ * By default, axis is 0 which will mask from the first dimension.
+ * Otherwise K + axis <= N.
+ *
+ * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
+ */
// Async boolean-mask: collapses the K masked dimensions into one, resolves
// the true positions of the mask on the CPU (via whereAsync), then gathers
// the kept slices. (__awaiter/__generator below are down-leveled
// async/await machinery; case 0 runs up to the await, case 1 resumes.)
function booleanMaskAsync_(tensor, mask, axis) {
    return __awaiter(this, void 0, void 0, function () {
        var $tensor, $mask, axisFrom, maskDim, tensorShape, leadingSize, i, targetTensorShape, reshapedTensor, reshapedMask, positivePositions, indices, res;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    $tensor = convertToTensor(tensor, 'tensor', 'boolMask');
                    $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool');
                    // The mask covers maskDim consecutive dims of the tensor,
                    // starting at axisFrom (default 0).
                    axisFrom = axis == null ? 0 : axis;
                    maskDim = $mask.rank;
                    tensorShape = $tensor.shape;
                    assert(maskDim > 0, function () { return 'mask cannot be scalar'; });
                    assertShapesMatch(tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape, "mask's shape must match the first K dimensions of tensor's shape,");
                    // Product of the masked dims: they collapse into one axis.
                    leadingSize = 1;
                    for (i = axisFrom; i < axisFrom + maskDim; i++) {
                        leadingSize *= tensorShape[i];
                    }
                    targetTensorShape = tensorShape.slice(0, axisFrom)
                        .concat([leadingSize], tensorShape.slice(axisFrom + maskDim));
                    reshapedTensor = reshape($tensor, targetTensorShape);
                    reshapedMask = reshape($mask, [-1]);
                    // await whereAsync(...) — downloads mask values to the CPU.
                    return [4 /*yield*/, whereAsync(reshapedMask)];
                case 1:
                    positivePositions = _a.sent();
                    // whereAsync returns shape [numTrue, 1]; squeeze to 1-D indices.
                    indices = squeeze(positivePositions, [1]);
                    res = gather(reshapedTensor, indices, axisFrom);
                    // Ensure no memory leak.
                    if (tensor !== $tensor) {
                        $tensor.dispose();
                    }
                    if (mask !== $mask) {
                        $mask.dispose();
                    }
                    indices.dispose();
                    reshapedTensor.dispose();
                    reshapedMask.dispose();
                    positivePositions.dispose();
                    return [2 /*return*/, res];
            }
        });
    });
}
var booleanMaskAsync = booleanMaskAsync_;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the norm of scalar, vectors, and matrices.
+ * This function can compute several different vector norms (the 1-norm, the
+ * Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0)
+ * and matrix norms (Frobenius, 1-norm, and inf-norm).
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 3, 4]);
+ *
+ * x.norm().print(); // or tf.norm(x)
+ * ```
+ *
+ * @param x The input array.
+ * @param ord Optional. Order of the norm. Supported norm types are
+ * following:
+ *
+ * | ord | norm for matrices | norm for vectors
+ * |------------|---------------------------|---------------------
+ * |'euclidean' |Frobenius norm |2-norm
+ * |'fro' |Frobenius norm |
+ * |Infinity |max(sum(abs(x), axis=1)) |max(abs(x))
+ * |-Infinity |min(sum(abs(x), axis=1)) |min(abs(x))
+ * |1 |max(sum(abs(x), axis=0)) |sum(abs(x))
+ * |2 | |sum(abs(x)^2)^1/2*
+ *
+ * @param axis Optional. If axis is null (the default), the input is
+ * considered a vector and a single vector norm is computed over the entire
+ * set of values in the Tensor, i.e. norm(x, ord) is equivalent
+ * to norm(x.reshape([-1]), ord). If axis is a integer, the input
+ * is considered a batch of vectors, and axis determines the axis in x
+ * over which to compute vector norms. If axis is a 2-tuple of integer it is
+ * considered a batch of matrices and axis determines the axes in NDArray
+ * over which to compute a matrix norm.
+ * @param keepDims Optional. If true, the norm have the same dimensionality
+ * as the input.
+ *
+ * @doc {heading: 'Operations', subheading: 'Matrices'}
+ */
/**
 * Entry point for `tf.norm`: delegates the actual reduction to `normImpl`
 * and then optionally re-expands the reduced axes when `keepDims` is set.
 *
 * @param x Input tensor (or tensor-like) whose norm is computed.
 * @param ord Norm order; defaults to 'euclidean'.
 * @param axis Axis (number), pair of axes, or null for a whole-tensor norm.
 * @param keepDims When true, the result keeps the reduced dimensions as 1s.
 * @returns The norm tensor, reshaped per `keepDims`.
 */
function norm_(x, ord, axis, keepDims) {
  if (ord === void 0) { ord = 'euclidean'; }
  if (axis === void 0) { axis = null; }
  if (keepDims === void 0) { keepDims = false; }
  x = convertToTensor(x, 'x', 'norm');
  var result = normImpl(x, ord, axis);
  // By default the reduced axes are dropped; restore them as size-1 dims
  // when the caller asked for keepDims.
  var outShape = result.shape;
  if (keepDims) {
    var reducedAxes = parseAxisParam(axis, x.shape);
    outShape = expandShapeToKeepDim(result.shape, reducedAxes);
  }
  return reshape(result, outShape);
}
/**
 * Core norm computation shared by `norm_`.
 *
 * Handles three cases: scalar input (absolute value), vector norms (rank-1
 * input or a single reduction axis), and matrix norms (a pair of axes,
 * assumed ordered axis[0] < axis[1]).
 *
 * @param x Input tensor.
 * @param p Norm order: 1, 2, Infinity, -Infinity, 'euclidean', or 'fro'.
 * @param axis Reduction axis/axes, or null to flatten first.
 */
function normImpl(x, p, axis) {
  if (axis === void 0) { axis = null; }
  // Scalar: the norm is just the absolute value.
  if (x.rank === 0) {
    return abs(x);
  }
  // Higher-rank tensor with no axis: flatten and treat as one long vector.
  if (x.rank !== 1 && axis === null) {
    return normImpl(reshape(x, [-1]), p, axis);
  }
  var isVectorNorm = x.rank === 1 || typeof axis === 'number' ||
    Array.isArray(axis) && axis.length === 1;
  if (isVectorNorm) {
    switch (p) {
      case 1:
        return sum(abs(x), axis);
      case Infinity:
        return max(abs(x), axis);
      case -Infinity:
        return min(abs(x), axis);
      case 'euclidean':
      case 2:
        // norm(x, 2) = sum(abs(xi) ^ 2) ^ 1/2
        return sqrt(sum(pow(abs(x), scalar(2, 'int32')), axis));
      default:
        throw new Error("Error in norm: invalid ord value: " + p);
    }
  }
  // matrix (assumption axis[0] < axis[1])
  if (Array.isArray(axis) && axis.length === 2) {
    switch (p) {
      case 1:
        // After reducing over axis[0], the remaining axes shift down by
        // one, hence the `axis[1] - 1` for the outer max.
        return max(sum(abs(x), axis[0]), axis[1] - 1);
      case Infinity:
        return max(sum(abs(x), axis[1]), axis[0]);
      case -Infinity:
        return min(sum(abs(x), axis[1]), axis[0]);
      case 'fro':
      case 'euclidean':
        // norm(x) = sqrt(sum(pow(x, 2)))
        return sqrt(sum(square(x), axis));
      default:
        throw new Error("Error in norm: invalid ord value: " + p);
    }
  }
  throw new Error("Error in norm: invalid axis: " + axis);
}
var norm = op({ norm_: norm_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Compute the moving average of a variable.
+ *
+ * Without zeroDebias, the moving average operation is defined by:
+ * `v += delta`
+ * where
+ * `delta = (1 - decay) * (x - v)`
+ *
+ * With zeroDebias (default), the `delta` term is scaled to debias the
+ * effect of the (assumed) zero-initialization of `v`.
+ * `delta /= (1 - decay ^ step)`
+ *
+ * For more details on the zero-debiasing algorithm, see:
+ * https://arxiv.org/abs/1412.6980
+ *
+ * Note that this function is completely stateless and does not keep track of
+ * step count. The step count needs to be maintained by the caller and passed
+ * in as `step`.
+ *
+ * @param v The current moving average value.
+ * @param x New input value, must have the same shape and dtype as `v`.
+ * @param decay The decay factor. Typical values are 0.95 and 0.99.
+ * @param step Step count.
+ * @param zeroDebias: Whether zeroDebias is to be performed (default: `true`).
+ * @returns The new moving average value.
+ *
+ * @doc {heading: 'Operations', subheading: 'Moving Average'}
+ */
/**
 * Stateless exponential moving-average update:
 *   v_new = v + (1 - decay) * (x - v)
 * optionally divided by the zero-debias factor (1 - decay^step).
 *
 * @param v Current moving-average value.
 * @param x New observation; must match `v` in shape and dtype.
 * @param decay Decay factor (e.g. 0.95, 0.99).
 * @param step Step count; required when `zeroDebias` is true.
 * @param zeroDebias Whether to apply zero-initialization debiasing
 *     (default true).
 * @returns The updated moving-average tensor.
 */
function movingAverage_(v, x, decay, step, zeroDebias) {
  if (zeroDebias === void 0) { zeroDebias = true; }
  var $v = convertToTensor(v, 'v', 'movingAverage');
  var $x = convertToTensor(x, 'x', 'movingAverage');
  var $decay = convertToTensor(decay, 'decay', 'movingAverage');
  assertTypesMatch($v, $x);
  assert(arraysEqual($v.shape, $x.shape), function () { return 'Shape mismatch in v and x'; });
  var unity = scalar(1);
  var decayComplement = sub(unity, $decay);
  var delta = mul(sub($x, $v), decayComplement);
  if (zeroDebias) {
    assert(step != null, function () { return 'When using zeroDebias: true, step is required.'; });
    var $step = convertToTensor(step, 'step', 'movingAverage');
    // Scale the update to compensate for v's (assumed) zero initialization.
    delta = div(delta, sub(unity, pow($decay, $step)));
  }
  return add($v, delta);
}
var movingAverage = op({ movingAverage_: movingAverage_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates a new tensor by applying sparse updates to individual
+ * values or slices within a zero tensor of the given shape tensor according to
+ * indices. This operator is the inverse of the `tf.gatherND` operator which
+ * extracts values or slices from a given tensor.
+ *
+ * ```js
+ * const indices = tf.tensor2d([4, 3, 1, 7], [4, 1], 'int32');
+ * const updates = tf.tensor1d([9, 10, 11, 12]);
+ * const shape = [8];
+ * tf.scatterND(indices, updates, shape).print() //[0, 11, 0, 10, 9, 0, 0, 12]
+ * ```
+ *
+ * @param indices The tensor contains the indices into the output tensor.
+ * @param updates The tensor contains the value for the indices.
+ * @param shape: The shape of the output tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
+ */
/**
 * Scatters `updates` into a zero tensor of the given `shape` at positions
 * named by `indices` (inverse of `tf.gatherND`).
 *
 * @param indices Int32 tensor of indices into the output tensor.
 * @param updates Values to place at those indices.
 * @param shape Shape of the dense output tensor.
 * @returns The scattered tensor produced by the ScatterNd kernel.
 */
function scatterND_(indices, updates, shape) {
  var $indices = convertToTensor(indices, 'indices', 'scatterND', 'int32');
  var $updates = convertToTensor(updates, 'updates', 'scatterND');
  validateInput$1($updates, $indices, shape);
  // tslint:disable-next-line: no-unnecessary-type-assertion
  return ENGINE.runKernel(ScatterNd, { indices: $indices, updates: $updates }, { shape: shape });
}
var scatterND = op({ scatterND_: scatterND_ });
+
/**
 * Validate sparseToDense inputs.
 *
 * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32.
 * sparseIndices[i] contains the complete index where sparseValues[i] will be
 * placed.
 * @param sparseValues A 0-D or 1-D Tensor. Values
 * corresponding to each row of sparseIndices, or a scalar value to be used for
 * all sparse indices.
 * @param outputShape number[]. Shape of the dense output tensor.
 * @param defaultValues Scalar tensor whose dtype must match sparseValues'
 * dtype; used for every position not listed in sparseIndices.
 * @throws Error when any dtype/rank/shape invariant is violated.
 */
function validateInput(sparseIndices, sparseValues, outputShape, defaultValues) {
  if (sparseIndices.dtype !== 'int32') {
    throw new Error('tf.sparseToDense() expects the indices to be int32 type,' +
      (" but the dtype was " + sparseIndices.dtype + "."));
  }
  if (sparseIndices.rank > 2) {
    throw new Error('sparseIndices should be a scalar, vector, or matrix,' +
      (" but got shape " + sparseIndices.shape + "."));
  }
  // A scalar index counts as a single element addressing a single dimension;
  // a matrix is [numElems, numDims].
  var numElems = sparseIndices.rank > 0 ? sparseIndices.shape[0] : 1;
  var numDims = sparseIndices.rank > 1 ? sparseIndices.shape[1] : 1;
  if (outputShape.length !== numDims) {
    // Bug fix: message previously read 'elements:,' with a stray comma.
    throw new Error('outputShape has incorrect number of elements:' +
      (" " + outputShape.length + ", should be: " + numDims + "."));
  }
  var numValues = sparseValues.size;
  if (!(sparseValues.rank === 0 ||
    sparseValues.rank === 1 && numValues === numElems)) {
    throw new Error('sparseValues has incorrect shape ' +
      (sparseValues.shape + ", should be [] or [" + numElems + "]"));
  }
  if (sparseValues.dtype !== defaultValues.dtype) {
    throw new Error('sparseValues.dtype must match defaultValues.dtype');
  }
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Converts a sparse representation into a dense tensor.
+ *
+ * Builds an array dense with shape outputShape such that:
+ *
+ * // If sparseIndices is scalar
+ * dense[i] = (i == sparseIndices ? sparseValues : defaultValue)
+ *
+ * // If sparseIndices is a vector, then for each i
+ * dense[sparseIndices[i]] = sparseValues[i]
+ *
+ * // If sparseIndices is an n by d matrix, then for each i in [0, n)
+ * dense[sparseIndices[i][0], ..., sparseIndices[i][d-1]] = sparseValues[i]
+ * All other values in dense are set to defaultValue. If sparseValues is a
+ * scalar, all sparse indices are set to this single value.
+ *
+ * If indices are repeated the final value is summed over all values for those
+ * indices.
+ *
+ * ```js
+ * const indices = tf.tensor1d([4, 5, 6, 1, 2, 3], 'int32');
+ * const values = tf.tensor1d([10, 11, 12, 13, 14, 15], 'float32');
+ * const shape = [8];
+ * tf.sparseToDense(indices, values, shape).print();
+ * ```
+ *
+ * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32.
+ * sparseIndices[i] contains the complete index where sparseValues[i] will be
+ * placed.
+ * @param sparseValues A 0-D or 1-D Tensor. Values
+ * corresponding to each row of sparseIndices, or a scalar value to be used for
+ * all sparse indices.
+ * @param outputShape Shape of the dense output tensor. the type is inferred.
+ * @param defaultValue Scalar. Value to set for indices not specified in
+ * sparseIndices. Defaults to zero.
+ *
+ * @doc {heading: 'Operations', subheading: 'Normalization'}
+ */
/**
 * Builds a dense tensor of shape `outputShape` with `sparseValues` placed at
 * `sparseIndices` and `defaultValue` everywhere else.
 *
 * @param sparseIndices 0-D/1-D/2-D int32 tensor of positions.
 * @param sparseValues 0-D or 1-D tensor (or scalar) of values to place.
 * @param outputShape Shape of the dense result.
 * @param defaultValue Fill value for unspecified positions (default 0);
 *     converted to sparseValues' dtype.
 * @returns The dense tensor produced by the SparseToDense kernel.
 */
function sparseToDense_(sparseIndices, sparseValues, outputShape, defaultValue) {
  if (defaultValue === void 0) { defaultValue = 0; }
  var $sparseIndices = convertToTensor(sparseIndices, 'sparseIndices', 'sparseToDense', 'int32');
  var $sparseValues = convertToTensor(sparseValues, 'sparseValues', 'sparseToDense');
  // The default value inherits sparseValues' dtype so the kernel sees a
  // single consistent dtype.
  var $defaultValue = convertToTensor(defaultValue, 'defaultValue', 'sparseToDense', $sparseValues.dtype);
  validateInput($sparseIndices, $sparseValues, outputShape, $defaultValue);
  var kernelInputs = {
    sparseIndices: $sparseIndices,
    sparseValues: $sparseValues,
    defaultValue: $defaultValue
  };
  return ENGINE.runKernel(SparseToDense, kernelInputs, { outputShape: outputShape });
}
var sparseToDense = op({ sparseToDense_: sparseToDense_ });
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Gather slices from input tensor into a Tensor with shape specified by
+ * `indices`.
+ *
+ * `indices` is an K-dimensional integer tensor, best thought of as a
+ * (K-1)-dimensional tensor of indices into input, where each element defines a
+ * slice of input:
+ * output[\\(i_0, ..., i_{K-2}\\)] = input[indices[\\(i_0, ..., i_{K-2}\\)]]
+ *
+ * Whereas in `tf.gather`, `indices` defines slices into the first dimension of
+ * input, in `tf.gatherND`, `indices` defines slices into the first N dimensions
+ * of input, where N = indices.shape[-1].
+ *
+ * The last dimension of indices can be at most the rank of input:
+ * indices.shape[-1] <= input.rank
+ *
+ * The last dimension of `indices` corresponds to elements
+ * (if indices.shape[-1] == input.rank) or slices
+ * (if indices.shape[-1] < input.rank) along dimension indices.shape[-1] of
+ * input.
+ * The output tensor has shape
+ * indices.shape[:-1] + input.shape[indices.shape[-1]:]
+ *
+ * Note that on CPU, if an out of bound index is found, an error is returned. On
+ * GPU, if an out of bound index is found, a 0 is stored in the corresponding
+ * output value.
+ *
+ * ```js
+ * const indices = tf.tensor2d([0, 1, 1, 0], [2,2], 'int32');
+ * const input = tf.tensor2d([9, 10, 11, 12], [2, 2]);
+ * tf.gatherND(input, indices).print() // [10, 11]
+ * ```
+ *
+ * @param x The tensor from which to gather values.
+ * @param indices Index tensor, must be of type int32.
+ *
+ * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
+ */
/**
 * Gathers slices from `x` at the positions given by `indices`
 * (N-dimensional generalization of `tf.gather`).
 *
 * @param x Tensor to gather values from (numeric or string).
 * @param indices Int32 index tensor; last dimension indexes into `x`.
 * @returns The gathered tensor produced by the GatherNd kernel.
 */
function gatherND_(x, indices) {
  var $indices = convertToTensor(indices, 'indices', 'gatherND', 'int32');
  var $x = convertToTensor(x, 'x', 'gatherND', 'string_or_numeric');
  var kernelInputs = { params: $x, indices: $indices };
  return ENGINE.runKernel(GatherNd, kernelInputs);
}
var gatherND = op({ gatherND_: gatherND_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Normalize noise shape based on provided tensor and noise shape.
+ *
+ * @param x Tensor.
+ * @param noiseShape The shape for the randomly generated keep/drop flags, as
+ * an array of numbers. Optional.
+ * @returns Normalized noise shape.
+ */
/**
 * Normalize a dropout noise shape against a tensor's shape.
 *
 * Null entries in `noiseShape` are replaced by the corresponding dimension
 * of `x` when the two shapes have the same length; otherwise `noiseShape`
 * is returned as provided.
 *
 * @param x Tensor whose shape supplies missing dimensions.
 * @param noiseShape Optional array of dims (may contain nulls).
 * @returns A fully-specified noise shape.
 */
function getNoiseShape(x, noiseShape) {
  // No noise shape: use a copy of x's own shape.
  if (noiseShape == null) {
    return x.shape.slice();
  }
  if (arraysEqual(x.shape, noiseShape)) {
    return noiseShape;
  }
  if (x.shape.length === noiseShape.length) {
    // Fill null entries from x's shape, keep explicit entries verbatim.
    return noiseShape.map(function (dim, i) {
      return dim == null && x.shape[i] != null ? x.shape[i] : dim;
    });
  }
  return noiseShape;
}
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes dropout.
+ *
+ * ```js
+ * const x = tf.tensor1d([1, 2, 2, 1]);
+ * const rate = 0.75;
+ * const output = tf.dropout(x, rate);
+ * output.print();
+ * ```
+ *
+ * @param x A floating point Tensor or TensorLike.
+ * @param rate A float in the range [0, 1). The probability that each element
+ * of x is discarded.
+ * @param noiseShape An array of numbers of type int32, representing the
+ * shape for randomly generated keep/drop flags. If the noiseShape has null
+ * value, it will be automatically replaced with the x's relative dimension
+ * size. Optional.
+ * @param seed Used to create random seeds. Optional.
+ * @returns A Tensor of the same shape of x.
+ *
+ * @doc {heading: 'Operations', subheading: 'Dropout'}
+ */
/**
 * Dropout: zeroes each element of `x` with probability `rate` and scales
 * the survivors by 1 / (1 - rate) so the expected value is preserved.
 *
 * @param x Float32 tensor (or tensor-like).
 * @param rate Drop probability in [0, 1).
 * @param noiseShape Optional shape for the keep/drop flags; null entries
 *     fall back to x's dimensions.
 * @param seed Optional random seed.
 * @returns Tensor of the same shape as `x`.
 */
function dropout_(x, rate, noiseShape, seed) {
  var $x = convertToTensor(x, 'x', 'dropout');
  assert($x.dtype === 'float32', function () {
    return "x has to be a floating point tensor since it's going to be " +
      ("scaled, but got a " + $x.dtype + " tensor instead.");
  });
  assert(rate >= 0 && rate < 1, function () {
    return "rate must be a float in the range [0, 1), but got " + rate + ".";
  });
  // rate 0 is the identity; clone only when conversion didn't already copy.
  if (rate === 0) {
    return x instanceof Tensor ? $x.clone() : $x;
  }
  var keepRate = 1 - rate;
  // floor(uniform[0,1) + keepRate) is 1 with probability keepRate, else 0;
  // dividing by keepRate rescales the kept activations.
  var flags = floor(add(randomUniform(getNoiseShape($x, noiseShape), 0, 1, 'float32', seed), keepRate));
  return mul($x, div(flags, keepRate));
}
var dropout = op({ dropout_: dropout_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Returns the smallest power of two that is >= `value`
 * (i.e. 2**N for the least integer N with 2**N >= value).
 * Kept as log(value)/log(2) rather than Math.log2 so rounding behavior
 * matches the original implementation bit-for-bit.
 */
function enclosingPowerOfTwo(value) {
  var exponent = Math.ceil(Math.log(value) / Math.log(2.0));
  return Math.floor(Math.pow(2, exponent));
}
/**
 * Builds a generalized cosine window (e.g. Hann/Hamming) of the given
 * length: w[i] = a - b * cos(2*pi*i / (windowLength + even - 1)).
 *
 * @param windowLength Number of samples in the window.
 * @param a Constant offset coefficient.
 * @param b Cosine amplitude coefficient.
 * @returns A float32 1-D tensor of window values.
 */
function cosineWindow(windowLength, a, b) {
  // 1 for even lengths, 0 for odd — adjusts the periodic denominator.
  var even = 1 - windowLength % 2;
  var denominator = windowLength + even - 1;
  var windowValues = new Float32Array(windowLength);
  for (var i = 0; i < windowLength; ++i) {
    windowValues[i] = a - b * Math.cos((2.0 * Math.PI * i) / denominator);
  }
  return tensor1d(windowValues, 'float32');
}
+
+ /**
+ * Returns whether the targets are in the top K predictions.
+ *
+ * ```js
+ * const predictions = tf.tensor2d([[20, 10, 40, 30], [30, 50, -20, 10]]);
+ * const targets = tf.tensor1d([2, 0]);
+ * const precision = await tf.inTopKAsync(predictions, targets);
+ * precision.print();
+ * ```
+ * @param predictions 2-D or higher `tf.Tensor` with last dimension being
+ * at least `k`.
+ * @param targets 1-D or higher `tf.Tensor`.
+ * @param k Optional Number of top elements to look at for computing precision,
+ * default to 1.
+ *
+ * @doc {heading: 'Operations', subheading: 'Evaluation'}
+ */
+ // NOTE: ES5 down-compilation of an async function. The __generator switch
+ // below is a resumable state machine: case 0 validates inputs and yields the
+ // first data() download, cases 1 and 2 receive the awaited typed arrays via
+ // _b.sent(), then the top-k membership test runs synchronously on the CPU.
+ function inTopKAsync_(predictions, targets, k) {
+ if (k === void 0) { k = 1; }
+ return __awaiter(this, void 0, void 0, function () {
+ var $predictions, $targets, lastDim, predictionsVals, targetsVals, _a, batch, size, precision, b, offset, vals, valAndInd, i, i;
+ return __generator(this, function (_b) {
+ switch (_b.label) {
+ case 0:
+ $predictions = convertToTensor(predictions, 'predictions', 'inTopK');
+ $targets = convertToTensor(targets, 'targets', 'inTopK');
+ // predictions must be rank >= 2, exactly one rank above targets, and
+ // agree with targets on every dimension except the last (class) one.
+ assert($predictions.rank > 1, function () { return 'inTopK() expects the predictions to be of rank 2 or higher, ' +
+ ("but got " + $predictions.rank); });
+ assert($predictions.rank - 1 === $targets.rank, function () { return "predictions rank should be 1 larger than " +
+ "targets rank, but got predictions rank " +
+ ($predictions.rank + " and targets rank " + $targets.rank); });
+ assertShapesMatch($predictions.shape.slice(0, $predictions.shape.length - 1), $targets.shape, "predictions's shape should be align with the targets' shape, " +
+ 'except the last dimension.');
+ lastDim = $predictions.shape[$predictions.shape.length - 1];
+ assert(k > 0 && k <= lastDim, function () { return "'k' passed to inTopK() must be > 0 && <= the predictions last " +
+ ("dimension (" + lastDim + "), but got " + k); });
+ // Await the prediction values (download from backend).
+ return [4 /*yield*/, $predictions.data()];
+ case 1:
+ predictionsVals = _b.sent();
+ // Await the target values.
+ return [4 /*yield*/, $targets.data()];
+ case 2:
+ targetsVals = _b.sent();
+ // Treat predictions as [batch, size] rows; one bool result per row.
+ _a = __read([predictionsVals.length / lastDim, lastDim], 2), batch = _a[0], size = _a[1];
+ precision = getTypedArrayFromDType('bool', batch);
+ for (b = 0; b < batch; b++) {
+ offset = b * size;
+ vals = predictionsVals.subarray(offset, offset + size);
+ valAndInd = [];
+ for (i = 0; i < vals.length; i++) {
+ valAndInd.push({ value: vals[i], index: i });
+ }
+ // Sort descending by value so the first k entries are the top-k.
+ valAndInd.sort(function (a, b) { return b.value - a.value; });
+ precision[b] = 0;
+ for (i = 0; i < k; i++) {
+ if (valAndInd[i].index === targetsVals[b]) {
+ precision[b] = 1;
+ break;
+ }
+ }
+ }
+ // Dispose only tensors this function created via convertToTensor;
+ // caller-owned tensors pass through untouched.
+ if (predictions !== $predictions) {
+ $predictions.dispose();
+ }
+ if (targets !== $targets) {
+ $targets.dispose();
+ }
+ // Output precision has the same shape as targets.
+ return [2 /*return*/, tensor(precision, $targets.shape, 'bool')];
+ }
+ });
+ });
+ }
+ var inTopKAsync = inTopKAsync_;
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the derivative of the filter of a 2D convolution.
+ *
+ * @param x The input tensor, of rank 4 or rank 3 of shape
+ * [batch, height, width, inChannels]. If rank 3, batch of 1 is assumed.
+ * @param dy The dy image, of rank 4 or rank 3, of shape
+ * [batch, height, width, outDepth]. If rank 3, batch of 1 is assumed.
+ * @param filterShape The shape of the filter, length 4,
+ * [filterHeight, filterWidth, inDepth, outDepth].
+ * @param strides The strides of the convolution: [strideHeight,
+ * strideWidth].
+ * @param pad A string from: 'same', 'valid'. The type of padding algorithm
+ * used in the forward prop of the op.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels].
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ */
/**
 * Computes the derivative of the filter of a 2D convolution.
 *
 * @param x Input tensor, rank 4 or rank 3 ([batch, height, width,
 *     inChannels]; rank 3 implies batch of 1).
 * @param dy Upstream gradient image, rank 4 or rank 3.
 * @param filterShape Length-4 filter shape
 *     [filterHeight, filterWidth, inDepth, outDepth].
 * @param strides Convolution strides [strideHeight, strideWidth].
 * @param pad Padding used in the forward pass ('same' or 'valid').
 * @param dataFormat 'NHWC' (default) or 'NCHW'.
 * @param dimRoundingMode 'ceil', 'round', or 'floor'; defaults to truncate.
 * @returns The filter gradient from the Conv2DBackpropFilter kernel.
 */
function conv2DBackpropFilter_(x, dy, filterShape, strides, pad, dataFormat, dimRoundingMode) {
  if (dataFormat === void 0) { dataFormat = 'NHWC'; }
  // Promote rank-3 inputs to rank-4 by prepending a batch dim of 1.
  var x4D = x;
  if (x.rank === 3) {
    x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);
  }
  var dy4D = dy;
  if (dy4D.rank === 3) {
    dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);
  }
  assert(x4D.rank === 4, function () { return "Error in conv2dDerFilter: input must be rank 4, but got shape " +
    (x4D.shape + "."); });
  assert(dy4D.rank === 4, function () { return "Error in conv2dDerFilter: dy must be rank 4, but got shape " +
    (dy4D.shape + "."); });
  assert(filterShape.length === 4, function () { return "Error in conv2dDerFilter: filterShape must be length 4, but got " +
    (filterShape + "."); });
  var inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];
  var outDepth = dataFormat === 'NHWC' ? dy4D.shape[3] : dy4D.shape[1];
  // Bug fix: the original message read "input N) must ... filter (M." —
  // the parentheses were unbalanced; now matches the outDepth message.
  assert(inDepth === filterShape[2], function () { return "Error in conv2dDerFilter: depth of input (" + inDepth + ") must " +
    ("match input depth in filter (" + filterShape[2] + ")."); });
  assert(outDepth === filterShape[3], function () { return "Error in conv2dDerFilter: depth of dy (" + outDepth + ") must " +
    ("match output depth for filter (" + filterShape[3] + ")."); });
  checkPadOnDimRoundingMode('conv2dDerFilter', pad, dimRoundingMode);
  var inputs = { x: x4D, dy: dy4D };
  var attrs = { strides: strides, pad: pad, dataFormat: dataFormat, dimRoundingMode: dimRoundingMode, filterShape: filterShape };
  // tslint:disable-next-line: no-unnecessary-type-assertion
  return ENGINE.runKernel(Conv2DBackpropFilter, inputs, attrs);
}
var conv2DBackpropFilter = op({ conv2DBackpropFilter_: conv2DBackpropFilter_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Gradient of the fused activation: pass-through for linear/none, masked by
// step(y) for relu (derivative is 1 where the output was positive, else 0).
function getFusedDyActivation(dy, y, activation) {
  switch (activation) {
    case null:
    case undefined:
    case 'linear':
      return dy;
    case 'relu':
      return mul(dy, step(y));
    default:
      throw new Error("Cannot compute gradient for fused activation " + activation + ".");
  }
}
// Gradient of the fused bias: sum the upstream gradient over any broadcast
// axes, then reshape back to the bias's own shape.
function getFusedBiasGradient(bias, dyActivation) {
  var reduceAxes = getReductionAxes(bias.shape, dyActivation.shape);
  var reduced = reduceAxes.length > 0 ? sum(dyActivation, reduceAxes) : dyActivation;
  return reshape(reduced, bias.shape);
}
// Applies the named activation to x. `preluActivationWeights` and
// `leakyreluAlpha` are only consulted for 'prelu' and 'leakyrelu'.
function applyActivation(x, activation, preluActivationWeights, leakyreluAlpha) {
  switch (activation) {
    case 'linear':
      return x;
    case 'relu':
      return relu(x);
    case 'elu':
      return elu(x);
    case 'relu6':
      return relu6(x);
    case 'prelu':
      return prelu(x, preluActivationWeights);
    case 'leakyrelu':
      return leakyRelu(x, leakyreluAlpha);
    case 'sigmoid':
      return sigmoid(x);
    default:
      throw new Error("Unknown fused activation " + activation + ".");
  }
}
// Whether the fused kernel path may be used: always outside gradient mode,
// and inside gradient mode only for the linear activation (whose gradient
// the fused path supports trivially).
var shouldFuse = function (gradientDepth, activation) {
  if (gradientDepth <= 0) {
    return true;
  }
  return activation === 'linear';
};
+
+ /**
+ * Computes a 2D convolution over the input x, optionally fused with adding a
+ * bias and applying an activation.
+ *
+ * ```js
+ * const inputDepth = 2;
+ * const inShape = [2, 2, 2, inputDepth];
+ * const outputDepth = 2;
+ * const fSize = 1;
+ * const pad = 0;
+ * const strides = 1;
+ *
+ * const x = tf.tensor4d( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ * 16], inShape);
+ * const w = tf.tensor4d([-1, 1, -2, 0.5], [fSize, fSize, inputDepth,
+ * outputDepth]);
+ *
+ * tf.fused.conv2d({ x, filter: w, strides, pad, dataFormat: 'NHWC',
+ * dilations: [1, 1], bias: tf.scalar(5), activation: 'relu' }).print();
+ * ```
+ *
+ * @param obj An object with the following properties:
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter, rank 4, of shape
+ * `[filterHeight, filterWidth, inDepth, outDepth]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid` output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels]. Only "NHWC" is currently supported.
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param bias Tensor to be added to the result.
+ * @param activation Name of activation kernel (defaults to `linear`) to be
+ * applied
+ * after biasAdd.
+ * @param preluActivationWeights Tensor of prelu weights to be applied as part
+ * of a `prelu` activation, typically the same shape as `x`.
+ * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`
+ * activation.
+ */
// Fused conv2d: a 2D convolution optionally combined with a bias-add and an
// activation in a single kernel invocation (parameter contract described in
// the doc comment above). Falls back to composing the unfused ops when
// `shouldFuse` reports fusion is not applicable for the current gradient
// depth / activation.
function fusedConv2d_(_a) {
    var _b;
    var x = _a.x, filter = _a.filter, strides = _a.strides, pad = _a.pad, _c = _a.dataFormat, dataFormat = _c === void 0 ? 'NHWC' : _c, _d = _a.dilations, dilations = _d === void 0 ? [1, 1] : _d, dimRoundingMode = _a.dimRoundingMode, bias = _a.bias, _e = _a.activation, activation = _e === void 0 ? 'linear' : _e, preluActivationWeights = _a.preluActivationWeights, leakyreluAlpha = _a.leakyreluAlpha;
    // The destructuring default above only covers `undefined`; this also maps
    // an explicit null/'' activation to 'linear'.
    activation = activation || 'linear';
    if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {
        // Unfused fallback: conv2d, then bias-add, then activation.
        var result = conv2d$1(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);
        if (bias != null) {
            result = add(result, bias);
        }
        return applyActivation(result, activation, preluActivationWeights, leakyreluAlpha);
    }
    var $x = convertToTensor(x, 'x', 'conv2d', 'float32');
    var $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32');
    // A rank-3 input is treated as a batch of one; remember this so the
    // result can be squeezed back to rank 3 on the way out.
    var x4D = $x;
    var reshapedTo4D = false;
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(x4D.rank === 4, function () { return "Error in fused conv2d: input must be rank 4, but got rank " +
        (x4D.rank + "."); });
    assert($filter.rank === 4, function () { return "Error in fused conv2d: filter must be rank 4, but got rank " +
        ($filter.rank + "."); });
    checkPadOnDimRoundingMode('fused conv2d', pad, dimRoundingMode);
    assert(x4D.shape[3] === $filter.shape[2], function () { return "Error in conv2d: depth of input (" + x4D.shape[3] + ") must match " +
        ("input depth for filter " + $filter.shape[2] + "."); });
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in conv2D: Either strides or dilations must be 1. ' +
        ("Got strides " + strides + " and dilations '" + dilations + "'"); });
    assert(dataFormat === 'NHWC', function () { return "Error in conv2d: got dataFormat of " + dataFormat + " but only NHWC is currently supported."; });
    var convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode);
    var $bias;
    if (bias != null) {
        $bias = convertToTensor(bias, 'bias', 'fused conv2d');
        _b = __read(makeTypesMatch($bias, $x), 1), $bias = _b[0];
        // The bias must broadcast against the convolution's output shape.
        assertAndGetBroadcastShape(convInfo.outShape, $bias.shape);
    }
    var $preluActivationWeights;
    if (preluActivationWeights != null) {
        $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused conv2d');
    }
    // Custom gradient: first undo the activation on dy, then reuse the
    // standard conv2d input/filter backprop kernels. `saved` arrives in the
    // order the forward closures call `save(...)` below.
    var grad = function (dy, saved) {
        var _a = __read(saved, 4), $filter = _a[0], x4D = _a[1], y = _a[2], $bias = _a[3];
        var dyActivation = getFusedDyActivation(dy, y, activation);
        assert(tupleValuesAreOne(dilations), function () { return 'Error in gradient of fused conv2D: ' +
            "dilation rates greater than 1 " +
            ("are not yet supported in gradients. Got dilations '" + dilations + "'"); });
        var xDer = conv2DBackpropInput(x4D.shape, dyActivation, $filter, strides, pad);
        var filterDer = conv2DBackpropFilter(x4D, dyActivation, $filter.shape, strides, pad);
        var der = [xDer, filterDer];
        if ($bias != null) {
            var biasDer = getFusedBiasGradient($bias, dyActivation);
            der.push(biasDer);
        }
        return der;
    };
    var inputs = {
        x: x4D,
        filter: $filter,
        bias: $bias,
        preluActivationWeights: $preluActivationWeights
    };
    var attrs = {
        strides: strides,
        pad: pad,
        dataFormat: dataFormat,
        dilations: dilations,
        dimRoundingMode: dimRoundingMode,
        activation: activation,
        leakyreluAlpha: leakyreluAlpha
    };
    // Depending on the params passed in we will have a different number of
    // inputs and thus a different number of elements in the gradient.
    if (bias == null) {
        var customOp = customGrad(function (x4D, filter, save) {
            var res =
            // tslint:disable-next-line: no-unnecessary-type-assertion
            ENGINE.runKernel(FusedConv2D, inputs, attrs);
            // Saved in the order the grad closure destructures: filter, x, y.
            save([filter, x4D, res]);
            if (reshapedTo4D) {
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOp(x4D, $filter);
    }
    else {
        var customOpWithBias = customGrad(function (x4D, filter, bias, save) {
            var res = ENGINE.runKernel(FusedConv2D, inputs, attrs);
            save([filter, x4D, res, bias]);
            if (reshapedTo4D) {
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOpWithBias(x4D, $filter, $bias);
    }
}
var conv2d = op({ fusedConv2d_: fusedConv2d_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Gradient of a depthwise conv2d with respect to its filter. Thin dispatcher
// around the DepthwiseConv2dNativeBackpropFilter kernel; rank-3 (unbatched)
// tensors are promoted to rank 4 with a singleton batch dimension first.
function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, strides, pad, dilations, dimRoundingMode) {
    if (dilations === void 0) { dilations = [1, 1]; }
    var batchedX = x.rank === 3 ?
        reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]) :
        x;
    var batchedDy = dy.rank === 3 ?
        reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]) :
        dy;
    var kernelInputs = { x: batchedX, dy: batchedDy };
    var kernelAttrs = {
        strides: strides,
        pad: pad,
        dimRoundingMode: dimRoundingMode,
        dilations: dilations,
        filterShape: filterShape
    };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    return ENGINE.runKernel(DepthwiseConv2dNativeBackpropFilter, kernelInputs, kernelAttrs);
}
var depthwiseConv2dNativeBackpropFilter = op({ depthwiseConv2dNativeBackpropFilter_: depthwiseConv2dNativeBackpropFilter_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Gradient of a depthwise conv2d with respect to its input. Dispatches to the
// DepthwiseConv2dNativeBackpropInput kernel; a rank-3 dy is batched to rank 4
// for the kernel and the result is squeezed back afterwards.
function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, strides, pad, dilations, dimRoundingMode) {
    if (dilations === void 0) { dilations = [1, 1]; }
    var hadRank3Dy = dy.rank === 3;
    var batchedDy = hadRank3Dy ?
        reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]) :
        dy;
    var kernelInputs = { dy: batchedDy, filter: filter };
    var kernelAttrs = {
        strides: strides,
        pad: pad,
        dimRoundingMode: dimRoundingMode,
        dilations: dilations,
        inputShape: xShape
    };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var res = ENGINE.runKernel(DepthwiseConv2dNativeBackpropInput, kernelInputs, kernelAttrs);
    // Drop the singleton batch dimension again if we added one above.
    return hadRank3Dy ?
        reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) :
        res;
}
var depthwiseConv2dNativeBackpropInput = op({ depthwiseConv2dNativeBackpropInput_: depthwiseConv2dNativeBackpropInput_ });
+
+ /**
+ * Computes depthwise 2D convolution, optionally fused with adding a
+ * bias and applying an activation.
+ *
+ * Given a 4D `input` array and a `filter` array of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing
+ * `inChannels` convolutional filters of depth 1, this op applies a
+ * different filter to each input channel (expanding from 1 channel to
+ * `channelMultiplier` channels for each), then concatenates the results
+ * together. The output has `inChannels * channelMultiplier` channels.
+ *
+ * See
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
+ * for more details.
+ *
+ * @param obj An object with the following properties:
+ * @param x The input tensor, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
+ * assumed.
+ * @param filter The filter tensor, rank 4, of shape
+ * `[filterHeight, filterWidth, inChannels, channelMultiplier]`.
+ * @param strides The strides of the convolution: `[strideHeight,
+ * strideWidth]`. If strides is a single number, then `strideHeight ==
+ * strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
+ * in which we sample input values across the height and width dimensions
+ * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
+ * number, then `dilationHeight == dilationWidth`. If it is greater than
+ * 1, then all values of `strides` must be 1.
+ * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
+ * "NHWC". Specify the data format of the input and output data. With the
+ * default format "NHWC", the data is stored in the order of: [batch,
+ * height, width, channels]. Only "NHWC" is currently supported.
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param bias Tensor to be added to the result.
+ * @param activation Name of activation kernel (defaults to `linear`).
+ * @param preluActivationWeights Tensor of prelu weights to be applied as part
+ * of a `prelu` activation, typically the same shape as `x`.
+ * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`
+ * activation.
+ */
// Fused depthwise conv2d: depthwise convolution optionally combined with a
// bias-add and an activation (parameter contract described in the doc comment
// above). Falls back to composing the unfused ops when `shouldFuse` reports
// fusion is not applicable for the current gradient depth / activation.
function fusedDepthwiseConv2d_(_a) {
    var _b;
    var x = _a.x, filter = _a.filter, strides = _a.strides, pad = _a.pad, _c = _a.dataFormat, dataFormat = _c === void 0 ? 'NHWC' : _c, _d = _a.dilations, dilations = _d === void 0 ? [1, 1] : _d, dimRoundingMode = _a.dimRoundingMode, bias = _a.bias, _e = _a.activation, activation = _e === void 0 ? 'linear' : _e, preluActivationWeights = _a.preluActivationWeights, leakyreluAlpha = _a.leakyreluAlpha;
    if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {
        // Unfused fallback: depthwiseConv2d, then bias-add, then activation.
        var result = depthwiseConv2d$1(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);
        if (bias != null) {
            result = add(result, bias);
        }
        return applyActivation(result, activation, preluActivationWeights, leakyreluAlpha);
    }
    var $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32');
    var $filter = convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32');
    // A rank-3 input is treated as a batch of one; remember this so the
    // result can be squeezed back to rank 3 on the way out.
    var x4D = $x;
    var reshapedTo4D = false;
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    assert(x4D.rank === 4, function () { return "Error in fused depthwiseConv2d: input must be rank 4, but got " +
        ("rank " + x4D.rank + "."); });
    assert($filter.rank === 4, function () { return "Error in fused depthwiseConv2d: filter must be rank 4, " +
        ("but got rank " + $filter.rank + "."); });
    assert(x4D.shape[3] === $filter.shape[2], function () { return "Error in fused depthwiseConv2d: number of input channels " +
        ("(" + x4D.shape[3] + ") must match the inChannels dimension in ") +
        ("filter " + $filter.shape[2] + "."); });
    // Defensive: the destructuring default covers `undefined` but not an
    // explicit null.
    if (dilations == null) {
        dilations = [1, 1];
    }
    assert(eitherStridesOrDilationsAreOne(strides, dilations), function () { return 'Error in fused depthwiseConv2d: Either strides or dilations must ' +
        ("be 1. Got strides " + strides + " and dilations '" + dilations + "'"); });
    checkPadOnDimRoundingMode('fused depthwiseConv2d', pad, dimRoundingMode);
    var convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode, true /* depthwise */);
    var $bias;
    if (bias != null) {
        $bias = convertToTensor(bias, 'bias', 'fused conv2d');
        _b = __read(makeTypesMatch($bias, $x), 1), $bias = _b[0];
        // The bias must broadcast against the convolution's output shape.
        assertAndGetBroadcastShape(convInfo.outShape, $bias.shape);
    }
    var $preluActivationWeights;
    if (preluActivationWeights != null) {
        $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused depthwiseConv2d');
    }
    // Custom gradient: undo the activation on dy, then use the depthwise
    // input/filter backprop kernels.
    var grad = function (dy, saved) {
        assert(tupleValuesAreOne(dilations), function () { return 'Error in gradient of fused depthwiseConv2d: dilation rates ' +
            "greater than 1 are not yet supported. Got dilations " +
            ("'" + dilations + "'"); });
        var _a = __read(saved, 4), $filter = _a[0], x4D = _a[1], y = _a[2], bias = _a[3];
        var dyActivation = getFusedDyActivation(dy, y, activation);
        var xDer = depthwiseConv2dNativeBackpropInput(x4D.shape, dyActivation, $filter, strides, pad, dilations, dimRoundingMode);
        var filterDer = depthwiseConv2dNativeBackpropFilter(x4D, dyActivation, $filter.shape, strides, pad, dilations, dimRoundingMode);
        if (bias != null) {
            // NOTE(review): the null-check uses the saved `bias` while the
            // gradient reads the outer closure's `$bias`. These refer to the
            // same tensor (the forward saves `$bias`), so behavior matches the
            // fusedConv2d_ gradient above — but confirm before refactoring.
            var biasDer = getFusedBiasGradient($bias, dyActivation);
            return [xDer, filterDer, biasDer];
        }
        return [xDer, filterDer];
    };
    var inputs = {
        x: x4D,
        filter: $filter,
        bias: $bias,
        preluActivationWeights: $preluActivationWeights
    };
    var attrs = {
        strides: strides,
        pad: pad,
        dataFormat: dataFormat,
        dilations: dilations,
        dimRoundingMode: dimRoundingMode,
        activation: activation,
        leakyreluAlpha: leakyreluAlpha
    };
    // Depending on the params passed in we will have a different number of
    // inputs and thus a different number of elements in the gradient.
    if (bias == null) {
        var customOp = customGrad(function (x4D, filter, save) {
            // tslint:disable-next-line: no-unnecessary-type-assertion
            var res = ENGINE.runKernel(FusedDepthwiseConv2D, inputs, attrs);
            // Saved in the order the grad closure destructures: filter, x, y.
            save([filter, x4D, res]);
            if (reshapedTo4D) {
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOp(x4D, $filter);
    }
    else {
        var customOpWithBias = customGrad(function (x4D, filter, bias, save) {
            // tslint:disable-next-line: no-unnecessary-type-assertion
            var res = ENGINE.runKernel(FusedDepthwiseConv2D, inputs, attrs);
            save([filter, x4D, res, bias]);
            if (reshapedTo4D) {
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOpWithBias(x4D, $filter, $bias);
    }
}
var depthwiseConv2d = op({ fusedDepthwiseConv2d_: fusedDepthwiseConv2d_ });
+
+ /**
+ * Computes the dot product of two matrices with optional activation and bias.
+ *
+ * ```js
+ * const a = tf.tensor2d([-1, -2], [1, 2]);
+ * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
+ * const bias = tf.tensor2d([1, 2], [1, 2]);
+ *
+ * tf.fused.matMul({a, b, bias, activation: 'relu'}).print();
+ * ```
+ *
+ * @param obj An object with the following properties:
+ * - `a` First matrix in dot product operation.
+ * - `b` Second matrix in dot product operation.
+ * - `transposeA` If true, `a` is transposed before multiplication.
+ * - `transposeB` If true, `b` is transposed before multiplication.
+ * - `bias` Matrix to be added to the result.
+ * - `activation` Name of activation kernel (defaults to `linear`).
+ * - `preluActivationWeights` Tensor of prelu weights.
+ * - `leakyreluAlpha` Alpha of leakyrelu.
+ */
// Fused matMul: matrix multiply optionally combined with a bias-add and an
// activation (parameter contract described in the doc comment above). Falls
// back to composing the unfused ops when `shouldFuse` reports fusion is not
// applicable for the current gradient depth / activation.
function fusedMatMul_(_a) {
    var _b, _c;
    var a = _a.a, b = _a.b, _d = _a.transposeA, transposeA = _d === void 0 ? false : _d, _e = _a.transposeB, transposeB = _e === void 0 ? false : _e, bias = _a.bias, _f = _a.activation, activation = _f === void 0 ? 'linear' : _f, preluActivationWeights = _a.preluActivationWeights, leakyreluAlpha = _a.leakyreluAlpha;
    if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {
        // Unfused fallback: matMul, then bias-add, then activation.
        var result = matMul$1(a, b, transposeA, transposeB);
        if (bias != null) {
            result = add(result, bias);
        }
        return applyActivation(result, activation, preluActivationWeights, leakyreluAlpha);
    }
    var $a = convertToTensor(a, 'a', 'fused matMul');
    var $b = convertToTensor(b, 'b', 'fused matMul');
    _b = __read(makeTypesMatch($a, $b), 2), $a = _b[0], $b = _b[1];
    // Inner dims must agree (they are contracted); outer dims become the
    // trailing two dims of the output. Which of the last two axes is "inner"
    // depends on the transpose flags.
    var innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];
    var innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];
    var outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];
    var outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];
    var outerDimsA = $a.shape.slice(0, -2);
    var outerDimsB = $b.shape.slice(0, -2);
    var batchDimA = sizeFromShape(outerDimsA);
    var batchDimB = sizeFromShape(outerDimsB);
    assert(innerShapeA === innerShapeB, function () { return "Error in fused matMul: inner shapes (" + innerShapeA + ") and (" +
        (innerShapeB + ") of Tensors with shapes " + $a.shape + " and ") +
        ($b.shape + " and transposeA=" + transposeA) +
        (" and transposeB=" + transposeB + " must match."); });
    // Leading (batch) dims broadcast against each other.
    var outShapeOuterDims = assertAndGetBroadcastShape($a.shape.slice(0, -2), $b.shape.slice(0, -2));
    var outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);
    // Collapse all batch dims into one so the kernel always sees rank-3
    // operands; the output is reshaped back to `outShape` at the end.
    var a3D = transposeA ?
        reshape($a, [batchDimA, innerShapeA, outerShapeA]) :
        reshape($a, [batchDimA, outerShapeA, innerShapeA]);
    var b3D = transposeB ?
        reshape($b, [batchDimB, outerShapeB, innerShapeB]) :
        reshape($b, [batchDimB, innerShapeB, outerShapeB]);
    var $bias;
    if (bias != null) {
        $bias = convertToTensor(bias, 'bias', 'fused matMul');
        _c = __read(makeTypesMatch($bias, $a), 1), $bias = _c[0];
        // The bias must broadcast against the output shape.
        assertAndGetBroadcastShape(outShape, $bias.shape);
    }
    var $preluActivationWeights;
    if (preluActivationWeights != null) {
        $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused matMul');
    }
    // Custom gradient: undo the activation on dy, then apply the standard
    // matMul gradient, with one case per combination of transpose flags.
    var grad = function (dy, saved) {
        var _a = __read(saved, 4), a3D = _a[0], b3D = _a[1], y = _a[2], $bias = _a[3];
        // we reshape dy because the result of the forward is not
        // necessarily going to be a 3d tensor due to a reshape done at the end of
        // the customOp.
        var dyActivation = getFusedDyActivation(reshape(dy, y.shape), y, activation);
        var aDer;
        var bDer;
        if (!transposeA && !transposeB) {
            aDer = matMul$1(dyActivation, b3D, false, true);
            bDer = matMul$1(a3D, dyActivation, true, false);
        }
        else if (!transposeA && transposeB) {
            aDer = matMul$1(dyActivation, b3D, false, false);
            bDer = matMul$1(dyActivation, a3D, true, false);
        }
        else if (transposeA && !transposeB) {
            aDer = matMul$1(b3D, dyActivation, false, true);
            bDer = matMul$1(a3D, dyActivation, false, false);
        }
        else {
            aDer = matMul$1(b3D, dyActivation, true, true);
            bDer = matMul$1(dyActivation, a3D, true, true);
        }
        if (bias != null) {
            var biasDer = getFusedBiasGradient($bias, dyActivation);
            return [aDer, bDer, biasDer];
        }
        else {
            return [aDer, bDer];
        }
    };
    var inputs = {
        a: a3D,
        b: b3D,
        bias: $bias,
        preluActivationWeights: $preluActivationWeights
    };
    var attrs = { transposeA: transposeA, transposeB: transposeB, activation: activation, leakyreluAlpha: leakyreluAlpha };
    // Depending on the params passed in we will have a different number of
    // inputs and thus a different number of elements in the gradient.
    if (bias == null) {
        var customOp = customGrad(function (a3D, b3D, save) {
            var res =
            // tslint:disable-next-line: no-unnecessary-type-assertion
            ENGINE.runKernel(_FusedMatMul, inputs, attrs);
            // Saved in the order the grad closure destructures: a, b, y.
            save([a3D, b3D, res]);
            return { value: reshape(res, outShape), gradFunc: grad };
        });
        return customOp(a3D, b3D);
    }
    else {
        var customOpWithBias = customGrad(function (a3D, b3D, $bias, save) {
            var res =
            // tslint:disable-next-line: no-unnecessary-type-assertion
            ENGINE.runKernel(_FusedMatMul, inputs, attrs);
            save([a3D, b3D, res, $bias]);
            return { value: reshape(res, outShape), gradFunc: grad };
        });
        return customOpWithBias(a3D, b3D, $bias);
    }
}
var matMul = op({ fusedMatMul_: fusedMatMul_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
// Namespace record for the fused ops, exposed to users as `tf.fused`.
// `__proto__: null` mirrors the shape of a bundled ES module namespace
// object (no inherited Object.prototype members) — do not remove.
var fused_ops = {
    __proto__: null,
    conv2d: conv2d,
    depthwiseConv2d: depthwiseConv2d,
    matMul: matMul
};
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Generate a hamming window.
+ *
+ * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
+ *
+ * ```js
+ * tf.signal.hammingWindow(10).print();
+ * ```
+     * @param windowLength The length of the window.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
// Hamming window of the given length:
//   w[n] = 0.54 - 0.46 * cos(2*pi*n / (length - 1))
function hammingWindow_(windowLength) {
    var alpha = 0.54;
    var beta = 0.46;
    return cosineWindow(windowLength, alpha, beta);
}
var hammingWindow = op({ hammingWindow_: hammingWindow_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Generate a Hann window.
+ *
+ * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
+ *
+ * ```js
+ * tf.signal.hannWindow(10).print();
+ * ```
+     * @param windowLength The length of the window.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
// Hann window of the given length:
//   w[n] = 0.5 - 0.5 * cos(2*pi*n / (length - 1))
function hannWindow_(windowLength) {
    var alpha = 0.5;
    var beta = 0.5;
    return cosineWindow(windowLength, alpha, beta);
}
var hannWindow = op({ hannWindow_: hannWindow_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Expands input into frames of frameLength.
+ * Slides a window size with frameStep.
+ *
+ * ```js
+ * tf.signal.frame([1, 2, 3], 2, 1).print();
+ * ```
+ * @param signal The input tensor to be expanded
+ * @param frameLength Length of each frame
+ * @param frameStep The frame hop size in samples.
+ * @param padEnd Whether to pad the end of signal with padValue.
+ * @param padValue An number to use where the input signal does
+ * not exist when padEnd is True.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
// Splits a 1-D signal into overlapping frames of `frameLength` samples,
// advancing by `frameStep` between frames. When `padEnd` is true, trailing
// partial frames are completed with `padValue`; otherwise they are dropped.
// Returns a [numFrames, frameLength] tensor (empty when no frame fits).
function frame_(signal, frameLength, frameStep, padEnd, padValue) {
    if (padEnd === void 0) { padEnd = false; }
    if (padValue === void 0) { padValue = 0; }
    var frames = [];
    var offset = 0;
    // Collect every frame that fits entirely inside the signal.
    for (; offset + frameLength <= signal.size; offset += frameStep) {
        frames.push(slice(signal, offset, frameLength));
    }
    if (padEnd) {
        // Complete each trailing partial frame with padValue samples.
        for (; offset < signal.size; offset += frameStep) {
            var shortage = (offset + frameLength) - signal.size;
            frames.push(concat([
                slice(signal, offset, frameLength - shortage),
                fill([shortage], padValue)
            ]));
        }
    }
    if (frames.length === 0) {
        return tensor2d([], [0, frameLength]);
    }
    return reshape(concat(frames), [frames.length, frameLength]);
}
var frame = op({ frame_: frame_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the Short-time Fourier Transform of signals
+ * See: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
+ *
+ * ```js
+ * const input = tf.tensor1d([1, 1, 1, 1, 1])
+ * tf.signal.stft(input, 3, 1).print();
+ * ```
+ * @param signal 1-dimensional real value tensor.
+ * @param frameLength The window length of samples.
+ * @param frameStep The number of samples to step.
+ * @param fftLength The size of the FFT to apply.
+ * @param windowFn A callable that takes a window length and returns 1-d tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
+ */
// Short-time Fourier transform: frame the signal, apply the window function
// to each frame, and take the real FFT of every windowed frame.
function stft_(signal, frameLength, frameStep, fftLength, windowFn) {
    if (windowFn === void 0) { windowFn = hannWindow; }
    // When no FFT size is given, round the frame length up to a power of two.
    var $fftLength = fftLength == null ? enclosingPowerOfTwo(frameLength) : fftLength;
    var frames = frame(signal, frameLength, frameStep);
    var window = windowFn(frameLength);
    return rfft(mul(frames, window), $fftLength);
}
var stft = op({ stft_: stft_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Extracts crops from the input image tensor and resizes them using bilinear
+ * sampling or nearest neighbor sampling (possibly with aspect ratio change)
+ * to a common output size specified by cropSize.
+ *
+ * @param image 4d tensor of shape `[batch,imageHeight,imageWidth, depth]`,
+ * where imageHeight and imageWidth must be positive, specifying the
+ * batch of images from which to take crops
+ * @param boxes 2d float32 tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the normalized
+ * coordinates of the box in the boxInd[i]'th image in the batch
+ * @param boxInd 1d int32 tensor of shape `[numBoxes]` with values in range
+ * `[0, batch)` that specifies the image that the `i`-th box refers to.
+ * @param cropSize 1d int32 tensor of 2 elements `[cropHeigh, cropWidth]`
+ * specifying the size to which all crops are resized to.
+ * @param method Optional string from `'bilinear' | 'nearest'`,
+ * defaults to bilinear, which specifies the sampling method for resizing
+     * @param extrapolationValue The value used to fill sampled points that fall
+     *     outside the source image (extrapolation). Defaults to 0.
+ * @return A 4D tensor of the shape `[numBoxes,cropHeight,cropWidth,depth]`
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
// Extracts crops described by `boxes` from `image` and resizes each to
// `cropSize` using the given sampling `method` (see the doc comment above for
// the full parameter contract). Validates argument ranks/shapes before
// dispatching to the CropAndResize kernel.
//
// Fixes over the previous revision (error reporting only; computation is
// unchanged):
//  - the boxInd shape error reported `$boxes.shape` instead of the offending
//    `$boxInd.shape`;
//  - the rank-4 message was missing a space ("rank 4,but got rank ...").
function cropAndResize_(image, boxes, boxInd, cropSize, method, extrapolationValue) {
    if (method === void 0) { method = 'bilinear'; }
    if (extrapolationValue === void 0) { extrapolationValue = 0; }
    var $image = convertToTensor(image, 'image', 'cropAndResize');
    var $boxes = convertToTensor(boxes, 'boxes', 'cropAndResize', 'float32');
    var $boxInd = convertToTensor(boxInd, 'boxInd', 'cropAndResize', 'int32');
    var numBoxes = $boxes.shape[0];
    assert($image.rank === 4, function () { return 'Error in cropAndResize: image must be rank 4, ' +
        ("but got rank " + $image.rank + "."); });
    assert($boxes.rank === 2 && $boxes.shape[1] === 4, function () { return "Error in cropAndResize: boxes must be have size [" + numBoxes + ",4] " +
        ("but had shape " + $boxes.shape + "."); });
    // Report the shape of boxInd itself (previously this echoed $boxes.shape).
    assert($boxInd.rank === 1 && $boxInd.shape[0] === numBoxes, function () { return "Error in cropAndResize: boxInd must be have size [" + numBoxes + "] " +
        ("but had shape " + $boxInd.shape + "."); });
    assert(cropSize.length === 2, function () { return "Error in cropAndResize: cropSize must be of length 2, but got " +
        ("length " + cropSize.length + "."); });
    assert(cropSize[0] >= 1 && cropSize[1] >= 1, function () { return "cropSize must be atleast [1,1], but was " + cropSize; });
    assert(method === 'bilinear' || method === 'nearest', function () { return "method must be bilinear or nearest, but was " + method; });
    var inputs = { image: $image, boxes: $boxes, boxInd: $boxInd };
    var attrs = { method: method, extrapolationValue: extrapolationValue, cropSize: cropSize };
    var res = ENGINE.runKernel(CropAndResize, inputs, attrs);
    return res;
}
var cropAndResize = op({ cropAndResize_: cropAndResize_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Flips the image left to right. Currently available in the CPU, WebGL, and
+ * WASM backends.
+ *
+ * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
+ */
+ /** @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} */
// Mirrors a batch of images horizontally (left <-> right) via the
// FlipLeftRight kernel. Requires a rank-4 [batch, height, width, depth]
// input.
function flipLeftRight_(image) {
    var $image = convertToTensor(image, 'image', 'flipLeftRight', 'float32');
    assert($image.rank === 4, function () {
        return 'Error in flipLeftRight: image must be rank 4,' +
            ("but got rank " + $image.rank + ".");
    });
    var kernelInputs = { image: $image };
    return ENGINE.runKernel(FlipLeftRight, kernelInputs, {});
}
var flipLeftRight = op({ flipLeftRight_: flipLeftRight_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Converts images from grayscale to RGB format.
 *
 * @param image A grayscale tensor to convert. The `image`'s last dimension
 *     must be size 1 with at least a two-dimensional shape.
 *
 * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
 */
function grayscaleToRGB_(image) {
    var $image = convertToTensor(image, 'image', 'grayscaleToRGB');
    var channelAxis = $image.rank - 1;
    var numChannels = $image.shape[channelAxis];
    assert($image.rank >= 2, function () {
        return 'Error in grayscaleToRGB: images must be at least rank 2, ' +
            'but got rank ' + $image.rank + '.';
    });
    assert(numChannels === 1, function () {
        return 'Error in grayscaleToRGB: last dimension of a grayscale image ' +
            'should be size 1, but got size ' + numChannels + '.';
    });
    // Repeat the single gray channel 3 times along the channel axis; every
    // other axis is tiled once (i.e. left unchanged).
    var reps = new Array($image.rank).fill(1);
    reps[channelAxis] = 3;
    return tile($image, reps);
}
var grayscaleToRGB = op({ grayscaleToRGB_: grayscaleToRGB_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Rotates the input image tensor counter-clockwise with an optional offset
 * center of rotation. Currently available in the CPU, WebGL, and WASM
 * backends.
 *
 * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
 * @param radians The amount of rotation.
 * @param fillValue The value to fill in the empty space leftover after
 *     rotation: either a single grayscale value (0-255) or an
 *     `[red, green, blue]` triple. Defaults to `0` (black).
 * @param center The center of rotation: a single value (0-1) or a
 *     `[centerX, centerY]` pair. Defaults to `0.5` (rotates the image
 *     around its center).
 *
 * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
 */
function rotateWithOffset_(image, radians, fillValue, center) {
    fillValue = fillValue === void 0 ? 0 : fillValue;
    center = center === void 0 ? 0.5 : center;
    var $image = convertToTensor(image, 'image', 'rotateWithOffset', 'float32');
    assert($image.rank === 4, function () {
        return 'Error in rotateWithOffset: image must be rank 4,' +
            'but got rank ' + $image.rank + '.';
    });
    var attrs = { radians: radians, fillValue: fillValue, center: center };
    return ENGINE.runKernel(RotateWithOffset, { image: $image }, attrs);
}
var rotateWithOffset = op({ rotateWithOffset_: rotateWithOffset_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Validates the arguments shared by all the nonMaxSuppression* ops and fills
 * in defaults for the optional parameters.
 *
 * @param boxes 2D tensor of shape `[numBoxes, 4]`.
 * @param scores 1D tensor of shape `[numBoxes]`.
 * @param maxOutputSize Upper bound on selections; clamped to `numBoxes`.
 * @param iouThreshold Defaults to 0.5 when null/undefined; must lie in [0, 1].
 * @param scoreThreshold Defaults to -Infinity when null/undefined.
 * @param softNmsSigma Defaults to 0 when null/undefined; must lie in [0, 1].
 * @return The normalized `{maxOutputSize, iouThreshold, scoreThreshold,
 *     softNmsSigma}` values. Throws (via `assert`) on malformed input.
 */
function nonMaxSuppSanityCheck(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
    iouThreshold = iouThreshold == null ? 0.5 : iouThreshold;
    scoreThreshold = scoreThreshold == null ? Number.NEGATIVE_INFINITY : scoreThreshold;
    softNmsSigma = softNmsSigma == null ? 0.0 : softNmsSigma;
    var numBoxes = boxes.shape[0];
    // Never ask for more selections than there are boxes.
    maxOutputSize = Math.min(maxOutputSize, numBoxes);
    assert(0 <= iouThreshold && iouThreshold <= 1, function () {
        return "iouThreshold must be in [0, 1], but was '" + iouThreshold + "'";
    });
    assert(boxes.rank === 2, function () {
        return "boxes must be a 2D tensor, but was of rank '" + boxes.rank + "'";
    });
    assert(boxes.shape[1] === 4, function () {
        return "boxes must have 4 columns, but 2nd dimension was " + boxes.shape[1];
    });
    assert(scores.rank === 1, function () {
        return 'scores must be a 1D tensor';
    });
    assert(scores.shape[0] === numBoxes, function () {
        return "scores has incompatible shape with boxes. Expected " + numBoxes + ", " +
            "but was " + scores.shape[0];
    });
    assert(0 <= softNmsSigma && softNmsSigma <= 1, function () {
        return "softNmsSigma must be in [0, 1], but was '" + softNmsSigma + "'";
    });
    return { maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, scoreThreshold: scoreThreshold, softNmsSigma: softNmsSigma };
}
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Performs non maximum suppression of bounding boxes based on
 * iou (intersection over union).
 *
 * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
 *     `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
 *     the bounding box.
 * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
 * @param maxOutputSize The maximum number of boxes to be selected.
 * @param iouThreshold A float in [0, 1]; boxes that overlap an already
 *     selected box by at least this IOU are dropped. Defaults to 0.5
 *     (50% box overlap).
 * @param scoreThreshold Boxes scoring below this value are removed.
 *     Defaults to -inf, which means any score is accepted.
 * @return A 1D tensor with the selected box indices.
 *
 * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
 */
function nonMaxSuppression_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
    if (iouThreshold === void 0) { iouThreshold = 0.5; }
    if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
    var $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression', 'float32');
    var $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression', 'float32');
    // Normalize the thresholds and clamp maxOutputSize to the box count.
    var sanitized = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);
    var attrs = {
        maxOutputSize: sanitized.maxOutputSize,
        iouThreshold: sanitized.iouThreshold,
        scoreThreshold: sanitized.scoreThreshold
    };
    return ENGINE.runKernel(NonMaxSuppressionV3, { boxes: $boxes, scores: $scores }, attrs);
}
var nonMaxSuppression = op({ nonMaxSuppression_: nonMaxSuppression_ });
+
+ /**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
/**
 * Inserts a value into a sorted array, keeping the array sorted. Duplicates
 * are permitted; a duplicate value is inserted at the lowest index holding
 * that value.
 * @param arr The (sorted) array to modify in place.
 * @param element The element to insert.
 * @param comparator Optional two-argument comparator returning a negative,
 *     zero, or positive number. Defaults to defaultComparator, which is
 *     suitable for ascending arrays of Strings or Numbers.
 */
function binaryInsert(arr, element, comparator) {
    var searchResult = binarySearch(arr, element, comparator);
    // Negative search results encode the insertion point as
    // (-insertionPoint - 1); decode it back.
    var insertAt = searchResult < 0 ? -(searchResult + 1) : searchResult;
    arr.splice(insertAt, 0, element);
}
/**
 * Binary-searches `arr` for `target`.
 * @param arr The sorted array to be searched.
 * @param target The value to be searched for.
 * @param comparator Optional comparator (see binaryInsert). If the array
 *     contains multiple instances of the target, the left-most instance is
 *     found.
 * @return The lowest index of the target value if found; otherwise the
 *     insertion point, encoded as (-insertionPoint - 1).
 */
function binarySearch(arr, target, comparator) {
    return binarySearch_(arr, target, comparator || defaultComparator);
}
/**
 * Orders two values ascending.
 * @param a The first element to be compared.
 * @param b The second element to be compared.
 * @return A negative number, zero, or a positive number as `a` is less
 *     than, equal to, or greater than `b`.
 */
function defaultComparator(a, b) {
    if (a > b) {
        return 1;
    }
    return a < b ? -1 : 0;
}
// Lower-bound binary search: converges on the left-most matching index.
function binarySearch_(arr, target, comparator) {
    var lo = 0;
    var hi = arr.length;
    var matched = false;
    while (lo < hi) {
        // Overflow-safe midpoint.
        var mid = lo + ((hi - lo) >>> 1);
        var cmp = comparator(target, arr[mid]);
        if (cmp > 0) {
            lo = mid + 1;
        }
        else {
            hi = mid;
            // A zero comparison means a match; keep narrowing leftwards in
            // case of duplicates.
            matched = !cmp;
        }
    }
    return matched ? lo : -lo - 1;
}
+
// Thin wrappers that configure the shared NMS implementation for the
// V3/V4/V5 kernel variants.
function nonMaxSuppressionV3Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
    // V3: plain NMS — no Soft-NMS, indices only.
    return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0 /* softNmsSigma */);
}
function nonMaxSuppressionV4Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) {
    // V4: optionally pads the output and reports a validOutputs count.
    var returnScoresTensor = false;
    var returnValidOutputs = true;
    return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0 /* softNmsSigma */, returnScoresTensor, padToMaxOutputSize, returnValidOutputs);
}
function nonMaxSuppressionV5Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
    // V5: Soft-NMS, returning the (possibly decayed) score per selection.
    return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, true /* returnScoresTensor */);
}
/**
 * Shared CPU implementation behind the V3/V4/V5 non-max-suppression kernels.
 *
 * @param boxes Flat box coordinates, 4 values per box.
 * @param scores One score per box; boxes scoring <= scoreThreshold are never
 *     selected.
 * @param maxOutputSize Maximum number of boxes to select.
 * @param iouThreshold Overlap (IOU) at or above which a candidate is dropped.
 * @param scoreThreshold Minimum score for a box to be considered.
 * @param softNmsSigma Soft-NMS sigma; 0 disables score decay (classic NMS).
 * @param returnScoresTensor When true, the result includes `selectedScores`
 *     (V5 behavior).
 * @param padToMaxOutputSize When true, indices/scores are zero-padded up to
 *     maxOutputSize (V4 behavior).
 * @param returnValidOutputs When true, the result includes `validOutputs`,
 *     the count of real (non-padding) selections (V4 behavior).
 * @return `{selectedIndices}` plus the optional fields described above.
 */
function nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, returnScoresTensor, padToMaxOutputSize, returnValidOutputs) {
    if (returnScoresTensor === void 0) { returnScoresTensor = false; }
    if (padToMaxOutputSize === void 0) { padToMaxOutputSize = false; }
    if (returnValidOutputs === void 0) { returnValidOutputs = false; }
    // The list is sorted in ascending order, so that we can always pop the
    // candidate with the largest score in O(1) time.
    var candidates = [];
    for (var i = 0; i < scores.length; i++) {
        if (scores[i] > scoreThreshold) {
            candidates.push({ score: scores[i], boxIndex: i, suppressBeginIndex: 0 });
        }
    }
    candidates.sort(ascendingComparator);
    // If softNmsSigma is 0, the outcome of this algorithm is exactly same as
    // before. (scale <= 0 makes suppressWeight's exp() term <= 1.)
    var scale = softNmsSigma > 0 ? (-0.5 / softNmsSigma) : 0.0;
    var selectedIndices = [];
    var selectedScores = [];
    while (selectedIndices.length < maxOutputSize && candidates.length > 0) {
        var candidate = candidates.pop();
        var originalScore = candidate.score, boxIndex = candidate.boxIndex, suppressBeginIndex = candidate.suppressBeginIndex;
        if (originalScore < scoreThreshold) {
            break;
        }
        // Overlapping boxes are likely to have similar scores, therefore we
        // iterate through the previously selected boxes backwards in order to
        // see if candidate's score should be suppressed. We use
        // suppressBeginIndex to track and ensure a candidate can be suppressed
        // by a selected box no more than once. Also, if the overlap exceeds
        // iouThreshold, we simply ignore the candidate.
        var ignoreCandidate = false;
        for (var j = selectedIndices.length - 1; j >= suppressBeginIndex; --j) {
            var iou = intersectionOverUnion(boxes, boxIndex, selectedIndices[j]);
            if (iou >= iouThreshold) {
                ignoreCandidate = true;
                break;
            }
            // Soft-NMS: decay the candidate's score by a Gaussian of the IOU.
            candidate.score =
                candidate.score * suppressWeight(iouThreshold, scale, iou);
            if (candidate.score <= scoreThreshold) {
                break;
            }
        }
        // At this point, if `candidate.score` has not dropped below
        // `scoreThreshold`, then we know that we went through all of the
        // previous selections and can safely update `suppressBeginIndex` to the
        // end of the selected array. Then we can re-insert the candidate with
        // the updated score and suppressBeginIndex back in the candidate list.
        // If on the other hand, `candidate.score` has dropped below the score
        // threshold, we will not add it back to the candidates list.
        candidate.suppressBeginIndex = selectedIndices.length;
        if (!ignoreCandidate) {
            // Candidate has passed all the tests, and is not suppressed, so
            // select the candidate.
            if (candidate.score === originalScore) {
                selectedIndices.push(boxIndex);
                selectedScores.push(candidate.score);
            }
            else if (candidate.score > scoreThreshold) {
                // Candidate's score is suppressed but is still high enough to be
                // considered, so add back to the candidates list.
                binaryInsert(candidates, candidate, ascendingComparator);
            }
        }
    }
    // NonMaxSuppressionV4 feature: padding output to maxOutputSize.
    var validOutputs = selectedIndices.length;
    var elemsToPad = maxOutputSize - validOutputs;
    if (padToMaxOutputSize && elemsToPad > 0) {
        selectedIndices.push.apply(selectedIndices, __spread(new Array(elemsToPad).fill(0)));
        selectedScores.push.apply(selectedScores, __spread(new Array(elemsToPad).fill(0.0)));
    }
    // Optional fields are only attached when the corresponding variant asked
    // for them, so V3 callers see a bare {selectedIndices}.
    var result = { selectedIndices: selectedIndices };
    if (returnScoresTensor) {
        result['selectedScores'] = selectedScores;
    }
    if (returnValidOutputs) {
        result['validOutputs'] = validOutputs;
    }
    return result;
}
/**
 * Computes the intersection-over-union of boxes `i` and `j`, whose corner
 * coordinates are stored flat in `boxes` (4 values per box). Corner order is
 * not assumed: each box is normalized with min/max first.
 */
function intersectionOverUnion(boxes, i, j) {
    var boxI = boxes.subarray(i * 4, i * 4 + 4);
    var boxJ = boxes.subarray(j * 4, j * 4 + 4);
    // Normalize so (ymin, xmin) <= (ymax, xmax) regardless of corner order.
    var yminI = Math.min(boxI[0], boxI[2]);
    var xminI = Math.min(boxI[1], boxI[3]);
    var ymaxI = Math.max(boxI[0], boxI[2]);
    var xmaxI = Math.max(boxI[1], boxI[3]);
    var yminJ = Math.min(boxJ[0], boxJ[2]);
    var xminJ = Math.min(boxJ[1], boxJ[3]);
    var ymaxJ = Math.max(boxJ[0], boxJ[2]);
    var xmaxJ = Math.max(boxJ[1], boxJ[3]);
    var areaI = (ymaxI - yminI) * (xmaxI - xminI);
    var areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ);
    // A degenerate (zero-area) box can never overlap anything.
    if (areaI <= 0 || areaJ <= 0) {
        return 0.0;
    }
    // Clamp the overlap extents at zero so disjoint boxes yield 0.
    var overlapH = Math.max(Math.min(ymaxI, ymaxJ) - Math.max(yminI, yminJ), 0.0);
    var overlapW = Math.max(Math.min(xmaxI, xmaxJ) - Math.max(xminI, xminJ), 0.0);
    var intersectionArea = overlapH * overlapW;
    return intersectionArea / (areaI + areaJ - intersectionArea);
}
// A Gaussian penalty function, always returning values in [0, 1]. The weight
// is a function of similarity: the more two boxes overlap, the smaller the
// weight, so highly overlapping boxes are significantly penalized, while a
// non-overlapping box is not penalized at all. Past the hard IOU threshold
// the box is fully suppressed (weight 0).
function suppressWeight(iouThreshold, scale, iou) {
    if (iou > iouThreshold) {
        return 0.0;
    }
    // scale <= 0, so exp(scale * iou^2) lies in (0, 1].
    return Math.exp(scale * iou * iou);
}
// Orders candidates by ascending score. For objects with equal scores, the
// object with the larger boxIndex sorts first; since the candidate array is
// popped from the end, the object with the smaller index is popped first.
// This matches the output of the TensorFlow python version.
function ascendingComparator(c1, c2) {
    var scoreDelta = c1.score - c2.score;
    if (scoreDelta) {
        return scoreDelta;
    }
    // Scores tie: break on boxIndex, descending.
    return (c1.score === c2.score) && (c2.boxIndex - c1.boxIndex);
}
+
+ /**
+ * Performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union).
+ *
+ * This is the async version of `nonMaxSuppression`
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @return A 1D tensor with the selected box indices.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
// Async variant of nonMaxSuppression: downloads the box/score data and runs
// the shared CPU implementation, so no kernel dispatch blocks the UI thread.
// (The switch/_a.label structure is tsc's ES5 transpilation of async/await.)
function nonMaxSuppressionAsync_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
    if (iouThreshold === void 0) { iouThreshold = 0.5; }
    if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
    return __awaiter(this, void 0, void 0, function () {
        var $boxes, $scores, inputs, boxesAndScores, boxesVals, scoresVals, selectedIndices;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');
                    $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');
                    // Normalize thresholds and clamp maxOutputSize.
                    inputs = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);
                    maxOutputSize = inputs.maxOutputSize;
                    iouThreshold = inputs.iouThreshold;
                    scoreThreshold = inputs.scoreThreshold;
                    // Asynchronously read both tensors' values.
                    return [4 /*yield*/, Promise.all([$boxes.data(), $scores.data()])];
                case 1:
                    boxesAndScores = _a.sent();
                    boxesVals = boxesAndScores[0];
                    scoresVals = boxesAndScores[1];
                    // Run the V3 (classic) NMS on the downloaded values.
                    selectedIndices = nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold).selectedIndices;
                    // Dispose tensors we created from tensor-like inputs;
                    // caller-owned tensors are left alone.
                    if ($boxes !== boxes) {
                        $boxes.dispose();
                    }
                    if ($scores !== scores) {
                        $scores.dispose();
                    }
                    return [2 /*return*/, tensor1d(selectedIndices, 'int32')];
            }
        });
    });
}
var nonMaxSuppressionAsync = nonMaxSuppressionAsync_;
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union).
+ *
+ * This op also supports a Soft-NMS mode (c.f.
+ * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
+ * of other overlapping boxes, therefore favoring different regions of the image
+ * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`
+ * parameter to be larger than 0.
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @param softNmsSigma A float representing the sigma parameter for Soft NMS.
+ * When sigma is 0, it falls back to nonMaxSuppression.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - selectedScores: A 1D tensor with the corresponding scores for each
+ * selected box.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
function nonMaxSuppressionWithScore_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
    if (iouThreshold === void 0) { iouThreshold = 0.5; }
    if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
    if (softNmsSigma === void 0) { softNmsSigma = 0.0; }
    var $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression');
    var $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression');
    // Normalize thresholds/sigma and clamp maxOutputSize to the box count.
    var params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
    var attrs = {
        maxOutputSize: params.maxOutputSize,
        iouThreshold: params.iouThreshold,
        scoreThreshold: params.scoreThreshold,
        softNmsSigma: params.softNmsSigma
    };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var result = ENGINE.runKernel(NonMaxSuppressionV5, { boxes: $boxes, scores: $scores }, attrs);
    // The V5 kernel returns [selectedIndices, selectedScores].
    return { selectedIndices: result[0], selectedScores: result[1] };
}
var nonMaxSuppressionWithScore = op({ nonMaxSuppressionWithScore_: nonMaxSuppressionWithScore_ });
+
+ /**
+ * Asynchronously performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union).
+ *
+ * This op also supports a Soft-NMS mode (c.f.
+ * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
+ * of other overlapping boxes, therefore favoring different regions of the image
+ * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`
+ * parameter to be larger than 0.
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
+ * @param softNmsSigma A float representing the sigma parameter for Soft NMS.
+ * When sigma is 0, it falls back to nonMaxSuppression.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - selectedScores: A 1D tensor with the corresponding scores for each
+ * selected box.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
// Async Soft-NMS: downloads the box/score data and runs the shared V5 CPU
// implementation. (The switch/_b.label structure is tsc's ES5 transpilation
// of async/await.)
function nonMaxSuppressionWithScoreAsync_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
    if (iouThreshold === void 0) { iouThreshold = 0.5; }
    if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
    if (softNmsSigma === void 0) { softNmsSigma = 0.0; }
    return __awaiter(this, void 0, void 0, function () {
        var $boxes, $scores, params, boxesAndScores, boxesVals, scoresVals, _a, selectedIndices, selectedScores;
        return __generator(this, function (_b) {
            switch (_b.label) {
                case 0:
                    $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');
                    $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');
                    // Normalize thresholds/sigma and clamp maxOutputSize.
                    params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
                    maxOutputSize = params.maxOutputSize;
                    iouThreshold = params.iouThreshold;
                    scoreThreshold = params.scoreThreshold;
                    softNmsSigma = params.softNmsSigma;
                    // Asynchronously read both tensors' values.
                    return [4 /*yield*/, Promise.all([$boxes.data(), $scores.data()])];
                case 1:
                    boxesAndScores = _b.sent();
                    boxesVals = boxesAndScores[0];
                    scoresVals = boxesAndScores[1];
                    // Run Soft-NMS (V5) on the downloaded values.
                    _a = nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma), selectedIndices = _a.selectedIndices, selectedScores = _a.selectedScores;
                    // Dispose tensors we created from tensor-like inputs;
                    // caller-owned tensors are left alone.
                    if ($boxes !== boxes) {
                        $boxes.dispose();
                    }
                    if ($scores !== scores) {
                        $scores.dispose();
                    }
                    return [2 /*return*/, {
                            selectedIndices: tensor1d(selectedIndices, 'int32'),
                            selectedScores: tensor1d(selectedScores)
                        }];
            }
        });
    });
}
var nonMaxSuppressionWithScoreAsync = nonMaxSuppressionWithScoreAsync_;
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
 * Performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union), with an option to pad results.
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
 * @param padToMaxOutputSize Defaults to false. If true, size of output
+ * `selectedIndices` is padded to maxOutputSize.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - validOutputs: A scalar denoting how many elements in `selectedIndices`
+ * are valid. Valid elements occur first, then padding.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
function nonMaxSuppressionPadded_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) {
    if (iouThreshold === void 0) { iouThreshold = 0.5; }
    if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
    if (padToMaxOutputSize === void 0) { padToMaxOutputSize = false; }
    var $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression');
    var $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression');
    // The V4 kernel has no Soft-NMS mode, hence null for softNmsSigma.
    var params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */);
    var inputs = { boxes: $boxes, scores: $scores };
    var attrs = {
        maxOutputSize: params.maxOutputSize,
        iouThreshold: params.iouThreshold,
        scoreThreshold: params.scoreThreshold,
        padToMaxOutputSize: padToMaxOutputSize
    };
    // tslint:disable-next-line: no-unnecessary-type-assertion
    var result = ENGINE.runKernel(NonMaxSuppressionV4, inputs, attrs);
    // The V4 kernel returns [selectedIndices, validOutputs].
    return { selectedIndices: result[0], validOutputs: result[1] };
}
var nonMaxSuppressionPadded = op({ nonMaxSuppressionPadded_: nonMaxSuppressionPadded_ });
+
+ /**
+ * Asynchronously performs non maximum suppression of bounding boxes based on
+ * iou (intersection over union), with an option to pad results.
+ *
+ * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
+ * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
+ * the bounding box.
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
+ * @param maxOutputSize The maximum number of boxes to be selected.
+ * @param iouThreshold A float representing the threshold for deciding whether
+ * boxes overlap too much with respect to IOU. Must be between [0, 1].
+ * Defaults to 0.5 (50% box overlap).
+ * @param scoreThreshold A threshold for deciding when to remove boxes based
+ * on score. Defaults to -inf, which means any score is accepted.
 * @param padToMaxOutputSize Defaults to false. If true, size of output
+ * `selectedIndices` is padded to maxOutputSize.
+ * @return A map with the following properties:
+ * - selectedIndices: A 1D tensor with the selected box indices.
+ * - validOutputs: A scalar denoting how many elements in `selectedIndices`
+ * are valid. Valid elements occur first, then padding.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
// Async padded NMS: downloads the box/score data and runs the shared V4 CPU
// implementation. (The switch/_c.label structure is tsc's ES5 transpilation
// of async/await.)
function nonMaxSuppressionPaddedAsync_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) {
    if (iouThreshold === void 0) { iouThreshold = 0.5; }
    if (scoreThreshold === void 0) { scoreThreshold = Number.NEGATIVE_INFINITY; }
    if (padToMaxOutputSize === void 0) { padToMaxOutputSize = false; }
    return __awaiter(this, void 0, void 0, function () {
        var $boxes, $scores, params, $maxOutputSize, $iouThreshold, $scoreThreshold, _a, boxesVals, scoresVals, _b, selectedIndices, validOutputs;
        return __generator(this, function (_c) {
            switch (_c.label) {
                case 0:
                    $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');
                    $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');
                    // V4 has no Soft-NMS mode, hence null for softNmsSigma.
                    params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */);
                    $maxOutputSize = params.maxOutputSize;
                    $iouThreshold = params.iouThreshold;
                    $scoreThreshold = params.scoreThreshold;
                    // Asynchronously read both tensors' values.
                    return [4 /*yield*/, Promise.all([$boxes.data(), $scores.data()])];
                case 1:
                    _a = __read.apply(void 0, [_c.sent(), 2]), boxesVals = _a[0], scoresVals = _a[1];
                    // Run padded NMS (V4) on the downloaded values.
                    _b = nonMaxSuppressionV4Impl(boxesVals, scoresVals, $maxOutputSize, $iouThreshold, $scoreThreshold, padToMaxOutputSize), selectedIndices = _b.selectedIndices, validOutputs = _b.validOutputs;
                    // Dispose tensors we created from tensor-like inputs;
                    // caller-owned tensors are left alone.
                    if ($boxes !== boxes) {
                        $boxes.dispose();
                    }
                    if ($scores !== scores) {
                        $scores.dispose();
                    }
                    return [2 /*return*/, {
                            selectedIndices: tensor1d(selectedIndices, 'int32'),
                            validOutputs: scalar(validOutputs, 'int32')
                        }];
            }
        });
    });
}
var nonMaxSuppressionPaddedAsync = nonMaxSuppressionPaddedAsync_;
+
+ /**
+ * Bilinear resize a single 3D image or a batch of 3D images to a new shape.
+ *
+ * @param images The images, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param size The new shape `[newHeight, newWidth]` to resize the
+ * images to. Each channel is resized individually.
+ * @param alignCorners Defaults to `false`. If true, rescale
+ * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4
+ * corners of images and resized images. If false, rescale by
+ * `new_height / height`. Treat similarly the width dimension.
+ * @param halfPixelCenters Defaults to `false`. Whether to assume pixel centers
+ * are at 0.5, which would make the floating point coordinates of the top
+ * left pixel 0.5, 0.5.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function resizeBilinear_(images, size, alignCorners, halfPixelCenters) {
+ if (alignCorners === void 0) { alignCorners = false; }
+ if (halfPixelCenters === void 0) { halfPixelCenters = false; }
+ var $images = convertToTensor(images, 'images', 'resizeBilinear');
+ assert($images.rank === 3 || $images.rank === 4, function () { return "Error in resizeBilinear: x must be rank 3 or 4, but got " +
+ ("rank " + $images.rank + "."); });
+ assert(size.length === 2, function () { return "Error in resizeBilinear: new shape must 2D, but got shape " +
+ (size + "."); });
+ assert(halfPixelCenters === false || alignCorners === false, function () { return "Error in resizeBilinear: If halfPixelCenters is true, " +
+ "alignCorners must be false."; });
+ var batchImages = $images;
+ var reshapedTo4D = false;
+ if ($images.rank === 3) {
+ reshapedTo4D = true;
+ batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]);
+ }
+ __read(size, 0);
+ var inputs = { images: batchImages };
+ var attrs = { alignCorners: alignCorners, halfPixelCenters: halfPixelCenters, size: size };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ var res = ENGINE.runKernel(ResizeBilinear, inputs, attrs);
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ var resizeBilinear = op({ resizeBilinear_: resizeBilinear_ });
+
+ /**
+ * NearestNeighbor resize a batch of 3D images to a new shape.
+ *
+ * @param images The images, of rank 4 or rank 3, of shape
+ * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
+ * @param size The new shape `[newHeight, newWidth]` to resize the
+ * images to. Each channel is resized individually.
+ * @param alignCorners Defaults to False. If true, rescale
+ * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4
+ * corners of images and resized images. If false, rescale by
+ * `new_height / height`. Treat similarly the width dimension.
+ * @param halfPixelCenters Defaults to `false`. Whether to assumes pixels are of
+ * half the actual dimensions, and yields more accurate resizes. This flag
+ * would also make the floating point coordinates of the top left pixel
+ * 0.5, 0.5.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function resizeNearestNeighbor_(images, size, alignCorners, halfPixelCenters) {
+ if (alignCorners === void 0) { alignCorners = false; }
+ if (halfPixelCenters === void 0) { halfPixelCenters = false; }
+ var $images = convertToTensor(images, 'images', 'resizeNearestNeighbor');
+ assert($images.rank === 3 || $images.rank === 4, function () { return "Error in resizeNearestNeighbor: x must be rank 3 or 4, but got " +
+ ("rank " + $images.rank + "."); });
+ assert(size.length === 2, function () { return "Error in resizeNearestNeighbor: new shape must 2D, but got shape " +
+ (size + "."); });
+ assert($images.dtype === 'float32' || $images.dtype === 'int32', function () { return '`images` must have `int32` or `float32` as dtype'; });
+ assert(halfPixelCenters === false || alignCorners === false, function () { return "Error in resizeNearestNeighbor: If halfPixelCenters is true, " +
+ "alignCorners must be false."; });
+ var batchImages = $images;
+ var reshapedTo4D = false;
+ if ($images.rank === 3) {
+ reshapedTo4D = true;
+ batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]);
+ }
+ __read(size, 0);
+ var inputs = { images: batchImages };
+ var attrs = { alignCorners: alignCorners, halfPixelCenters: halfPixelCenters, size: size };
+ // tslint:disable-next-line: no-unnecessary-type-assertion
+ var res = ENGINE.runKernel(ResizeNearestNeighbor, inputs, attrs);
+ if (reshapedTo4D) {
+ return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
+ }
+ return res;
+ }
+ var resizeNearestNeighbor = op({ resizeNearestNeighbor_: resizeNearestNeighbor_ });
+
+ /**
+ * Performs image binarization with corresponding threshold
+ * (depends on the method)value, which creates a binary image from a grayscale.
+ * @param image 3d tensor of shape [imageHeight,imageWidth, depth],
+ * where imageHeight and imageWidth must be positive.The image color
+ * range should be [0, 255].
+ * @param method Optional string from `'binary' | 'otsu'`
+ * which specifies the method for thresholding. Defaults to 'binary'.
+ * @param inverted Optional boolean whichspecifies
+ * if colours should be inverted. Defaults to false.
+ * @param threshValue Optional number which defines threshold value from 0 to 1.
+ * Defaults to 0.5.
+ * @return A 3d tensor of shape [imageHeight,imageWidth, depth], which
+ * contains binarized image.
+ */
+ function threshold_(image, method, inverted, threshValue) {
+ var _a;
+ if (method === void 0) { method = 'binary'; }
+ if (inverted === void 0) { inverted = false; }
+ if (threshValue === void 0) { threshValue = 0.5; }
+ var $image = convertToTensor(image, 'image', 'threshold');
+ /* 0.2989, 0.5870, 0.1140 are represent luma coefficients in CCIR601.
+ Reference for converting between RGB and grayscale: https://en.wikipedia.org/wiki/Luma_%28video%29 */
+ var RED_INTENCITY_COEF = 0.2989;
+ var GREEN_INTENCITY_COEF = 0.5870;
+ var BLUE_INTENCITY_COEF = 0.1140;
+ var totalPixelsInImage = $image.shape[0] * $image.shape[1];
+ var $threshold = mul(tensor1d([threshValue]), 255);
+ var r, g, b, grayscale;
+ assert($image.rank === 3, function () { return 'Error in threshold: image must be rank 3,' +
+ ("but got rank " + $image.rank + "."); });
+ assert($image.shape[2] === 3 || $image.shape[2] === 1, function () { return 'Error in threshold: ' +
+ 'image color channel must be equal to 3 or 1' +
+ ("but got " + $image.shape[2] + "."); });
+ assert($image.dtype === 'int32' || $image.dtype === 'float32', function () { return 'Error in dtype: image dtype must be int32 or float32,' +
+ ("but got dtype " + $image.dtype + "."); });
+ assert(method === 'otsu' || method === 'binary', function () { return "Method must be binary or otsu, but was " + method; });
+ if ($image.shape[2] === 3) {
+ _a = __read(split($image, [1, 1, 1], -1), 3), r = _a[0], g = _a[1], b = _a[2];
+ var $r = mul(r, RED_INTENCITY_COEF);
+ var $g = mul(g, GREEN_INTENCITY_COEF);
+ var $b = mul(b, BLUE_INTENCITY_COEF);
+ grayscale = add(add($r, $g), $b);
+ }
+ else {
+ grayscale = image;
+ }
+ if (method === 'otsu') {
+ var $histogram = bincount(cast(round(grayscale), 'int32'), tensor([]), 256);
+ $threshold = otsu($histogram, totalPixelsInImage);
+ }
+ var invCondition = inverted ?
+ lessEqual(grayscale, $threshold) : greater(grayscale, $threshold);
+ var result = cast(mul(invCondition, 255), 'int32');
+ return result;
+ }
+ // Otsu's method: scans every candidate split point of the intensity
+ // histogram and returns (as a 1-element tensor) the bin index that
+ // maximizes the between-class variance
+ // weightFg * weightBg * (meanFg - meanBg)^2.
+ // `histogram` is a 1D tensor of bin counts; `total` is the pixel count.
+ // NOTE(review): each loop iteration creates intermediate tensors that are
+ // not explicitly disposed here — presumably callers run this inside a
+ // tidy()/op() scope; verify before reusing standalone.
+ function otsu(histogram, total) {
+ var bestThresh = tensor1d([-1]);
+ var bestInBetVar = tensor1d([0]);
+ var cInBetVar = tensor1d([0]);
+ var classFirst, classSecond, meanFirst, meanSec, weightForeground, weightBack;
+ for (var index = 0; index < histogram.size - 1; index++) {
+ // Split bins into [0, index] (foreground) and (index, end] (background).
+ classFirst = slice(histogram, 0, index + 1);
+ classSecond = slice(histogram, index + 1);
+ weightForeground = div(sum(classFirst), total);
+ weightBack = div(sum(classSecond), total);
+ // Mean intensity of the foreground class.
+ var meanFirstDivA = sum(mul(classFirst, range(0, classFirst.size)));
+ meanFirst = div(meanFirstDivA, sum(classFirst));
+ // Mean intensity of the background class; bin indices are offset by the
+ // foreground size so they refer to absolute intensities.
+ var meanSecFill = fill(classSecond.shape, classFirst.size);
+ var meanSecAdd = add(range(0, classSecond.size), meanSecFill);
+ var meanSecMul = mul(classSecond, (meanSecAdd));
+ meanSec = div(sum(meanSecMul), sum(classSecond));
+ // Between-class variance for this split.
+ var cInBetVarSubA = sub(meanFirst, meanSec);
+ var cInBetVarSubB = sub(meanFirst, meanSec);
+ var cInBetVarMul = mul(weightForeground, weightBack);
+ cInBetVar = mul(mul(cInBetVarMul, cInBetVarSubA), cInBetVarSubB);
+ // Keep the split with the highest variance seen so far.
+ var condition = greater(cInBetVar, bestInBetVar);
+ bestInBetVar = where(condition, cInBetVar, bestInBetVar);
+ bestThresh = where(condition, tensor1d([index]), bestThresh);
+ }
+ return bestThresh;
+ }
+ var threshold = op({ threshold_: threshold_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Applies the given transform(s) to the image(s).
+ *
+ * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
+ * @param transforms Projective transform matrix/matrices. A tensor1d of length
+ * 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0
+ * b1, b2, c0, c1], then it maps the output point (x, y) to a transformed
+ * input point (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k),
+ * where k = c0 x + c1 y + 1. The transforms are inverted compared to the
+ * transform mapping input points to output points.
+ * @param interpolation Interpolation mode.
+ * Supported values: 'nearest', 'bilinear'. Default to 'nearest'.
+ * @param fillMode Points outside the boundaries of the input are filled
+ * according to the given mode, one of 'constant', 'reflect', 'wrap',
+ * 'nearest'. Default to 'constant'.
+ * 'reflect': (d c b a | a b c d | d c b a ) The input is extended by
+ * reflecting about the edge of the last pixel.
+ * 'constant': (k k k k | a b c d | k k k k) The input is extended by
+ * filling all values beyond the edge with the same constant value k.
+ * 'wrap': (a b c d | a b c d | a b c d) The input is extended by
+ * wrapping around to the opposite edge.
+ * 'nearest': (a a a a | a b c d | d d d d) The input is extended by
+ * the nearest pixel.
+ * @param fillValue A float represents the value to be filled outside the
+ * boundaries when fillMode is 'constant'.
+ * @param Output dimension after the transform, [height, width]. If undefined,
+ * output is the same size as input image.
+ *
+ * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
+ */
+ function transform_(image, transforms, interpolation, fillMode, fillValue, outputShape) {
+ if (interpolation === void 0) { interpolation = 'nearest'; }
+ if (fillMode === void 0) { fillMode = 'constant'; }
+ if (fillValue === void 0) { fillValue = 0; }
+ var $image = convertToTensor(image, 'image', 'transform', 'float32');
+ var $transforms = convertToTensor(transforms, 'transforms', 'transform', 'float32');
+ assert($image.rank === 4, function () { return 'Error in transform: image must be rank 4,' +
+ ("but got rank " + $image.rank + "."); });
+ assert($transforms.rank === 2 &&
+ ($transforms.shape[0] === $image.shape[0] ||
+ $transforms.shape[0] === 1) &&
+ $transforms.shape[1] === 8, function () { return "Error in transform: Input transform should be batch x 8 or 1 x 8"; });
+ assert(outputShape == null || outputShape.length === 2, function () { return 'Error in transform: outputShape must be [height, width] or null, ' +
+ ("but got " + outputShape + "."); });
+ var inputs = { image: $image, transforms: $transforms };
+ var attrs = { interpolation: interpolation, fillMode: fillMode, fillValue: fillValue, outputShape: outputShape };
+ return ENGINE.runKernel(Transform, inputs, attrs);
+ }
+ var transform = op({ transform_: transform_ });
+
+ /**
+ * Copy a tensor setting everything outside a central band in each innermost
+ * matrix to zero.
+ *
+ * The band part is computed as follows: Assume input has `k` dimensions
+ * `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where
+ * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
+ * The indicator function
+ * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower))`
+ * `&& (num_upper < 0 || (n-m) <= num_upper)`
+ *
+ * ```js
+ * const x = tf.tensor2d([[ 0, 1, 2, 3],
+ * [-1, 0, 1, 2],
+ * [-2, -1, 0, 1],
+ * [-3, -2, -1, 0]]);
+ * let y = tf.linalg.bandPart(x, 1, -1);
+ * y.print(); // [[ 0, 1, 2, 3],
+ * // [-1, 0, 1, 2],
+ * // [ 0, -1, 0, 1],
+ * // [ 0, 0 , -1, 0]]
+ * let z = tf.linalg.bandPart(x, 2, 1);
+ * z.print(); // [[ 0, 1, 0, 0],
+ * // [-1, 0, 1, 0],
+ * // [-2, -1, 0, 1],
+ * // [ 0, -2, -1, 0]]
+ * ```
+ *
+ * @param x Rank `k` tensor
+ * @param numLower Number of subdiagonals to keep.
+ * If negative, keep entire lower triangle.
+ * @param numUpper Number of subdiagonals to keep.
+ * If negative, keep entire upper triangle.
+ * @returns Rank `k` tensor of the same shape as input.
+ * The extracted banded tensor.
+ *
+ * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}
+ */
+ function bandPart_(a, numLower, numUpper) {
+ assert(numLower % 1 === 0, function () { return "bandPart(): numLower must be an integer, got " + numLower + "."; });
+ assert(numUpper % 1 === 0, function () { return "bandPart(): numUpper must be an integer, got " + numUpper + "."; });
+ var $a = convertToTensor(a, 'a', 'bandPart');
+ assert($a.rank >= 2, function () { return "bandPart(): Rank must be at least 2, got " + $a.rank + "."; });
+ var shape = $a.shape;
+ var _a = __read($a.shape.slice(-2), 2), M = _a[0], N = _a[1];
+ if (!(numLower <= M)) {
+ throw new Error("bandPart(): numLower (" + numLower + ")" +
+ (" must not be greater than the number of rows (" + M + ")."));
+ }
+ if (!(numUpper <= N)) {
+ throw new Error("bandPart(): numUpper (" + numUpper + ")" +
+ (" must not be greater than the number of columns (" + N + ")."));
+ }
+ if (numLower < 0) {
+ numLower = M;
+ }
+ if (numUpper < 0) {
+ numUpper = N;
+ }
+ var i = reshape(range(0, M, 1, 'int32'), [-1, 1]);
+ var j = range(0, N, 1, 'int32');
+ var ij = sub(i, j);
+ var inBand = logicalAnd(lessEqual(ij, scalar(+numLower, 'int32')), greaterEqual(ij, scalar(-numUpper, 'int32')));
+ var zero = zeros([M, N], $a.dtype);
+ return reshape(stack(unstack(reshape($a, [-1, M, N]))
+ .map(function (mat) { return where(inBand, mat, zero); })), shape);
+ }
+ var bandPart = op({ bandPart_: bandPart_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Gram-Schmidt orthogonalization.
+ *
+ * ```js
+ * const x = tf.tensor2d([[1, 2], [3, 4]]);
+ * let y = tf.linalg.gramSchmidt(x);
+ * y.print();
+ * console.log('Othogonalized:');
+ * y.dot(y.transpose()).print(); // should be nearly the identity matrix.
+ * console.log('First row direction maintained:');
+ * const data = await y.array();
+ * console.log(data[0][1] / data[0][0]); // should be nearly 2.
+ * ```
+ *
+ * @param xs The vectors to be orthogonalized, in one of the two following
+ * formats:
+ * - An Array of `tf.Tensor1D`.
+ * - A `tf.Tensor2D`, i.e., a matrix, in which case the vectors are the rows
+ * of `xs`.
+ * In each case, all the vectors must have the same length and the length
+ * must be greater than or equal to the number of vectors.
+ * @returns The orthogonalized and normalized vectors or matrix.
+ * Orthogonalization means that the vectors or the rows of the matrix
+ * are orthogonal (zero inner products). Normalization means that each
+ * vector or each row of the matrix has an L2 norm that equals `1`.
+ *
+ * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}
+ */
+ // Gram-Schmidt orthogonalization (see doc comment above). Accepts either an
+ // array of 1D tensors or a single 2D tensor whose rows are the vectors.
+ function gramSchmidt_(xs) {
+ var inputIsTensor2D;
+ if (Array.isArray(xs)) {
+ inputIsTensor2D = false;
+ assert(xs != null && xs.length > 0, function () { return 'Gram-Schmidt process: input must not be null, undefined, or ' +
+ 'empty'; });
+ // All vectors must share the length of the first one.
+ var dim_1 = xs[0].shape[0];
+ var _loop_1 = function (i) {
+ assert(xs[i].shape[0] === dim_1, function () { return 'Gram-Schmidt: Non-unique lengths found in the input vectors: ' +
+ ("(" + xs[i].shape[0] + " vs. " + dim_1 + ")"); });
+ };
+ for (var i = 1; i < xs.length; ++i) {
+ _loop_1(i);
+ }
+ }
+ else {
+ inputIsTensor2D = true;
+ // Split the matrix into its rows so both input forms share one code path.
+ xs = split(xs, xs.shape[0], 0).map(function (x) { return squeeze(x, [0]); });
+ }
+ assert(xs.length <= xs[0].shape[0], function () { return "Gram-Schmidt: Number of vectors (" + xs.length + ") exceeds " +
+ ("number of dimensions (" + xs[0].shape[0] + ")."); });
+ var ys = [];
+ var xs1d = xs;
+ // Classical Gram-Schmidt: subtract from each vector its projections onto
+ // all previously orthogonalized vectors, then normalize to unit L2 norm.
+ // Each step runs inside ENGINE.tidy to free intermediate tensors.
+ var _loop_2 = function (i) {
+ ys.push(ENGINE.tidy(function () {
+ var x = xs1d[i];
+ if (i > 0) {
+ for (var j = 0; j < i; ++j) {
+ var proj = mul(sum(mul(ys[j], x)), ys[j]);
+ x = sub(x, proj);
+ }
+ }
+ return div(x, norm(x, 'euclidean'));
+ }));
+ };
+ for (var i = 0; i < xs.length; ++i) {
+ _loop_2(i);
+ }
+ // Return in the same form the caller provided: matrix in, matrix out.
+ if (inputIsTensor2D) {
+ return stack(ys, 0);
+ }
+ else {
+ return ys;
+ }
+ }
+ var gramSchmidt = op({ gramSchmidt_: gramSchmidt_ });
+
+ /**
+ * Compute QR decomposition of m-by-n matrix using Householder transformation.
+ *
+ * Implementation based on
+ * [http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf]
+ * (http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf)
+ *
+ * ```js
+ * const a = tf.tensor2d([[1, 2], [3, 4]]);
+ * let [q, r] = tf.linalg.qr(a);
+ * console.log('Q');
+ * q.print();
+ * console.log('R');
+ * r.print();
+ * console.log('Orthogonalized');
+ * q.dot(q.transpose()).print() // should be nearly the identity matrix.
+ * console.log('Reconstructed');
+ * q.dot(r).print(); // should be nearly [[1, 2], [3, 4]];
+ * ```
+ *
+ * @param x The `tf.Tensor` to be QR-decomposed. Must have rank >= 2. Suppose
+ * it has the shape `[..., M, N]`.
+ * @param fullMatrices An optional boolean parameter. Defaults to `false`.
+ * If `true`, compute full-sized `Q`. If `false` (the default),
+ * compute only the leading N columns of `Q` and `R`.
+ * @returns An `Array` of two `tf.Tensor`s: `[Q, R]`. `Q` is a unitary matrix,
+ * i.e., its columns all have unit norm and are mutually orthogonal.
+ * If `M >= N`,
+ * If `fullMatrices` is `false` (default),
+ * - `Q` has a shape of `[..., M, N]`,
+ * - `R` has a shape of `[..., N, N]`.
+ * If `fullMatrices` is `true` (default),
+ * - `Q` has a shape of `[..., M, M]`,
+ * - `R` has a shape of `[..., M, N]`.
+ * If `M < N`,
+ * - `Q` has a shape of `[..., M, M]`,
+ * - `R` has a shape of `[..., M, N]`.
+ * @throws If the rank of `x` is less than 2.
+ *
+ * @doc {heading:'Operations',
+ * subheading:'Linear Algebra',
+ * namespace:'linalg'}
+ */
+ function qr_(x, fullMatrices) {
+ if (fullMatrices === void 0) { fullMatrices = false; }
+ assert(x.rank >= 2, function () { return "qr() requires input tensor to have a rank >= 2, but got rank " + x.rank; });
+ if (x.rank === 2) {
+ return qr2d(x, fullMatrices);
+ }
+ else {
+ // Rank > 2.
+ // TODO(cais): Below we split the input into individual 2D tensors,
+ // perform QR decomposition on them and then stack the results back
+ // together. We should explore whether this can be parallelized.
+ var outerDimsProd = x.shape.slice(0, x.shape.length - 2)
+ .reduce(function (value, prev) { return value * prev; });
+ var x2ds = unstack(reshape(x, [
+ outerDimsProd, x.shape[x.shape.length - 2],
+ x.shape[x.shape.length - 1]
+ ]), 0);
+ var q2ds_1 = [];
+ var r2ds_1 = [];
+ x2ds.forEach(function (x2d) {
+ var _a = __read(qr2d(x2d, fullMatrices), 2), q2d = _a[0], r2d = _a[1];
+ q2ds_1.push(q2d);
+ r2ds_1.push(r2d);
+ });
+ var q = reshape(stack(q2ds_1, 0), x.shape);
+ var r = reshape(stack(r2ds_1, 0), x.shape);
+ return [q, r];
+ }
+ }
+ // QR decomposition of a single 2D matrix via Householder reflections.
+ // Returns [q, r]. When fullMatrices is false and m > n, the results are
+ // trimmed to the economy-size factors Q: [m, n], R: [n, n].
+ function qr2d(x, fullMatrices) {
+ if (fullMatrices === void 0) { fullMatrices = false; }
+ return ENGINE.tidy(function () {
+ assert(x.shape.length === 2, function () { return "qr2d() requires a 2D Tensor, but got a " + x.shape.length + "D Tensor."; });
+ var m = x.shape[0];
+ var n = x.shape[1];
+ var q = eye(m); // Orthogonal transform so far.
+ var r = clone(x); // Transformed matrix so far.
+ var one2D = tensor2d([[1]], [1, 1]);
+ var w = clone(one2D);
+ var iters = m >= n ? n : m;
+ var _loop_1 = function (j) {
+ var _a;
+ // This tidy within the for-loop ensures we clean up temporary
+ // tensors as soon as they are no longer needed.
+ // Hold references to the previous iteration's w/r/q so they can be
+ // disposed after the inner tidy produces their replacements.
+ var rTemp = r;
+ var wTemp = w;
+ var qTemp = q;
+ _a = __read(ENGINE.tidy(function () {
+ // Find H = I - tau * w * w', to put zeros below R(j, j).
+ var rjEnd1 = slice(r, [j, j], [m - j, 1]);
+ var normX = norm(rjEnd1);
+ var rjj = slice(r, [j, j], [1, 1]);
+ // The sign() function returns 0 on 0, which causes division by zero.
+ var s = where(greater(rjj, 0), tensor2d([[-1]]), tensor2d([[1]]));
+ var u1 = sub(rjj, mul(s, normX));
+ var wPre = div(rjEnd1, u1);
+ // Householder vector w: leading 1 followed by the scaled tail.
+ if (wPre.shape[0] === 1) {
+ w = clone(one2D);
+ }
+ else {
+ w = concat([
+ one2D,
+ slice(wPre, [1, 0], [wPre.shape[0] - 1, wPre.shape[1]])
+ ], 0);
+ }
+ var tau = neg(div(matMul$1(s, u1), normX));
+ // -- R := HR, Q := QH.
+ var rjEndAll = slice(r, [j, 0], [m - j, n]);
+ var tauTimesW = mul(tau, w);
+ var wT = transpose(w);
+ // Only rows j..m of R change; earlier rows are already upper-
+ // triangular and are concatenated back unchanged.
+ if (j === 0) {
+ r = sub(rjEndAll, matMul$1(tauTimesW, matMul$1(wT, rjEndAll)));
+ }
+ else {
+ var rTimesTau = sub(rjEndAll, matMul$1(tauTimesW, matMul$1(wT, rjEndAll)));
+ r = concat([slice(r, [0, 0], [j, n]), rTimesTau], 0);
+ }
+ var tawTimesWT = transpose(tauTimesW);
+ // Only columns j..end of Q change; earlier columns are kept.
+ var qAllJEnd = slice(q, [0, j], [m, q.shape[1] - j]);
+ if (j === 0) {
+ q = sub(qAllJEnd, matMul$1(matMul$1(qAllJEnd, w), tawTimesWT));
+ }
+ else {
+ var qTimesTau = sub(qAllJEnd, matMul$1(matMul$1(qAllJEnd, w), tawTimesWT));
+ q = concat([slice(q, [0, 0], [m, j]), qTimesTau], 1);
+ }
+ return [w, r, q];
+ }), 3), w = _a[0], r = _a[1], q = _a[2];
+ dispose([rTemp, wTemp, qTemp]);
+ };
+ for (var j = 0; j < iters; ++j) {
+ _loop_1(j);
+ }
+ // Economy-size trim when the caller did not request full matrices.
+ if (!fullMatrices && m > n) {
+ q = slice(q, [0, 0], [m, n]);
+ r = slice(r, [0, 0], [n, n]);
+ }
+ return [q, r];
+ });
+ }
+ var qr = op({ qr_: qr_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Loss-reduction modes used by the tf.losses.* ops below. Transpiled
+ // TypeScript enum: maps both name -> value and value -> name.
+ exports.Reduction = void 0;
+ (function (Reduction) {
+ Reduction[Reduction["NONE"] = 0] = "NONE";
+ Reduction[Reduction["MEAN"] = 1] = "MEAN";
+ Reduction[Reduction["SUM"] = 2] = "SUM";
+ Reduction[Reduction["SUM_BY_NONZERO_WEIGHTS"] = 3] = "SUM_BY_NONZERO_WEIGHTS";
+ })(exports.Reduction || (exports.Reduction = {}));
+
+ /**
+ * Computes the weighted loss between two tensors.
+ *
+ * @param losses Tensor of shape `[batch_size, d1, ... dN]`.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `losses`, and must be broadcastable to `losses` (i.e., all
+ * dimensions must be either `1`, or the same as the corresponding
+ * `losses` dimension).
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function computeWeightedLoss_(losses, weights, reduction) {
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $losses = convertToTensor(losses, 'losses', 'computeWeightedLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'computeWeightedLoss');
+ }
+ var weightedLoss = ($weights == null) ? $losses : mul($losses, $weights);
+ if (reduction === exports.Reduction.NONE) {
+ return weightedLoss;
+ }
+ if (reduction === exports.Reduction.SUM) {
+ return sum(weightedLoss);
+ }
+ if (reduction === exports.Reduction.MEAN) {
+ if ($weights == null) {
+ return mean(weightedLoss);
+ }
+ else {
+ var broadcastFactor = $losses.size / $weights.size;
+ var result = div(sum(weightedLoss), sum($weights));
+ return broadcastFactor > 1 ? div(result, scalar(broadcastFactor)) :
+ result;
+ }
+ }
+ if (reduction === exports.Reduction.SUM_BY_NONZERO_WEIGHTS) {
+ if ($weights == null) {
+ return div(sum(weightedLoss), scalar($losses.size));
+ }
+ else {
+ var broadcastedWeights = mul($weights, ones($losses.shape));
+ var numNonZeros = cast(sum(notEqual(broadcastedWeights, scalar(0))), 'float32');
+ return div(sum(weightedLoss), numNonZeros);
+ }
+ }
+ throw Error("Unknown reduction: " + reduction);
+ }
+ var computeWeightedLoss = op({ computeWeightedLoss_: computeWeightedLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the absolute difference loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function absoluteDifference_(labels, predictions, weights, reduction) {
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'absoluteDifference');
+ var $predictions = convertToTensor(predictions, 'predictions', 'absoluteDifference');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'absoluteDifference');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in absoluteDifference: ');
+ var losses = abs(sub($labels, $predictions));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var absoluteDifference = op({ absoluteDifference_: absoluteDifference_ });
+
+ /**
+ * Computes the cosine distance loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param axis The dimension along which the cosine distance is computed.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function cosineDistance_(labels, predictions, axis, weights, reduction) {
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'cosineDistance');
+ var $predictions = convertToTensor(predictions, 'predictions', 'cosineDistance');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'cosineDistance');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in cosineDistance: ');
+ var one = scalar(1);
+ var losses = sub(one, sum(mul($labels, $predictions), axis, true));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var cosineDistance = op({ cosineDistance_: cosineDistance_ });
+
+ /**
+ * Computes the Hinge loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function hingeLoss_(labels, predictions, weights, reduction) {
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'hingeLoss');
+ var $predictions = convertToTensor(predictions, 'predictions', 'hingeLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'hingeLoss');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in hingeLoss: ');
+ var one = scalar(1);
+ // Convert binary labels to (-1, 1)
+ $labels = sub(mul(scalar(2), $labels), one);
+ var losses = relu(sub(one, mul($labels, $predictions)));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var hingeLoss = op({ hingeLoss_: hingeLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the huber loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param delta Point where huber loss changes from quadratic to linear.
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`.
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function huberLoss_(labels, predictions, weights, delta, reduction) {
+ if (delta === void 0) { delta = 1.0; }
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'huberLoss');
+ var $predictions = convertToTensor(predictions, 'predictions', 'huberLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'huberLoss');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in huberLoss: ');
+ var deltaScalar = scalar(delta);
+ var error = abs(sub($predictions, $labels));
+ var quadratic = minimum(error, deltaScalar);
+ var linear = sub(error, quadratic);
+ var losses = add(mul(scalar(0.5), square(quadratic)), mul(deltaScalar, linear));
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var huberLoss = op({ huberLoss_: huberLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the log loss between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param epsilon A small increment to avoid taking log of zero
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function logLoss_(labels, predictions, weights, epsilon, reduction) {
+ if (epsilon === void 0) { epsilon = 1e-7; }
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'logLoss');
+ var $predictions = convertToTensor(predictions, 'predictions', 'logLoss');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'logLoss');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in logLoss: ');
+ var one = scalar(1);
+ var epsilonScalar = scalar(epsilon);
+ var l1 = neg(mul($labels, log(add($predictions, epsilonScalar))));
+ var l2 = mul(sub(one, $labels), log(add(sub(one, $predictions), epsilonScalar)));
+ var losses = sub(l1, l2);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var logLoss = op({ logLoss_: logLoss_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the mean squared error between two tensors.
+ *
+ * @param labels The ground truth output tensor, same dimensions as
+ * 'predictions'.
+ * @param predictions The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
+ */
+ function meanSquaredError_(labels, predictions, weights, reduction) {
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $labels = convertToTensor(labels, 'labels', 'meanSquaredError');
+ var $predictions = convertToTensor(predictions, 'predictions', 'meanSquaredError');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'meanSquaredError');
+ }
+ assertShapesMatch($labels.shape, $predictions.shape, 'Error in meanSquaredError: ');
+ var losses = squaredDifference($labels, $predictions);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var meanSquaredError = op({ meanSquaredError_: meanSquaredError_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Numerically stable element-wise sigmoid cross entropy between `labels`
+ // (targets, same shape as `logits`) and raw, unscaled `logits`. Internal
+ // helper: performs no weight handling or reduction; returns the
+ // per-element loss tensor.
+ function sigmoidCrossEntropyWithLogits_(labels, logits) {
+ var $labels = convertToTensor(labels, 'labels', 'sigmoidCrossEntropyWithLogits');
+ var $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropyWithLogits');
+ assertShapesMatch($labels.shape, $logits.shape, 'Error in sigmoidCrossEntropyWithLogits: ');
+ /**
+ * Implementation Details:
+ *
+ * For brevity, let `x = logits`, `z = labels`. The logistic loss is
+ * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
+ * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
+ * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
+ * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
+ * = (1 - z) * x + log(1 + exp(-x))
+ * = x - x * z + log(1 + exp(-x))
+ *
+ * For x < 0, to avoid overflow in exp(-x), we reformulate the above
+ * x - x * z + log(1 + exp(-x))
+ * = log(exp(x)) - x * z + log(1 + exp(-x))
+ * = - x * z + log(1 + exp(x))
+ *
+ * Hence, to ensure stability and avoid overflow, the implementation uses
+ * this equivalent formulation:
+ * max(x, 0) - x * z + log(1 + exp(-abs(x)))
+ */
+ // max(x, 0)
+ var maxOutput = relu($logits);
+ // x * z
+ var outputXTarget = mul($logits, $labels);
+ // log(1 + exp(-|x|)), computed via log1p for accuracy near zero
+ var sigmoidOutput = log1p(exp(neg(abs($logits))));
+ // max(x, 0) - x * z + log(1 + exp(-|x|))
+ return add(sub(maxOutput, outputXTarget), sigmoidOutput);
+ }
+ /**
+ * Computes the sigmoid cross entropy loss between two tensors.
+ *
+ * If labelSmoothing is nonzero, smooth the labels towards 1/2:
+ *
+ * newMulticlassLabels = multiclassLabels * (1 - labelSmoothing)
+ * + 0.5 * labelSmoothing
+ *
+ * @param multiClassLabels The ground truth output tensor of shape
+ * [batch_size, num_classes], same dimensions as 'predictions'.
+ * @param logits The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or the same rank as
+ * `labels`, and must be broadcastable to `labels` (i.e., all dimensions
+ * must be either `1`, or the same as the corresponding `losses`
+ * dimension).
+ * @param labelSmoothing If greater than 0, then smooth the labels.
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }
+ */
+ function sigmoidCrossEntropy_(multiClassLabels, logits, weights, labelSmoothing, reduction) {
+ if (labelSmoothing === void 0) { labelSmoothing = 0; }
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $multiClassLabels = convertToTensor(multiClassLabels, 'multiClassLabels', 'sigmoidCrossEntropy');
+ var $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropy');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'sigmoidCrossEntropy');
+ }
+ assertShapesMatch($multiClassLabels.shape, $logits.shape, 'Error in sigmoidCrossEntropy: ');
+ if (labelSmoothing > 0) {
+ var labelSmoothingScalar = scalar(labelSmoothing);
+ var one = scalar(1);
+ var half = scalar(0.5);
+ $multiClassLabels =
+ add(mul($multiClassLabels, sub(one, labelSmoothingScalar)), mul(half, labelSmoothingScalar));
+ }
+ var losses = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var sigmoidCrossEntropy = op({ sigmoidCrossEntropy_: sigmoidCrossEntropy_ });
+
+ /**
+ * Computes softmax cross entropy between logits and labels.
+ *
+ * Measures the probability error in discrete classification tasks in which
+ * the classes are mutually exclusive (each entry is in exactly one class).
+ * For example, each CIFAR-10 image is labeled with one and only one label: an
+ * image can be a dog or a truck, but not both.
+ *
+ * `NOTE`: While the classes are mutually exclusive, their probabilities need
+ * not be. All that is required is that each row of labels is a valid
+ * probability distribution. If they are not, the computation of the gradient
+ * will be incorrect.
+ *
+ * `WARNING`: This op expects unscaled logits, since it performs a softmax on
+ * logits internally for efficiency. Do not call this op with the output of
+ * softmax, as it will produce incorrect results.
+ *
+ * logits and labels must have the same shape, e.g. [batch_size, num_classes]
+ * and the same dtype.
+ * @param labels The labels array.
+ * @param logits The logits array.
+ * @param dim The dimension softmax would be performed on. Defaults to `-1`
+ * which indicates the last dimension.
+ */
+ function softmaxCrossEntropyWithLogits_(labels, logits, dim) {
+ if (dim === void 0) { dim = -1; }
+ // dim === -1 selects the last axis; any other non-last axis is rejected below.
+ if (dim === -1) {
+ dim = logits.rank - 1;
+ }
+ if (dim !== logits.rank - 1) {
+ throw Error("Softmax cross entropy along a non-last dimension is not yet " +
+ ("supported. Labels / logits was rank " + logits.rank + " ") +
+ ("and dim was " + dim));
+ }
+ // Use a custom gradient for numerical stability.
+ var customOp = customGrad(function (labels, logits, save) {
+ // Reference:
+ // 1. http://cs231n.github.io/linear-classify/#softmax
+ // 2. https://blog.feedly.com/tricks-of-the-trade-logsumexp/
+ var keepDims = true;
+ // log(softmax(logits)) computed stably as logits - logSumExp(logits).
+ var lse = logSumExp(logits, [dim], keepDims);
+ var logResult = sub(cast(logits, 'float32'), lse);
+ // `save` is supplied by customGrad; stashes tensors for gradFunc below.
+ save([labels, logResult]);
+ // Cross entropy per element: -labels * log(softmax(logits)).
+ var costVector = neg(mul(logResult, labels));
+ var value = sum(costVector, [dim]);
+ var gradFunc = function (dy, saved) {
+ var _a = __read(saved, 2), labels = _a[0], logResult = _a[1];
+ // Reshape dy so it broadcasts back across the reduced `dim` axis.
+ var dyShape = expandShapeToKeepDim(dy.shape, [dim]);
+ // Gradients w.r.t. [labels, logits]; softmax(logits) = exp(logResult).
+ return [
+ mul(reshape(dy, dyShape), sub(cast(labels, 'float32'), exp(logResult))),
+ mul(reshape(dy, dyShape), sub(exp(logResult), cast(labels, 'float32'))),
+ ];
+ };
+ return { value: value, gradFunc: gradFunc };
+ });
+ return customOp(labels, logits);
+ }
+ /**
+ * Computes the softmax cross entropy loss between two tensors.
+ *
+ * If labelSmoothing is nonzero, smooth the labels towards 1/2:
+ *
+ * newOnehotLabels = onehotLabels * (1 - labelSmoothing)
+ * + labelSmoothing / numClasses
+ *
+ * @param onehotLabels One hot encoded labels
+ * [batch_size, num_classes], same dimensions as 'predictions'.
+ * @param logits The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or 1, and must be
+ * broadcastable to `loss` of shape [batch_size]
+ * @param labelSmoothing If greater than 0, then smooth the labels.
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ * `Reduction`
+ *
+ * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }
+ */
+ function softmaxCrossEntropy_(onehotLabels, logits, weights, labelSmoothing, reduction) {
+ if (labelSmoothing === void 0) { labelSmoothing = 0; }
+ if (reduction === void 0) { reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS; }
+ var $onehotLabels = convertToTensor(onehotLabels, 'onehotLabels', 'softmaxCrossEntropy');
+ var $logits = convertToTensor(logits, 'logits', 'softmaxCrossEntropy');
+ var $weights = null;
+ if (weights != null) {
+ $weights = convertToTensor(weights, 'weights', 'softmaxCrossEntropy');
+ }
+ assertShapesMatch($onehotLabels.shape, $logits.shape, 'Error in softmaxCrossEntropy: ');
+ if (labelSmoothing > 0) {
+ var labelSmoothingScalar = scalar(labelSmoothing);
+ var one = scalar(1);
+ var numClasses = scalar($onehotLabels.shape[1]);
+ $onehotLabels =
+ add(mul($onehotLabels, sub(one, labelSmoothingScalar)), div(labelSmoothingScalar, numClasses));
+ }
+ var losses = softmaxCrossEntropyWithLogits_($onehotLabels, $logits);
+ return computeWeightedLoss(losses, $weights, reduction);
+ }
+ var softmaxCrossEntropy = op({ softmaxCrossEntropy_: softmaxCrossEntropy_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * The input SparseTensor is represented via the map of inputs {`indices`,
+ * `values`, `denseShape`}. The output SparseTensor has the same `denseShape`
+ * but with indices `outputIndices` and values `outputValues`. This op inserts a
+ * single entry for every row that doesn't have any values. The index is created
+ * as `[row, 0, ..., 0]` and the inserted value is `defaultValue`.
+ *
+ * For example, suppose `spInput` has shape [5, 6] and non-empty values:
+ * [0, 1]: a
+ * [0, 3]: b
+ * [2, 0]: c
+ * [3, 1]: d
+ *
+ * Rows 1 and 4 are empty, so the output will be of shape [5, 6] with values:
+ * [0, 1]: a
+ * [0, 3]: b
+ * [1, 0]: `defaultValue`
+ * [2, 0]: c
+ * [3, 1]: d
+ * [4, 0]: `defaultValue`
+ *
+ * The output SparseTensor will be in row-major order and will have the same
+ * shape as the input.
+ *
+ * This op also returns an indicator vector shaped [dense_shape[0]] such that
+ * emptyRowIndicator[i] = True iff row i was an empty row.
+ *
+ * And a reverse index map vector shaped [indices.shape[0]] that is used during
+ * backpropagation, reverseIndexMap[i] = outi s.t. indices[i, j] ==
+ * outputIndices[outi, j] for all j
+ *
+ * ```js
+ * const result = tf.sparse.sparseFillEmptyRows(
+ * [[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]],
+ * [0, 10, 13, 14, 32, 33], [5, 6], -1);
+ * console.log(result);
+ * result['outputIndices'].print(); // [[0, 0], [1, 0], [1, 3], [1, 4],
+ * // [2, 0], [3, 2], [3, 3], [4, 0]]
+ * result['outputValues'].print(); // [0, 10, 13, 14,-1, 32, 33, -1]
+ * result['emptyRowIndicator'].print(); // [false, false, true, false, true]
+ * result['reverseIndexMap'].print(); // [0, 1, 2, 3, 5, 6]
+ * ```
+ * @param indices: 2-D. the indices of the sparse tensor.
+ * @param values: 1-D. the values of the sparse tensor.
+ * @param denseShape: 1-D. the shape of the sparse tensor.
+ * @param defaultValue: 0-D. default value to insert into location [row, 0, ...,
+ * 0] for rows missing from the input sparse tensor.
+ * @return A map with the following properties:
+ * - outputIndices
+ * - outputValues: 1-D. the values of the filled sparse tensor.
+ * - emptyRowIndicator: 1-D. whether the dense row was missing in the input
+ * sparse tensor.
+ * - reverseIndexMap: 1-D. a map from the input indices to the output
+ * indices.
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
+ function sparseFillEmptyRows_(indices, values, denseShape, defaultValue) {
+ var $indices = convertToTensor(indices, 'indices', 'sparseFillEmptyRows', 'int32');
+ var $values = convertToTensor(values, 'values', 'sparseFillEmptyRows');
+ var $denseShape = convertToTensor(denseShape, 'denseShape', 'sparseFillEmptyRows', 'int32');
+ var $defaultValue = convertToTensor(defaultValue, 'defaultValue', 'sparseFillEmptyRows', $values.dtype);
+ if ($indices.rank !== 2) {
+ throw new Error("Indices should be Tensor2D but received shape\n " + $indices.shape);
+ }
+ if ($values.rank !== 1) {
+ throw new Error("Values should be Tensor1D but received shape " + $values.shape);
+ }
+ if ($denseShape.rank !== 1) {
+ throw new Error("Dense shape should be Tensor1D but received shape " + $denseShape.shape);
+ }
+ if ($defaultValue.rank !== 0) {
+ throw new Error("Default value should be a scalar but received shape " + $defaultValue.shape);
+ }
+ var inputs = {
+ indices: $indices,
+ values: $values,
+ denseShape: $denseShape,
+ defaultValue: $defaultValue
+ };
+ var result = ENGINE.runKernel(SparseFillEmptyRows, inputs);
+ return {
+ outputIndices: result[0],
+ outputValues: result[1],
+ emptyRowIndicator: result[2],
+ reverseIndexMap: result[3]
+ };
+ }
+ var sparseFillEmptyRows = op({ sparseFillEmptyRows_: sparseFillEmptyRows_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * This operation has the same semantics as reshape on the represented dense
+ * tensor. The `inputIndices` are recomputed based on the requested `newShape`.
+ * If one component of `newShape` is the special value -1, the size of that
+ * dimension is computed so that the total dense size remains constant. At most
+ * one component of `newShape` can be -1. The number of dense elements implied
+ * by `newShape` must be the same as the number of dense elements originally
+ * implied by `inputShape`. Reshaping does not affect the order of values in the
+ * SparseTensor. If the input tensor has rank R_in and N non-empty values, and
+ * `newShape` has length R_out, then `inputIndices` has shape [N, R_in],
+ * `inputShape` has length R_in, `outputIndices` has shape [N, R_out], and
+ * `outputShape` has length R_out.
+ *
+ * ```js
+ * const result = tf.sparse.sparseReshape(
+ * [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],
+ * [2, 3, 6], [9, -1]);
+ * console.log(result);
+ * result['outputIndices'].print(); //[[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]]
+ * result['outputShape'].print(); // [9, 4]
+ * ```
+ * @param inputIndices: 2-D. N x R_in matrix with the indices of non-empty
+ * values in a SparseTensor.
+ * @param inputShape: 1-D. R_in Tensor1D with the input SparseTensor's dense
+ * shape.
+ * @param newShape: 1-D. R_out Tensor1D with the requested new dense shape.
+ * @return A map with the following properties:
+ * - outputIndices: 2-D. N x R_out matrix with the updated indices of
+ * non-empty values in the output SparseTensor.
+ * - outputShape: 1-D. R_out vector with the full dense shape of the output
+ * SparseTensor. This is the same as newShape but with any -1 dimensions
+ * filled in.
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
+ function sparseReshape_(inputIndices, inputShape, newShape) {
+ var $inputIndices = convertToTensor(inputIndices, 'inputIndices', 'sparseReshape', 'int32');
+ var $inputShape = convertToTensor(inputShape, 'inputShape', 'sparseReshape', 'int32');
+ var $newShape = convertToTensor(newShape, 'newShape', 'sparseReshape', 'int32');
+ if ($inputIndices.rank !== 2) {
+ throw new Error("Input indices should be Tensor2D but received shape\n " + $inputIndices.shape);
+ }
+ if ($inputShape.rank !== 1) {
+ throw new Error("Input shape should be Tensor1D but received shape " + $inputShape.shape);
+ }
+ if ($newShape.rank !== 1) {
+ throw new Error("New shape should be Tensor1D but received shape " + $newShape.shape);
+ }
+ var inputs = {
+ inputIndices: $inputIndices,
+ inputShape: $inputShape,
+ newShape: $newShape
+ };
+ var result = ENGINE.runKernel(SparseReshape, inputs);
+ return { outputIndices: result[0], outputShape: result[1] };
+ }
+ var sparseReshape = op({ sparseReshape_: sparseReshape_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the mean along sparse segments of a tensor.
+ *
+ * ```js
+ * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [6,7,8,9]]);
+ * // Select two rows, one segment.
+ * const result1 = tf.sparse.sparseSegmentMean(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 0], 'int32'));
+ * result1.print(); // [[0, 0, 0, 0]]
+ *
+ * // Select two rows, two segments.
+ * const result2 = tf.sparse.sparseSegmentMean(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 1], 'int32'));
+ * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]]
+ *
+ * // Select all rows, two segments.
+ * const result3 = tf.sparse.sparseSegmentMean(c,
+ * tf.tensor1d([0, 1, 2], 'int32'),
+ * tf.tensor1d([0, 1, 1], 'int32'));
+ * result3.print(); // [[1.0, 2.0, 3.0, 4.0], [2.5, 2.5, 2.5, 2.5]]
+ * ```
+ * @param data: A Tensor of at least one dimension with data that will be
+ * assembled in the output.
+ * @param indices: A 1-D Tensor with indices into data. Has same rank as
+ * segmentIds.
+ * @param segmentIds: A 1-D Tensor with indices into the output Tensor. Values
+ * should be sorted and can be repeated.
+ * @return Has same shape as data, except for dimension 0 which has equal to
+ * the number of segments.
+ *
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
+ function sparseSegmentMean_(data, indices, segmentIds) {
+ var $data = convertToTensor(data, 'data', 'sparseSegmentMean');
+ var $indices = convertToTensor(indices, 'indices', 'sparseSegmentMean', 'int32');
+ var $segmentIds = convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentMean', 'int32');
+ if ($data.rank < 1) {
+ throw new Error("Data should be at least 1 dimensional but received scalar");
+ }
+ if ($indices.rank !== 1) {
+ throw new Error("Indices should be Tensor1D but received shape\n " + $indices.shape);
+ }
+ if ($segmentIds.rank !== 1) {
+ throw new Error("Segment ids should be Tensor1D but received shape\n " + $segmentIds.shape);
+ }
+ var inputs = {
+ data: $data,
+ indices: $indices,
+ segmentIds: $segmentIds
+ };
+ return ENGINE.runKernel(SparseSegmentMean, inputs);
+ }
+ var sparseSegmentMean = op({ sparseSegmentMean_: sparseSegmentMean_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Computes the sum along sparse segments of a tensor.
+ *
+ * ```js
+ * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]);
+ * // Select two rows, one segment.
+ * const result1 = tf.sparse.sparseSegmentSum(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 0], 'int32'));
+ * result1.print(); // [[0, 0, 0, 0]]
+ *
+ * // Select two rows, two segment.
+ * const result2 = tf.sparse.sparseSegmentSum(c,
+ * tf.tensor1d([0, 1], 'int32'),
+ * tf.tensor1d([0, 1], 'int32'));
+ * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]]
+ *
+ * // Select all rows, two segments.
+ * const result3 = tf.sparse.sparseSegmentSum(c,
+ * tf.tensor1d([0, 1, 2], 'int32'),
+ * tf.tensor1d([0, 0, 1], 'int32'));
+ * result3.print(); // [[0, 0, 0, 0], [5, 6, 7, 8]]
+ * ```
+ * @param data: A Tensor of at least one dimension with data that will be
+ * assembled in the output.
+ * @param indices: A 1-D Tensor with indices into data. Has same rank as
+ * segmentIds.
+ * @param segmentIds: A 1-D Tensor with indices into the output Tensor. Values
+ * should be sorted and can be repeated.
+ * @return Has same shape as data, except for dimension 0 which has equal to
+ * the number of segments.
+ *
+ * @doc {heading: 'Operations', subheading: 'Sparse'}
+ */
+ function sparseSegmentSum_(data, indices, segmentIds) {
+ var $data = convertToTensor(data, 'data', 'sparseSegmentSum');
+ var $indices = convertToTensor(indices, 'indices', 'sparseSegmentSum', 'int32');
+ var $segmentIds = convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentSum', 'int32');
+ if ($data.rank < 1) {
+ throw new Error("Data should be at least 1 dimensional but received scalar");
+ }
+ if ($indices.rank !== 1) {
+ throw new Error("Indices should be Tensor1D but received shape\n " + $indices.shape);
+ }
+ if ($segmentIds.rank !== 1) {
+ throw new Error("Segment ids should be Tensor1D but received shape\n " + $segmentIds.shape);
+ }
+ var inputs = {
+ data: $data,
+ indices: $indices,
+ segmentIds: $segmentIds
+ };
+ return ENGINE.runKernel(SparseSegmentSum, inputs);
+ }
+ var sparseSegmentSum = op({ sparseSegmentSum_: sparseSegmentSum_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Creates ngrams from ragged string data.
+ *
+ * This op accepts a ragged tensor with 1 ragged dimension containing only
+ * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams
+ * of that string, joined along the innermost axis.
+ *
+ * ```js
+ * const result = tf.string.stringNGrams(
+ * ['a', 'b', 'c', 'd'], tf.tensor1d([0, 2, 4], 'int32'),
+ * '|', [1, 2], 'LP', 'RP', -1, false);
+ * result['nGrams'].print(); // ['a', 'b', 'LP|a', 'a|b', 'b|RP',
+ * // 'c', 'd', 'LP|c', 'c|d', 'd|RP']
+ * result['nGramsSplits'].print(); // [0, 5, 10]
+ * ```
+ * @param data: The values tensor of the ragged string tensor to make ngrams out
+ * of. Must be a 1D string tensor.
+ * @param dataSplits: The splits tensor of the ragged string tensor to make
+ * ngrams out of.
+ * @param separator: The string to append between elements of the token. Use ""
+ * for no separator.
+ * @param nGramWidths: The sizes of the ngrams to create.
+ * @param leftPad: The string to use to pad the left side of the ngram sequence.
+ * Only used if pad_width !== 0.
+ * @param rightPad: The string to use to pad the right side of the ngram
+ * sequence. Only used if pad_width !== 0.
+ * @param padWidth: The number of padding elements to add to each side of each
+ * sequence. Note that padding will never be greater than `nGramWidths`-1
+ * regardless of this value. If `padWidth`=-1 , then add max(`nGramWidths)-1
+ * elements.
+ * @param preserveShortSequences: If true, then ensure that at least one ngram
+ * is generated for each input sequence. In particular, if an input sequence
+ * is shorter than min(ngramWidth) + 2*padWidth, then generate a single
+ * ngram containing the entire sequence. If false, then no ngrams are
+ * generated for these short input sequences.
+ * @return A map with the following properties:
+ * - nGrams: The values tensor of the output ngrams ragged tensor.
+ * - nGramsSplits: The splits tensor of the output ngrams ragged tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'String'}
+ */
+ function stringNGrams_(data, dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) {
+ var $data = convertToTensor(data, 'data', 'stringNGrams', 'string');
+ if ($data.dtype !== 'string') {
+ throw new Error('Data must be of datatype string');
+ }
+ if ($data.shape.length !== 1) {
+ throw new Error("Data must be a vector, saw: " + $data.shape);
+ }
+ var $dataSplits = convertToTensor(dataSplits, 'dataSplits', 'stringNGrams');
+ if ($dataSplits.dtype !== 'int32') {
+ throw new Error('Data splits must be of datatype int32');
+ }
+ var attrs = {
+ separator: separator,
+ nGramWidths: nGramWidths,
+ leftPad: leftPad,
+ rightPad: rightPad,
+ padWidth: padWidth,
+ preserveShortSequences: preserveShortSequences
+ };
+ var inputs = { data: $data, dataSplits: $dataSplits };
+ var result = ENGINE.runKernel(StringNGrams, inputs, attrs);
+ return { nGrams: result[0], nGramsSplits: result[1] };
+ }
+ var stringNGrams = op({ stringNGrams_: stringNGrams_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Split elements of `input` based on `delimiter` into a SparseTensor .
+ *
+ * Let N be the size of source (typically N will be the batch size). Split each
+ * element of `input` based on `delimiter` and return a SparseTensor containing
+ * the split tokens. Empty tokens are ignored if `skipEmpty` is set to True.
+ *
+ * `delimiter` can be empty, or a string of split characters. If `delimiter` is
+ * an empty string, each element of `input` is split into individual
+ * character strings. Otherwise every character of `delimiter` is a potential
+ * split point.
+ *
+ * ```js
+ * const result = tf.string.stringSplit(['hello world', 'a b c'], ' ');
+ * result['indices'].print(); // [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
+ * result['values'].print(); // ['hello', 'world', 'a', 'b', 'c']
+ * result['shape'].print(); // [2, 3]
+ * ```
+ * @param input: 1-D. Strings to split.
+ * @param delimiter: 0-D. Delimiter characters, or empty string.
+ * @param skipEmpty: Optional. If true, skip the empty strings from the result.
+ * Defaults to true.
+ * @return A map with the following properties:
+ * - indices: A dense matrix of int32 representing the indices of the sparse
+ * tensor.
+ * - values: A vector of strings corresponding to the split values.
+ * - shape: a length-2 vector of int32 representing the shape of the sparse
+ * tensor, where the first value is N and the second value is the maximum number
+ * of tokens in a single input entry.
+ *
+ * @doc {heading: 'Operations', subheading: 'String'}
+ */
/**
 * Splits each element of a 1-D string tensor on `delimiter`, returning the
 * result as sparse-tensor components (indices, values, dense shape).
 */
function stringSplit_(input, delimiter, skipEmpty) {
    if (skipEmpty === void 0) { skipEmpty = true; }
    var inputTensor = convertToTensor(input, 'input', 'stringSplit', 'string');
    var delimiterTensor = convertToTensor(delimiter, 'delimiter', 'stringSplit', 'string');
    if (inputTensor.rank !== 1) {
        throw new Error("Input should be Tensor1D but received shape " + inputTensor.shape);
    }
    if (delimiterTensor.rank !== 0) {
        throw new Error("Delimiter should be a scalar but received shape " + delimiterTensor.shape);
    }
    var kernelResult = ENGINE.runKernel(StringSplit, { input: inputTensor, delimiter: delimiterTensor }, { skipEmpty: skipEmpty });
    return {
        indices: kernelResult[0],
        values: kernelResult[1],
        shape: kernelResult[2]
    };
}
var stringSplit = op({ stringSplit_: stringSplit_ });
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Converts each string in the input Tensor to its hash mod by a number of
+ * buckets.
+ *
+ * The hash function is deterministic on the content of the string within the
+ * process and will never change. However, it is not suitable for cryptography.
+ * This function may be used when CPU time is scarce and inputs are trusted or
+ * unimportant. There is a risk of adversaries constructing inputs that all hash
+ * to the same bucket.
+ *
+ * ```js
+ * const result = tf.string.stringToHashBucketFast(
+ * ['Hello', 'TensorFlow', '2.x'], 3);
+ * result.print(); // [0, 2, 2]
+ * ```
+ * @param input: The strings to assign a hash bucket.
+ * @param numBuckets: The number of buckets.
+ * @return A Tensor of the same shape as the input tensor.
+ *
+ * @doc {heading: 'Operations', subheading: 'String'}
+ */
/**
 * Hashes each string in `input` into one of `numBuckets` buckets via the
 * StringToHashBucketFast kernel; deterministic but not cryptographic.
 */
function stringToHashBucketFast_(input, numBuckets) {
    var inputTensor = convertToTensor(input, 'input', 'stringToHashBucketFast', 'string');
    if (numBuckets <= 0) {
        throw new Error("Number of buckets must be at least 1");
    }
    return ENGINE.runKernel(StringToHashBucketFast, { input: inputTensor }, { numBuckets: numBuckets });
}
var stringToHashBucketFast = op({ stringToHashBucketFast_: stringToHashBucketFast_ });
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
// Grouped op collections exposed as public API namespaces
// (e.g. tf.spectral.fft, tf.image.resizeBilinear). Each object only
// aggregates ops defined earlier in this file under a named bucket.
// Fourier-transform ops.
var spectral = {
    fft: fft,
    ifft: ifft,
    rfft: rfft,
    irfft: irfft
};
// Signal-processing ops: windowing, framing, STFT.
var signal = {
    hammingWindow: hammingWindow,
    hannWindow: hannWindow,
    frame: frame,
    stft: stft,
};
// Image ops: flips, resizes, rotation, crop, non-max suppression, etc.
var image = {
    flipLeftRight: flipLeftRight,
    grayscaleToRGB: grayscaleToRGB,
    resizeNearestNeighbor: resizeNearestNeighbor,
    resizeBilinear: resizeBilinear,
    rotateWithOffset: rotateWithOffset,
    cropAndResize: cropAndResize,
    nonMaxSuppression: nonMaxSuppression,
    nonMaxSuppressionAsync: nonMaxSuppressionAsync,
    nonMaxSuppressionWithScore: nonMaxSuppressionWithScore,
    nonMaxSuppressionWithScoreAsync: nonMaxSuppressionWithScoreAsync,
    nonMaxSuppressionPadded: nonMaxSuppressionPadded,
    nonMaxSuppressionPaddedAsync: nonMaxSuppressionPaddedAsync,
    threshold: threshold,
    transform: transform
};
// Linear-algebra ops.
var linalg = {
    bandPart: bandPart,
    gramSchmidt: gramSchmidt,
    qr: qr
};
// Loss functions.
var losses = {
    absoluteDifference: absoluteDifference,
    computeWeightedLoss: computeWeightedLoss,
    cosineDistance: cosineDistance,
    hingeLoss: hingeLoss,
    huberLoss: huberLoss,
    logLoss: logLoss,
    meanSquaredError: meanSquaredError,
    sigmoidCrossEntropy: sigmoidCrossEntropy,
    softmaxCrossEntropy: softmaxCrossEntropy
};
// Sparse-tensor ops.
var sparse = {
    sparseFillEmptyRows: sparseFillEmptyRows,
    sparseReshape: sparseReshape,
    sparseSegmentMean: sparseSegmentMean,
    sparseSegmentSum: sparseSegmentSum
};
// String ops.
// tslint:disable-next-line:variable-name
var string = {
    stringNGrams: stringNGrams,
    stringSplit: stringSplit,
    stringToHashBucketFast: stringToHashBucketFast
};
+
/** @doc {heading: 'Training', subheading: 'Classes', namespace: 'train'} */
// Base class for all optimizers. Subclasses provide applyGradients();
// this class supplies minimize(), gradient computation via variableGrads,
// the iteration counter, and weight (de)serialization hooks.
// (ES5 output of tsc: __awaiter/__generator encode async methods.)
var Optimizer = /** @class */ (function (_super) {
    __extends(Optimizer, _super);
    function Optimizer() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    /**
     * Executes `f()` and minimizes the scalar output of `f()` by computing
     * gradients of y with respect to the list of trainable variables provided by
     * `varList`. If no list is provided, it defaults to all trainable variables.
     *
     * @param f The function to execute and whose output to minimize.
     * @param returnCost Whether to return the scalar cost value produced by
     * executing `f()`. When false, the cost tensor is disposed and null is
     * returned.
     * @param varList An optional list of variables to update. If specified, only
     * the trainable variables in varList will be updated by minimize. Defaults to
     * all trainable variables.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers'}
     */
    Optimizer.prototype.minimize = function (f, returnCost, varList) {
        if (returnCost === void 0) { returnCost = false; }
        var _a = this.computeGradients(f, varList), value = _a.value, grads = _a.grads;
        if (varList != null) {
            // Preserve the caller-supplied variable order when applying updates.
            var gradArray = varList.map(function (v) { return ({ name: v.name, tensor: grads[v.name] }); });
            this.applyGradients(gradArray);
        }
        else {
            this.applyGradients(grads);
        }
        // Dispose gradients.
        dispose(grads);
        if (returnCost) {
            return value;
        }
        else {
            // Caller did not ask for the cost; free it immediately.
            value.dispose();
            return null;
        }
    };
    Object.defineProperty(Optimizer.prototype, "iterations", {
        /**
         * The number of iterations that this optimizer instance has been invoked for.
         */
        get: function () {
            if (this.iterations_ == null) {
                this.iterations_ = 0;
            }
            return this.iterations_;
        },
        enumerable: true,
        configurable: true
    });
    // Bumps the iteration counter; called by subclasses at the end of each
    // applyGradients().
    Optimizer.prototype.incrementIterations = function () {
        this.iterations_ = this.iterations + 1;
    };
    /**
     * Executes f() and computes the gradient of the scalar output of f() with
     * respect to the list of trainable variables provided by `varList`. If no
     * list is provided, it defaults to all trainable variables.
     *
     * @param f The function to execute and whose output to use for computing
     * gradients with respect to variables.
     * @param varList An optional list of variables to compute gradients with
     * respect to. If specified, only the trainable variables in varList will have
     * gradients computed with respect to. Defaults to all trainable variables.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers'}
     */
    Optimizer.prototype.computeGradients = function (f, varList) {
        return variableGrads(f, varList);
    };
    /**
     * Dispose the variables (if any) owned by this optimizer instance.
     */
    Optimizer.prototype.dispose = function () {
        if (this.iterations_ != null) {
            dispose(this.iterations_);
        }
    };
    // Serializes the iteration counter as a named int32 scalar tensor.
    Optimizer.prototype.saveIterations = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                if (this.iterations_ == null) {
                    this.iterations_ = 0;
                }
                return [2 /*return*/, {
                        name: 'iter',
                        // TODO(cais): Use 'int64' type when available.
                        tensor: scalar(this.iterations_, 'int32')
                    }];
            });
        });
    };
    // Default implementation: subclasses override to expose their slots.
    Optimizer.prototype.getWeights = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                throw new Error('getWeights() is not implemented for this optimizer yet.');
            });
        });
    };
    Optimizer.prototype.setWeights = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                throw new Error("setWeights() is not implemented for this optimizer class " +
                    ("" + this.getClassName()));
            });
        });
    };
    /**
     * Extract the first element of the weight values and set it
     * as the iterations counter variable of this instance of optimizer.
     *
     * @param weightValues
     * @returns Weight values with the first element consumed and excluded.
     */
    Optimizer.prototype.extractIterations = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            var _a;
            return __generator(this, function (_b) {
                switch (_b.label) {
                    case 0:
                        _a = this;
                        return [4 /*yield*/, weightValues[0].tensor.data()];
                    case 1:
                        _a.iterations_ = (_b.sent())[0];
                        return [2 /*return*/, weightValues.slice(1)];
                }
            });
        });
    };
    return Optimizer;
}(Serializable));
// Duck-typed instanceof: any object exposing minimize/computeGradients/
// applyGradients counts as an Optimizer (supports cross-bundle instances).
Object.defineProperty(Optimizer, Symbol.hasInstance, {
    value: function (instance) {
        return instance.minimize != null && instance.computeGradients != null &&
            instance.applyGradients != null;
    }
});
+
/** @doclink Optimizer */
// Adadelta optimizer (Zeiler 2012, arXiv:1212.5701). Maintains per-variable
// running averages of squared gradients (accumulatedGrads) and squared
// parameter updates (accumulatedUpdates).
var AdadeltaOptimizer = /** @class */ (function (_super) {
    __extends(AdadeltaOptimizer, _super);
    function AdadeltaOptimizer(learningRate, rho, epsilon) {
        if (epsilon === void 0) { epsilon = null; }
        var _this = _super.call(this) || this;
        _this.learningRate = learningRate;
        _this.rho = rho;
        _this.epsilon = epsilon;
        _this.accumulatedGrads = [];
        _this.accumulatedUpdates = [];
        if (epsilon == null) {
            // Fall back to the backend's smallest numerically stable epsilon.
            _this.epsilon = ENGINE.backend.epsilon();
        }
        return _this;
    }
    AdadeltaOptimizer.prototype.applyGradients = function (variableGradients) {
        var _this = this;
        var variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(function (item) { return item.name; }) :
            Object.keys(variableGradients);
        variableNames.forEach(function (name, i) {
            var value = ENGINE.registeredVariables[name];
            var trainable = false;
            if (_this.accumulatedGrads[i] == null) {
                // Lazily create the squared-gradient accumulator (zeros).
                _this.accumulatedGrads[i] = {
                    originalName: name + "/accum_grad",
                    variable: tidy(function () { return zerosLike(value).variable(trainable); })
                };
            }
            if (_this.accumulatedUpdates[i] == null) {
                // Lazily create the squared-update accumulator (zeros).
                _this.accumulatedUpdates[i] = {
                    originalName: name + "/accum_var",
                    variable: tidy(function () { return zerosLike(value).variable(trainable); })
                };
            }
            var gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            if (gradient == null) {
                // No gradient supplied for this variable; skip it.
                return;
            }
            var accumulatedGrad = _this.accumulatedGrads[i].variable;
            var accumulatedUpdate = _this.accumulatedUpdates[i].variable;
            tidy(function () {
                // E[g^2]_t = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
                var newAccumulatedGrad = add(mul(accumulatedGrad, _this.rho), mul(square(gradient), 1 - _this.rho));
                // BUG FIX: the update must use the freshly updated squared-
                // gradient average RMS[g]_t, not the stale accumulatedGrad.
                // Per the Adadelta paper: dx_t = (RMS[dx]_{t-1} / RMS[g]_t) * g_t.
                var updates = mul(div(sqrt(add(accumulatedUpdate, _this.epsilon)), sqrt(add(newAccumulatedGrad, _this.epsilon))), gradient);
                // E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
                var newAccumulatedUpdate = add(mul(accumulatedUpdate, _this.rho), mul(square(updates), 1 - _this.rho));
                accumulatedGrad.assign(newAccumulatedGrad);
                accumulatedUpdate.assign(newAccumulatedUpdate);
                var newValue = add(mul(updates, -_this.learningRate), value);
                value.assign(newValue);
            });
        });
        this.incrementIterations();
    };
    AdadeltaOptimizer.prototype.dispose = function () {
        if (this.accumulatedUpdates != null) {
            dispose(this.accumulatedGrads.map(function (v) { return v.variable; }));
            dispose(this.accumulatedUpdates.map(function (v) { return v.variable; }));
        }
    };
    // Serializes [iterations, accum_grads..., accum_updates...].
    AdadeltaOptimizer.prototype.getWeights = function () {
        return __awaiter(this, void 0, void 0, function () {
            var variables;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        variables = __spread(this.accumulatedGrads, this.accumulatedUpdates);
                        return [4 /*yield*/, this.saveIterations()];
                    case 1: return [2 /*return*/, [_a.sent()].concat(variables.map(function (v) { return ({ name: v.originalName, tensor: v.variable }); }))];
                }
            });
        });
    };
    AdadeltaOptimizer.prototype.setWeights = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            var variableCount, trainable;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.extractIterations(weightValues)];
                    case 1:
                        weightValues = _a.sent();
                        variableCount = weightValues.length / 2;
                        trainable = false;
                        // First half: squared-gradient accumulators; second half:
                        // squared-update accumulators.
                        this.accumulatedGrads =
                            weightValues.slice(0, variableCount).map(function (v) { return ({
                                originalName: v.name,
                                variable: v.tensor.variable(trainable)
                            }); });
                        this.accumulatedUpdates =
                            weightValues.slice(variableCount, variableCount * 2)
                                .map(function (v) { return ({
                                originalName: v.name,
                                variable: v.tensor.variable(trainable)
                            }); });
                        return [2 /*return*/];
                }
            });
        });
    };
    AdadeltaOptimizer.prototype.getConfig = function () {
        return {
            'learningRate': this.learningRate,
            'rho': this.rho,
            'epsilon': this.epsilon
        };
    };
    /** @nocollapse */
    AdadeltaOptimizer.fromConfig = function (cls, config) {
        return new cls(config['learningRate'], config['rho'], config['epsilon']);
    };
    return AdadeltaOptimizer;
}(Optimizer));
/** @nocollapse */
AdadeltaOptimizer.className = 'Adadelta'; // Name matters for Python compatibility.
registerClass(AdadeltaOptimizer);
+
/** @doclink Optimizer */
// Adagrad optimizer: a per-variable sum of squared gradients scales the
// effective learning rate down over time.
var AdagradOptimizer = /** @class */ (function (_super) {
    __extends(AdagradOptimizer, _super);
    function AdagradOptimizer(learningRate, initialAccumulatorValue) {
        if (initialAccumulatorValue === void 0) { initialAccumulatorValue = 0.1; }
        var _this = _super.call(this) || this;
        _this.learningRate = learningRate;
        _this.initialAccumulatorValue = initialAccumulatorValue;
        _this.accumulatedGrads = [];
        return _this;
    }
    AdagradOptimizer.prototype.applyGradients = function (variableGradients) {
        var _this = this;
        var variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(function (item) { return item.name; }) :
            Object.keys(variableGradients);
        variableNames.forEach(function (name, i) {
            var value = ENGINE.registeredVariables[name];
            if (_this.accumulatedGrads[i] == null) {
                var trainable_1 = false;
                // Lazily create the accumulator, seeded with
                // initialAccumulatorValue rather than zeros.
                _this.accumulatedGrads[i] = {
                    originalName: name + "/accumulator",
                    variable: tidy(function () { return fill(value.shape, _this.initialAccumulatorValue)
                        .variable(trainable_1); })
                };
            }
            var gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            if (gradient == null) {
                // No gradient supplied for this variable; skip it.
                return;
            }
            var accumulatedGrad = _this.accumulatedGrads[i].variable;
            tidy(function () {
                // accum += g^2; value -= lr * g / sqrt(accum + eps)
                var newAccumulatedGrad = add(accumulatedGrad, square(gradient));
                accumulatedGrad.assign(newAccumulatedGrad);
                var newValue = add(mul(div(gradient, sqrt(add(newAccumulatedGrad, ENGINE.backend.epsilon()))), -_this.learningRate), value);
                value.assign(newValue);
            });
        });
        this.incrementIterations();
    };
    AdagradOptimizer.prototype.dispose = function () {
        if (this.accumulatedGrads != null) {
            dispose(this.accumulatedGrads.map(function (v) { return v.variable; }));
        }
    };
    AdagradOptimizer.prototype.getWeights = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.saveIterations()];
                    case 1:
                        // Order matters for Python compatibility.
                        return [2 /*return*/, [_a.sent()].concat(this.accumulatedGrads.map(function (v) { return ({ name: v.originalName, tensor: v.variable }); }))];
                }
            });
        });
    };
    AdagradOptimizer.prototype.setWeights = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            var trainable;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.extractIterations(weightValues)];
                    case 1:
                        weightValues = _a.sent();
                        trainable = false;
                        this.accumulatedGrads = weightValues.map(function (v) { return ({ originalName: v.name, variable: v.tensor.variable(trainable) }); });
                        return [2 /*return*/];
                }
            });
        });
    };
    AdagradOptimizer.prototype.getConfig = function () {
        return {
            'learningRate': this.learningRate,
            'initialAccumulatorValue': this.initialAccumulatorValue,
        };
    };
    /** @nocollapse */
    AdagradOptimizer.fromConfig = function (cls, config) {
        return new cls(config['learningRate'], config['initialAccumulatorValue']);
    };
    return AdagradOptimizer;
}(Optimizer));
/** @nocollapse */
AdagradOptimizer.className = 'Adagrad'; // Note: Name matters for Python compatibility.
registerClass(AdagradOptimizer);
+
// Adam optimizer: per-variable first/second moment estimates with bias
// correction via the running accBeta1/accBeta2 powers.
var AdamOptimizer = /** @class */ (function (_super) {
    __extends(AdamOptimizer, _super);
    function AdamOptimizer(learningRate, beta1, beta2, epsilon) {
        if (epsilon === void 0) { epsilon = null; }
        var _this = _super.call(this) || this;
        _this.learningRate = learningRate;
        _this.beta1 = beta1;
        _this.beta2 = beta2;
        _this.epsilon = epsilon;
        _this.accumulatedFirstMoment = [];
        _this.accumulatedSecondMoment = [];
        tidy(function () {
            // accB* will be updated by batch.
            _this.accBeta1 = scalar(beta1).variable();
            _this.accBeta2 = scalar(beta2).variable();
        });
        if (epsilon == null) {
            // Fall back to the backend's numerically stable epsilon.
            _this.epsilon = ENGINE.backend.epsilon();
        }
        return _this;
    }
    AdamOptimizer.prototype.applyGradients = function (variableGradients) {
        var _this = this;
        var varNames = Array.isArray(variableGradients) ?
            variableGradients.map(function (v) { return v.name; }) :
            Object.keys(variableGradients);
        tidy(function () {
            // Bias-correction denominators: 1 - beta^t.
            var oneMinusAccBeta1 = sub(1, _this.accBeta1);
            var oneMinusAccBeta2 = sub(1, _this.accBeta2);
            varNames.forEach(function (name, i) {
                var value = ENGINE.registeredVariables[name];
                var trainable = false;
                if (_this.accumulatedFirstMoment[i] == null) {
                    _this.accumulatedFirstMoment[i] = {
                        originalName: name + "/m",
                        variable: tidy(function () { return zerosLike(value).variable(trainable); })
                    };
                }
                if (_this.accumulatedSecondMoment[i] == null) {
                    _this.accumulatedSecondMoment[i] = {
                        originalName: name + "/v",
                        variable: tidy(function () { return zerosLike(value).variable(trainable); })
                    };
                }
                var gradient = Array.isArray(variableGradients) ?
                    variableGradients[i].tensor :
                    variableGradients[name];
                if (gradient == null) {
                    // No gradient supplied for this variable; skip it.
                    return;
                }
                var firstMoment = _this.accumulatedFirstMoment[i].variable;
                var secondMoment = _this.accumulatedSecondMoment[i].variable;
                // m_t = b1*m + (1-b1)*g ; v_t = b2*v + (1-b2)*g^2
                var newFirstMoment = add(mul(firstMoment, _this.beta1), mul(gradient, 1 - _this.beta1));
                var newSecondMoment = add(mul(secondMoment, _this.beta2), mul(square(gradient), 1 - _this.beta2));
                var biasCorrectedFirstMoment = div(newFirstMoment, oneMinusAccBeta1);
                var biasCorrectedSecondMoment = div(newSecondMoment, oneMinusAccBeta2);
                firstMoment.assign(newFirstMoment);
                secondMoment.assign(newSecondMoment);
                var newValue = add(mul(div(biasCorrectedFirstMoment, add(sqrt(biasCorrectedSecondMoment), _this.epsilon)), -_this.learningRate), value);
                value.assign(newValue);
            });
            // Advance the beta powers for the next step's bias correction.
            _this.accBeta1.assign(mul(_this.accBeta1, _this.beta1));
            _this.accBeta2.assign(mul(_this.accBeta2, _this.beta2));
        });
        this.incrementIterations();
    };
    AdamOptimizer.prototype.dispose = function () {
        this.accBeta1.dispose();
        this.accBeta2.dispose();
        if (this.accumulatedFirstMoment != null) {
            dispose(this.accumulatedFirstMoment.map(function (v) { return v.variable; }));
        }
        if (this.accumulatedSecondMoment != null) {
            dispose(this.accumulatedSecondMoment.map(function (v) { return v.variable; }));
        }
    };
    // Serializes [iterations, first moments..., second moments...].
    AdamOptimizer.prototype.getWeights = function () {
        return __awaiter(this, void 0, void 0, function () {
            var variables;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0:
                        variables = __spread(this.accumulatedFirstMoment, this.accumulatedSecondMoment);
                        return [4 /*yield*/, this.saveIterations()];
                    case 1: return [2 /*return*/, [_a.sent()].concat(variables.map(function (v) { return ({ name: v.originalName, tensor: v.variable }); }))];
                }
            });
        });
    };
    AdamOptimizer.prototype.setWeights = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            var variableCount, trainable;
            var _this = this;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.extractIterations(weightValues)];
                    case 1:
                        weightValues = _a.sent();
                        tidy(function () {
                            // Rebuild the beta powers from the restored iteration count.
                            _this.accBeta1.assign(pow(_this.beta1, _this.iterations_ + 1));
                            _this.accBeta2.assign(pow(_this.beta2, _this.iterations_ + 1));
                        });
                        variableCount = weightValues.length / 2;
                        trainable = false;
                        // First half: first moments; second half: second moments.
                        this.accumulatedFirstMoment =
                            weightValues.slice(0, variableCount).map(function (v) { return ({
                                originalName: v.name,
                                variable: v.tensor.variable(trainable)
                            }); });
                        this.accumulatedSecondMoment =
                            weightValues.slice(variableCount, variableCount * 2)
                                .map(function (v) { return ({
                                originalName: v.name,
                                variable: v.tensor.variable(trainable)
                            }); });
                        return [2 /*return*/];
                }
            });
        });
    };
    AdamOptimizer.prototype.getConfig = function () {
        return {
            'learningRate': this.learningRate,
            'beta1': this.beta1,
            'beta2': this.beta2,
            'epsilon': this.epsilon,
        };
    };
    /** @nocollapse */
    AdamOptimizer.fromConfig = function (cls, config) {
        return new cls(config['learningRate'], config['beta1'], config['beta2'], config['epsilon']);
    };
    return AdamOptimizer;
}(Optimizer));
/** @nocollapse */
AdamOptimizer.className = 'Adam'; // Note: Name matters for Python compatibility.
registerClass(AdamOptimizer);
+
// Adamax optimizer: Adam variant using the infinity norm — tracks a
// per-variable first moment and a weighted infinity norm of gradients,
// with optional learning-rate decay.
var AdamaxOptimizer = /** @class */ (function (_super) {
    __extends(AdamaxOptimizer, _super);
    function AdamaxOptimizer(learningRate, beta1, beta2, epsilon, decay) {
        if (epsilon === void 0) { epsilon = null; }
        if (decay === void 0) { decay = 0.0; }
        var _this = _super.call(this) || this;
        _this.learningRate = learningRate;
        _this.beta1 = beta1;
        _this.beta2 = beta2;
        _this.epsilon = epsilon;
        _this.decay = decay;
        _this.accumulatedFirstMoment = [];
        _this.accumulatedWeightedInfNorm = [];
        tidy(function () {
            // iteration drives the decay schedule; accBeta1 the bias correction.
            _this.iteration = scalar(0).variable();
            _this.accBeta1 = scalar(beta1).variable();
        });
        if (epsilon == null) {
            // Fall back to the backend's numerically stable epsilon.
            _this.epsilon = ENGINE.backend.epsilon();
        }
        return _this;
    }
    AdamaxOptimizer.prototype.applyGradients = function (variableGradients) {
        var _this = this;
        var variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(function (item) { return item.name; }) :
            Object.keys(variableGradients);
        tidy(function () {
            var oneMinusAccBeta1 = sub(1, _this.accBeta1);
            // Decayed learning rate: -lr / (1 + iteration * decay).
            var lr = div(-_this.learningRate, add(mul(_this.iteration, _this.decay), 1));
            variableNames.forEach(function (name, i) {
                var value = ENGINE.registeredVariables[name];
                var trainable = false;
                if (_this.accumulatedFirstMoment[i] == null) {
                    _this.accumulatedFirstMoment[i] = {
                        originalName: name + "/m",
                        variable: zerosLike(value).variable(trainable)
                    };
                }
                if (_this.accumulatedWeightedInfNorm[i] == null) {
                    _this.accumulatedWeightedInfNorm[i] = {
                        originalName: name + "/v",
                        variable: zerosLike(value).variable(trainable)
                    };
                }
                var gradient = Array.isArray(variableGradients) ?
                    variableGradients[i].tensor :
                    variableGradients[name];
                if (gradient == null) {
                    // No gradient supplied for this variable; skip it.
                    return;
                }
                var firstMoment = _this.accumulatedFirstMoment[i].variable;
                var weightedInfNorm = _this.accumulatedWeightedInfNorm[i].variable;
                // m_t = b1*m + (1-b1)*g ; u_t = max(b2*u, |g|)
                var newFirstMoment = add(mul(firstMoment, _this.beta1), mul(gradient, 1 - _this.beta1));
                var ut0 = mul(weightedInfNorm, _this.beta2);
                var ut1 = abs(gradient);
                var newWeightedInfNorm = maximum(ut0, ut1);
                firstMoment.assign(newFirstMoment);
                weightedInfNorm.assign(newWeightedInfNorm);
                var newValue = add(mul(div(lr, oneMinusAccBeta1), div(newFirstMoment, add(newWeightedInfNorm, _this.epsilon))), value);
                value.assign(newValue);
            });
            _this.iteration.assign(add(_this.iteration, 1));
            _this.accBeta1.assign(mul(_this.accBeta1, _this.beta1));
        });
        this.incrementIterations();
    };
    AdamaxOptimizer.prototype.dispose = function () {
        this.accBeta1.dispose();
        this.iteration.dispose();
        if (this.accumulatedFirstMoment != null) {
            dispose(this.accumulatedFirstMoment.map(function (v) { return v.variable; }));
        }
        if (this.accumulatedWeightedInfNorm != null) {
            dispose(this.accumulatedWeightedInfNorm.map(function (v) { return v.variable; }));
        }
    };
    AdamaxOptimizer.prototype.getWeights = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                throw new Error('getWeights() is not implemented for Adamax yet.');
            });
        });
    };
    AdamaxOptimizer.prototype.setWeights = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                throw new Error('setWeights() is not implemented for Adamax yet.');
            });
        });
    };
    AdamaxOptimizer.prototype.getConfig = function () {
        return {
            'learningRate': this.learningRate,
            'beta1': this.beta1,
            'beta2': this.beta2,
            'epsilon': this.epsilon,
            'decay': this.decay
        };
    };
    /** @nocollapse */
    AdamaxOptimizer.fromConfig = function (cls, config) {
        return new cls(config['learningRate'], config['beta1'], config['beta2'], config['epsilon'], config['decay']);
    };
    return AdamaxOptimizer;
}(Optimizer));
/** @nocollapse */
AdamaxOptimizer.className = 'Adamax'; // Note: Name matters for Python compatibility.
registerClass(AdamaxOptimizer);
+
/** @doclink Optimizer */
// Plain stochastic gradient descent: value += c * gradient, where `c` is
// the negated learning rate cached as a kept scalar tensor.
var SGDOptimizer = /** @class */ (function (_super) {
    __extends(SGDOptimizer, _super);
    function SGDOptimizer(learningRate) {
        var _this = _super.call(this) || this;
        _this.learningRate = learningRate;
        _this.setLearningRate(learningRate);
        return _this;
    }
    SGDOptimizer.prototype.applyGradients = function (variableGradients) {
        var _this = this;
        var varNames = Array.isArray(variableGradients) ?
            variableGradients.map(function (v) { return v.name; }) :
            Object.keys(variableGradients);
        varNames.forEach(function (name, i) {
            var gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            if (gradient == null) {
                // No gradient supplied for this variable; skip it.
                return;
            }
            var value = ENGINE.registeredVariables[name];
            tidy(function () {
                var newValue = add(mul(_this.c, gradient), value);
                value.assign(newValue);
            });
        });
        this.incrementIterations();
    };
    /**
     * Sets the learning rate of the optimizer.
     */
    SGDOptimizer.prototype.setLearningRate = function (learningRate) {
        this.learningRate = learningRate;
        if (this.c != null) {
            // Free the previously kept scalar before replacing it.
            this.c.dispose();
        }
        this.c = keep(scalar(-learningRate));
    };
    SGDOptimizer.prototype.dispose = function () {
        this.c.dispose();
    };
    // SGD has no slot variables; only the iteration counter is serialized.
    SGDOptimizer.prototype.getWeights = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.saveIterations()];
                    case 1: return [2 /*return*/, [_a.sent()]];
                }
            });
        });
    };
    SGDOptimizer.prototype.setWeights = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.extractIterations(weightValues)];
                    case 1:
                        weightValues = _a.sent();
                        if (weightValues.length !== 0) {
                            throw new Error('SGD optimizer does not have settable weights.');
                        }
                        return [2 /*return*/];
                }
            });
        });
    };
    SGDOptimizer.prototype.getConfig = function () {
        return { 'learningRate': this.learningRate };
    };
    /** @nocollapse */
    SGDOptimizer.fromConfig = function (cls, config) {
        return new cls(config['learningRate']);
    };
    return SGDOptimizer;
}(Optimizer));
/** @nocollapse */
SGDOptimizer.className = 'SGD'; // Note: Name matters for Python compatibility.
registerClass(SGDOptimizer);
+
/** @doclink Optimizer */
// SGD with momentum (optionally Nesterov). Extends SGDOptimizer and reuses
// its `c` (negated learning rate); `m` holds the momentum as a scalar.
var MomentumOptimizer = /** @class */ (function (_super) {
    __extends(MomentumOptimizer, _super);
    function MomentumOptimizer(learningRate, momentum, useNesterov) {
        if (useNesterov === void 0) { useNesterov = false; }
        var _this = _super.call(this, learningRate) || this;
        _this.learningRate = learningRate;
        _this.momentum = momentum;
        _this.useNesterov = useNesterov;
        _this.accumulations = [];
        _this.m = scalar(_this.momentum);
        return _this;
    }
    MomentumOptimizer.prototype.applyGradients = function (variableGradients) {
        var _this = this;
        var variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(function (item) { return item.name; }) :
            Object.keys(variableGradients);
        variableNames.forEach(function (name, i) {
            var value = ENGINE.registeredVariables[name];
            if (_this.accumulations[i] == null) {
                var trainable_1 = false;
                // Lazily create the per-variable momentum accumulator (zeros).
                _this.accumulations[i] = {
                    originalName: name + "/momentum",
                    variable: tidy(function () { return zerosLike(value).variable(trainable_1); })
                };
            }
            var accumulation = _this.accumulations[i].variable;
            var gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            if (gradient == null) {
                // No gradient supplied for this variable; skip it.
                return;
            }
            tidy(function () {
                var newValue;
                // a_t = m * a_{t-1} + g
                var newAccumulation = add(mul(_this.m, accumulation), gradient);
                if (_this.useNesterov) {
                    // Nesterov: value += c * (g + m * a_t)
                    newValue = add(mul(_this.c, add(gradient, mul(newAccumulation, _this.m))), value);
                }
                else {
                    // Standard momentum: value += c * a_t
                    newValue = add(mul(_this.c, newAccumulation), value);
                }
                accumulation.assign(newAccumulation);
                value.assign(newValue);
            });
        });
        this.incrementIterations();
    };
    MomentumOptimizer.prototype.dispose = function () {
        this.m.dispose();
        if (this.accumulations != null) {
            dispose(this.accumulations.map(function (v) { return v.variable; }));
        }
    };
    /**
     * Sets the momentum of the optimizer.
     *
     * @param momentum
     */
    MomentumOptimizer.prototype.setMomentum = function (momentum) {
        this.momentum = momentum;
    };
    MomentumOptimizer.prototype.getWeights = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.saveIterations()];
                    case 1:
                        // Order matters for Python compatibility.
                        return [2 /*return*/, [_a.sent()].concat(this.accumulations.map(function (v) { return ({ name: v.originalName, tensor: v.variable }); }))];
                }
            });
        });
    };
    MomentumOptimizer.prototype.setWeights = function (weightValues) {
        return __awaiter(this, void 0, void 0, function () {
            var trainable;
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this.extractIterations(weightValues)];
                    case 1:
                        weightValues = _a.sent();
                        trainable = false;
                        this.accumulations = weightValues.map(function (v) { return ({ originalName: v.name, variable: v.tensor.variable(trainable) }); });
                        return [2 /*return*/];
                }
            });
        });
    };
    MomentumOptimizer.prototype.getConfig = function () {
        return {
            'learningRate': this.learningRate,
            'momentum': this.momentum,
            'useNesterov': this.useNesterov
        };
    };
    /** @nocollapse */
    MomentumOptimizer.fromConfig = function (cls, config) {
        return new cls(config['learningRate'], config['momentum'], config['useNesterov']);
    };
    return MomentumOptimizer;
}(SGDOptimizer));
/** @nocollapse */
MomentumOptimizer.className = 'Momentum'; // Name matters for Python compatibility.
registerClass(MomentumOptimizer);
+
+ /** @doclink Optimizer */
+ var RMSPropOptimizer = /** @class */ (function (_super) {
+ __extends(RMSPropOptimizer, _super);
+ function RMSPropOptimizer(learningRate, decay, momentum, epsilon, centered) {
+ if (decay === void 0) { decay = 0.9; }
+ if (momentum === void 0) { momentum = 0.0; }
+ if (epsilon === void 0) { epsilon = null; }
+ if (centered === void 0) { centered = false; }
+ var _this = _super.call(this) || this;
+ _this.learningRate = learningRate;
+ _this.decay = decay;
+ _this.momentum = momentum;
+ _this.epsilon = epsilon;
+ _this.accumulatedMeanSquares = [];
+ _this.accumulatedMoments = [];
+ _this.accumulatedMeanGrads = [];
+ _this.centered = centered;
+ if (epsilon == null) {
+ _this.epsilon = ENGINE.backend.epsilon();
+ }
+ if (learningRate == null) {
+ throw new Error("learningRate for RMSPropOptimizer must be defined.");
+ }
+ return _this;
+ }
+ RMSPropOptimizer.prototype.applyGradients = function (variableGradients) {
+ var _this = this;
+ var variableNames = Array.isArray(variableGradients) ?
+ variableGradients.map(function (item) { return item.name; }) :
+ Object.keys(variableGradients);
+ variableNames.forEach(function (name, i) {
+ var value = ENGINE.registeredVariables[name];
+ var trainable = false;
+ if (_this.accumulatedMeanSquares[i] == null) {
+ _this.accumulatedMeanSquares[i] = {
+ originalName: name + "/rms",
+ variable: tidy(function () { return zerosLike(value).variable(trainable); })
+ };
+ }
+ if (_this.accumulatedMoments[i] == null) {
+ _this.accumulatedMoments[i] = {
+ originalName: name + "/momentum",
+ variable: tidy(function () { return zerosLike(value).variable(trainable); })
+ };
+ }
+ if (_this.accumulatedMeanGrads[i] == null && _this.centered) {
+ _this.accumulatedMeanGrads[i] = {
+ originalName: name + "/mg",
+ variable: tidy(function () { return zerosLike(value).variable(trainable); })
+ };
+ }
+ var gradient = Array.isArray(variableGradients) ?
+ variableGradients[i].tensor :
+ variableGradients[name];
+ if (gradient == null) {
+ return;
+ }
+ var accumulatedMeanSquare = _this.accumulatedMeanSquares[i].variable;
+ var accumulatedMoments = _this.accumulatedMoments[i].variable;
+ tidy(function () {
+ var newAccumulatedMeanSquare = add(mul(accumulatedMeanSquare, _this.decay), mul(square(gradient), 1 - _this.decay));
+ if (_this.centered) {
+ var accumulatedMeanGrad = _this.accumulatedMeanGrads[i].variable;
+ // Centered gradient
+ var newAccumulatedMeanGrad = add(mul(accumulatedMeanGrad, _this.decay), mul(gradient, 1 - _this.decay));
+ var gradContribution = div(mul(gradient, _this.learningRate), sqrt(sub(newAccumulatedMeanSquare, add(square(newAccumulatedMeanGrad), _this.epsilon))));
+ var newAccumulatedMoments = add(mul(accumulatedMoments, _this.momentum), gradContribution);
+ accumulatedMeanSquare.assign(newAccumulatedMeanSquare);
+ accumulatedMeanGrad.assign(newAccumulatedMeanGrad);
+ accumulatedMoments.assign(newAccumulatedMoments);
+ var newValue = sub(value, newAccumulatedMoments);
+ value.assign(newValue);
+ }
+ else {
+ // Plain gradient
+ var newAccumulatedMeanSquare_1 = add(mul(accumulatedMeanSquare, _this.decay), mul(square(gradient), 1 - _this.decay));
+ var newAccumulatedMoments = add(mul(accumulatedMoments, _this.momentum), div(mul(gradient, _this.learningRate), sqrt(add(newAccumulatedMeanSquare_1, _this.epsilon))));
+ accumulatedMeanSquare.assign(newAccumulatedMeanSquare_1);
+ accumulatedMoments.assign(newAccumulatedMoments);
+ var newValue = sub(value, newAccumulatedMoments);
+ value.assign(newValue);
+ }
+ });
+ });
+ this.incrementIterations();
+ };
+ RMSPropOptimizer.prototype.dispose = function () {
+ if (this.accumulatedMeanSquares != null) {
+ dispose(this.accumulatedMeanSquares.map(function (v) { return v.variable; }));
+ }
+ if (this.accumulatedMeanGrads != null && this.centered) {
+ dispose(this.accumulatedMeanGrads.map(function (v) { return v.variable; }));
+ }
+ if (this.accumulatedMoments != null) {
+ dispose(this.accumulatedMoments.map(function (v) { return v.variable; }));
+ }
+ };
+ RMSPropOptimizer.prototype.getWeights = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var variables;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ variables = __spread(this.accumulatedMeanSquares, this.accumulatedMoments);
+ if (this.centered) {
+ variables.push.apply(variables, __spread(this.accumulatedMeanGrads));
+ }
+ return [4 /*yield*/, this.saveIterations()];
+ case 1: return [2 /*return*/, [_a.sent()].concat(variables.map(function (v) { return ({ name: v.originalName, tensor: v.variable }); }))];
+ }
+ });
+ });
+ };
+ RMSPropOptimizer.prototype.setWeights = function (weightValues) {
+ return __awaiter(this, void 0, void 0, function () {
+ var variableCount, trainable;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.extractIterations(weightValues)];
+ case 1:
+ weightValues = _a.sent();
+ variableCount = this.centered ? weightValues.length / 3 : weightValues.length / 2;
+ trainable = false;
+ this.accumulatedMeanSquares =
+ weightValues.slice(0, variableCount).map(function (v) { return ({
+ originalName: v.name,
+ variable: v.tensor.variable(trainable)
+ }); });
+ this.accumulatedMoments =
+ weightValues.slice(variableCount, variableCount * 2)
+ .map(function (v) { return ({
+ originalName: v.name,
+ variable: v.tensor.variable(trainable)
+ }); });
+ if (this.centered) {
+ this.accumulatedMeanGrads =
+ weightValues.slice(variableCount * 2, variableCount * 3)
+ .map(function (v) { return ({
+ originalName: v.name,
+ variable: v.tensor.variable(trainable)
+ }); });
+ }
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ RMSPropOptimizer.prototype.getConfig = function () {
+ return {
+ 'learningRate': this.learningRate,
+ 'decay': this.decay,
+ 'momentum': this.momentum,
+ 'epsilon': this.epsilon,
+ 'centered': this.centered
+ };
+ };
+ /** @nocollapse */
+ RMSPropOptimizer.fromConfig = function (cls, config) {
+ return new cls(config['learningRate'], config['decay'], config['momentum'], config['epsilon'], config['centered']);
+ };
+ return RMSPropOptimizer;
+ }(Optimizer));
+ /** @nocollapse */
+ RMSPropOptimizer.className = 'RMSProp'; // Note: Name matters for Python compatibility.
+ registerClass(RMSPropOptimizer);
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var OptimizerConstructors = /** @class */ (function () {
+ function OptimizerConstructors() {
+ }
+ /**
+ * Constructs a `tf.SGDOptimizer` that uses stochastic gradient descent.
+ *
+ * ```js
+ * // Fit a quadratic function by learning the coefficients a, b, c.
+ * const xs = tf.tensor1d([0, 1, 2, 3]);
+ * const ys = tf.tensor1d([1.1, 5.9, 16.8, 33.9]);
+ *
+ * const a = tf.scalar(Math.random()).variable();
+ * const b = tf.scalar(Math.random()).variable();
+ * const c = tf.scalar(Math.random()).variable();
+ *
+ * // y = a * x^2 + b * x + c.
+ * const f = x => a.mul(x.square()).add(b.mul(x)).add(c);
+ * const loss = (pred, label) => pred.sub(label).square().mean();
+ *
+ * const learningRate = 0.01;
+ * const optimizer = tf.train.sgd(learningRate);
+ *
+ * // Train the model.
+ * for (let i = 0; i < 10; i++) {
+ * optimizer.minimize(() => loss(f(xs), ys));
+ * }
+ *
+ * // Make predictions.
+ * console.log(
+ * `a: ${a.dataSync()}, b: ${b.dataSync()}, c: ${c.dataSync()}`);
+ * const preds = f(xs).dataSync();
+ * preds.forEach((pred, i) => {
+ * console.log(`x: ${i}, pred: ${pred}`);
+ * });
+ * ```
+ *
+ * @param learningRate The learning rate to use for the SGD algorithm.
+ *
+ * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
+ */
+ OptimizerConstructors.sgd = function (learningRate) {
+ return new SGDOptimizer(learningRate);
+ };
+ /**
+ * Constructs a `tf.MomentumOptimizer` that uses momentum gradient
+ * descent.
+ *
+ * See
+ * [http://proceedings.mlr.press/v28/sutskever13.pdf](
+ * http://proceedings.mlr.press/v28/sutskever13.pdf)
+ *
+ * @param learningRate The learning rate to use for the Momentum gradient
+ * descent algorithm.
+ * @param momentum The momentum to use for the momentum gradient descent
+ * algorithm.
+ *
+ * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
+ */
+ OptimizerConstructors.momentum = function (learningRate, momentum, useNesterov) {
+ if (useNesterov === void 0) { useNesterov = false; }
+ return new MomentumOptimizer(learningRate, momentum, useNesterov);
+ };
+ /**
+ * Constructs a `tf.RMSPropOptimizer` that uses RMSProp gradient
+ * descent. This implementation uses plain momentum and is not centered
+ * version of RMSProp.
+ *
+ * See
+ * [http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf](
+ * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
+ *
+ * @param learningRate The learning rate to use for the RMSProp gradient
+ * descent algorithm.
+ * @param decay The discounting factor for the history/coming gradient.
+ * @param momentum The momentum to use for the RMSProp gradient descent
+ * algorithm.
+ * @param epsilon Small value to avoid zero denominator.
+ * @param centered If true, gradients are normalized by the estimated
+ * variance of the gradient.
+ *
+ * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
+ */
+ OptimizerConstructors.rmsprop = function (learningRate, decay, momentum, epsilon, centered) {
+ if (decay === void 0) { decay = .9; }
+ if (momentum === void 0) { momentum = 0.0; }
+ if (epsilon === void 0) { epsilon = null; }
+ if (centered === void 0) { centered = false; }
+ return new RMSPropOptimizer(learningRate, decay, momentum, epsilon, centered);
+ };
+ /**
+ * Constructs a `tf.AdamOptimizer` that uses the Adam algorithm.
+ * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)
+ *
+ * @param learningRate The learning rate to use for the Adam gradient
+ * descent algorithm.
+ * @param beta1 The exponential decay rate for the 1st moment estimates.
+ * @param beta2 The exponential decay rate for the 2nd moment estimates.
+ * @param epsilon A small constant for numerical stability.
+ *
+ * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
+ */
+ OptimizerConstructors.adam = function (learningRate, beta1, beta2, epsilon) {
+ if (learningRate === void 0) { learningRate = 0.001; }
+ if (beta1 === void 0) { beta1 = 0.9; }
+ if (beta2 === void 0) { beta2 = 0.999; }
+ if (epsilon === void 0) { epsilon = null; }
+ return new AdamOptimizer(learningRate, beta1, beta2, epsilon);
+ };
+ /**
+ * Constructs a `tf.AdadeltaOptimizer` that uses the Adadelta algorithm.
+ * See [https://arxiv.org/abs/1212.5701](https://arxiv.org/abs/1212.5701)
+ *
+ * @param learningRate The learning rate to use for the Adadelta gradient
+ * descent algorithm.
+ * @param rho The learning rate decay over each update.
+ * @param epsilon A constant epsilon used to better condition the grad
+ * update.
+ *
+ * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
+ */
+ OptimizerConstructors.adadelta = function (learningRate, rho, epsilon) {
+ if (learningRate === void 0) { learningRate = .001; }
+ if (rho === void 0) { rho = .95; }
+ if (epsilon === void 0) { epsilon = null; }
+ return new AdadeltaOptimizer(learningRate, rho, epsilon);
+ };
+ /**
+ * Constructs a `tf.AdamaxOptimizer` that uses the Adamax algorithm.
+ * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)
+ *
+ * @param learningRate The learning rate to use for the Adamax gradient
+ * descent algorithm.
+ * @param beta1 The exponential decay rate for the 1st moment estimates.
+ * @param beta2 The exponential decay rate for the 2nd moment estimates.
+ * @param epsilon A small constant for numerical stability.
+ * @param decay The learning rate decay over each update.
+ *
+ * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
+ */
+ OptimizerConstructors.adamax = function (learningRate, beta1, beta2, epsilon, decay) {
+ if (learningRate === void 0) { learningRate = 0.002; }
+ if (beta1 === void 0) { beta1 = 0.9; }
+ if (beta2 === void 0) { beta2 = 0.999; }
+ if (epsilon === void 0) { epsilon = null; }
+ if (decay === void 0) { decay = 0.0; }
+ return new AdamaxOptimizer(learningRate, beta1, beta2, epsilon, decay);
+ };
+ /**
+ * Constructs a `tf.AdagradOptimizer` that uses the Adagrad algorithm.
+ * See
+ * [http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf](
+ * http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
+ * or
+ * [http://ruder.io/optimizing-gradient-descent/index.html#adagrad](
+ * http://ruder.io/optimizing-gradient-descent/index.html#adagrad)
+ *
+ * @param learningRate The learning rate to use for the Adagrad gradient
+ * descent algorithm.
+ * @param initialAccumulatorValue Starting value for the accumulators, must be
+ * positive.
+ *
+ * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
+ */
+ OptimizerConstructors.adagrad = function (learningRate, initialAccumulatorValue) {
+ if (initialAccumulatorValue === void 0) { initialAccumulatorValue = 0.1; }
+ return new AdagradOptimizer(learningRate, initialAccumulatorValue);
+ };
+ return OptimizerConstructors;
+ }());
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var train = {
+ sgd: OptimizerConstructors.sgd,
+ momentum: OptimizerConstructors.momentum,
+ adadelta: OptimizerConstructors.adadelta,
+ adagrad: OptimizerConstructors.adagrad,
+ rmsprop: OptimizerConstructors.rmsprop,
+ adamax: OptimizerConstructors.adamax,
+ adam: OptimizerConstructors.adam
+ };
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var delayCallback = (function () {
+ if (typeof requestAnimationFrame !== 'undefined') {
+ return requestAnimationFrame;
+ }
+ else if (typeof setImmediate !== 'undefined') {
+ return setImmediate;
+ }
+ return function (f) { return f(); }; // no delays
+ })();
+ /**
+ * Returns a promise that resolve when a requestAnimationFrame has completed.
+ *
+ * On Node.js this uses setImmediate instead of requestAnimationFrame.
+ *
+ * This is simply a sugar method so that users can do the following:
+ * `await tf.nextFrame();`
+ *
+ * @doc {heading: 'Performance', subheading: 'Timing'}
+ */
+ function nextFrame() {
+ return new Promise(function (resolve) { return delayCallback(function () { return resolve(); }); });
+ }
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function assertParamsConsistent(shapes, axis) {
+ var rank = shapes[0].length;
+ shapes.forEach(function (shape, i) {
+ assert(shape.length === rank, function () { return "Error in concat" + rank + "D: rank of tensors[" + i + "] must be the same " +
+ ("as the rank of the rest (" + rank + ")"); });
+ });
+ assert(axis >= 0 && axis < rank, function () { return "Error in concat" + rank + "D: axis must be between 0 and " + (rank - 1) + "."; });
+ var firstShape = shapes[0];
+ shapes.forEach(function (shape, i) {
+ for (var r = 0; r < rank; r++) {
+ assert((r === axis) || (shape[r] === firstShape[r]), function () { return "Error in concat" + rank + "D: Shape of tensors[" + i + "] (" + shape + ") " +
+ ("does not match the shape of the rest (" + firstShape + ") ") +
+ ("along the non-concatenated axis " + i + "."); });
+ }
+ });
+ }
+ function computeOutShape$1(shapes, axis) {
+ var outputShape = shapes[0].slice();
+ for (var i = 1; i < shapes.length; i++) {
+ outputShape[axis] += shapes[i][axis];
+ }
+ return outputShape;
+ }
+
+ /**
+ * @license
+ * Copyright 2017 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ var PARALLELIZE_THRESHOLD = 30;
+ function computeOptimalWindowSize(inSize) {
+ if (inSize <= PARALLELIZE_THRESHOLD) {
+ return inSize;
+ }
+ return nearestDivisor(inSize, Math.floor(Math.sqrt(inSize)));
+ }
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ // Returns the image center in pixels.
+ function getImageCenter(center, imageHeight, imageWidth) {
+ var centerX = imageWidth * (typeof center === 'number' ? center : center[0]);
+ var centerY = imageHeight * (typeof center === 'number' ? center : center[1]);
+ return [centerX, centerY];
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Gets the new shape of the input Tensor after it's been reshaped
+ * to:
+ * [blockShape[0], ..., blockShape[M-1], batch / prod(blockShape),
+ * inputShape[1], ..., inputShape[N-1]]
+ *
+ * See step 1: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
+ */
+ function getReshaped(inputShape, blockShape, prod, batchToSpace) {
+ if (batchToSpace === void 0) { batchToSpace = true; }
+ var reshaped = [];
+ if (batchToSpace) {
+ reshaped = reshaped.concat(blockShape.slice(0));
+ reshaped.push(inputShape[0] / prod);
+ reshaped = reshaped.concat(inputShape.slice(1));
+ }
+ else {
+ reshaped = reshaped.concat(inputShape[0]);
+ var spatialLength = blockShape.length;
+ for (var i = 0; i < spatialLength; ++i) {
+ reshaped =
+ reshaped.concat([inputShape[i + 1] / blockShape[i], blockShape[i]]);
+ }
+ reshaped = reshaped.concat(inputShape.slice(spatialLength + 1));
+ }
+ return reshaped;
+ }
+ /**
+ * Gets the permutation that will transpose the dimensions of the
+ * reshaped tensor to shape:
+ *
+ * [batch / prod(block_shape),inputShape[1], blockShape[0], ...,
+ * inputShape[M], blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]
+ *
+ * see step 2: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
+ */
+ function getPermuted(reshapedRank, blockShapeRank, batchToSpace) {
+ if (batchToSpace === void 0) { batchToSpace = true; }
+ var permuted = [];
+ if (batchToSpace) {
+ permuted.push(blockShapeRank);
+ for (var i = blockShapeRank + 1; i < reshapedRank; ++i) {
+ if (i <= 2 * blockShapeRank) {
+ permuted.push(i);
+ permuted.push(i - (blockShapeRank + 1));
+ }
+ else {
+ permuted.push(i);
+ }
+ }
+ }
+ else {
+ var permutedBeforeBatch = [];
+ var permutedAfterBatch = [];
+ for (var i = 1; i < reshapedRank; ++i) {
+ if (i >= blockShapeRank * 2 + 1 || i % 2 === 1) {
+ permutedAfterBatch.push(i);
+ }
+ else {
+ permutedBeforeBatch.push(i);
+ }
+ }
+ permuted.push.apply(permuted, __spread(permutedBeforeBatch));
+ permuted.push(0);
+ permuted.push.apply(permuted, __spread(permutedAfterBatch));
+ }
+ return permuted;
+ }
+ /**
+ * Gets the shape of the reshaped and permuted input Tensor before any cropping
+ * is applied. The new shape will be:
+ *
+ * [batch / prod(blockShape),inputShape[1] * blockShape[0], ...,
+ * inputShape[M] * blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]
+ *
+ * See step 3: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
+ */
+ function getReshapedPermuted(inputShape, blockShape, prod, batchToSpace) {
+ if (batchToSpace === void 0) { batchToSpace = true; }
+ var reshapedPermuted = [];
+ if (batchToSpace) {
+ reshapedPermuted.push(inputShape[0] / prod);
+ }
+ else {
+ reshapedPermuted.push(inputShape[0] * prod);
+ }
+ for (var i = 1; i < inputShape.length; ++i) {
+ if (i <= blockShape.length) {
+ if (batchToSpace) {
+ reshapedPermuted.push(blockShape[i - 1] * inputShape[i]);
+ }
+ else {
+ reshapedPermuted.push(inputShape[i] / blockShape[i - 1]);
+ }
+ }
+ else {
+ reshapedPermuted.push(inputShape[i]);
+ }
+ }
+ return reshapedPermuted;
+ }
+ /**
+ * Converts the crops argument into the beginning coordinates of a slice
+ * operation.
+ */
+ function getSliceBeginCoords(crops, blockShape) {
+ var sliceBeginCoords = [0];
+ for (var i = 0; i < blockShape; ++i) {
+ sliceBeginCoords.push(crops[i][0]);
+ }
+ return sliceBeginCoords;
+ }
+ /**
+ * Converts the crops argument into the size of a slice operation. When
+ * combined with getSliceBeginCoords this function allows the reshaped and
+ * permuted Tensor to be cropped to its final output shape of:
+ *
+ * inputShape[1] * blockShape[0] - crops[0,0] - crops[0,1], ...,
+ * inputShape[M] * blockShape[M-1] -crops[M-1,0] -
+ * crops[M-1,1],inputShape[M+1], ..., inputShape[N-1]]
+ *
+ * See step 4: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
+ */
+ function getSliceSize(uncroppedShape, crops, blockShape) {
+ var sliceSize = uncroppedShape.slice(0, 1);
+ for (var i = 0; i < blockShape; ++i) {
+ sliceSize.push(uncroppedShape[i + 1] - crops[i][0] - crops[i][1]);
+ }
+ return sliceSize;
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // Constants for the SELU (scaled exponential linear unit) activation.
    // NOTE(review): the values match the scale*alpha and scale ("lambda")
    // constants from Klambauer et al., "Self-Normalizing Neural Networks" —
    // confirm against the kernels that consume them.
    var SELU_SCALEALPHA = 1.7580993408473768599402175208123;
    var SELU_SCALE = 1.0507009873554804934193349852946;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    // Polynomial coefficients for an erf() approximation.
    // NOTE(review): these appear to be the p and a1..a5 coefficients of the
    // Abramowitz & Stegun formula 7.1.26 rational approximation — confirm
    // against the erf kernel that uses them.
    var ERF_P = 0.3275911;
    var ERF_A1 = 0.254829592;
    var ERF_A2 = -0.284496736;
    var ERF_A3 = 1.421413741;
    var ERF_A4 = -1.453152027;
    var ERF_A5 = 1.061405429;
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Merges real and imaginary Float32Arrays into a single complex Float32Array.
+ *
+ * The memory layout is interleaved as follows:
+ * real: [r0, r1, r2]
+ * imag: [i0, i1, i2]
+ * complex: [r0, i0, r1, i1, r2, i2]
+ *
+ * This is the inverse of splitRealAndImagArrays.
+ *
+ * @param real The real values of the complex tensor values.
+ * @param imag The imag values of the complex tensor values.
+ * @returns A complex tensor as a Float32Array with merged values.
+ */
+ function mergeRealAndImagArrays(real, imag) {
+ if (real.length !== imag.length) {
+ throw new Error("Cannot merge real and imag arrays of different lengths. real:" +
+ (real.length + ", imag: " + imag.length + "."));
+ }
+ var result = new Float32Array(real.length * 2);
+ for (var i = 0; i < result.length; i += 2) {
+ result[i] = real[i / 2];
+ result[i + 1] = imag[i / 2];
+ }
+ return result;
+ }
+ /**
+ * Splits a complex Float32Array into real and imag parts.
+ *
+ * The memory layout is interleaved as follows:
+ * complex: [r0, i0, r1, i1, r2, i2]
+ * real: [r0, r1, r2]
+ * imag: [i0, i1, i2]
+ *
+ * This is the inverse of mergeRealAndImagArrays.
+ *
+ * @param complex The complex tensor values.
+ * @returns An object with real and imag Float32Array components of the complex
+ * tensor.
+ */
+ function splitRealAndImagArrays(complex) {
+ var real = new Float32Array(complex.length / 2);
+ var imag = new Float32Array(complex.length / 2);
+ for (var i = 0; i < complex.length; i += 2) {
+ real[i / 2] = complex[i];
+ imag[i / 2] = complex[i + 1];
+ }
+ return { real: real, imag: imag };
+ }
+ /**
+ * Extracts even indexed complex values in the given array.
+ * @param complex The complex tensor values
+ */
+ function complexWithEvenIndex(complex) {
+ var len = Math.ceil(complex.length / 4);
+ var real = new Float32Array(len);
+ var imag = new Float32Array(len);
+ for (var i = 0; i < complex.length; i += 4) {
+ real[Math.floor(i / 4)] = complex[i];
+ imag[Math.floor(i / 4)] = complex[i + 1];
+ }
+ return { real: real, imag: imag };
+ }
+ /**
+ * Extracts odd indexed comple values in the given array.
+ * @param complex The complex tensor values
+ */
+ function complexWithOddIndex(complex) {
+ var len = Math.floor(complex.length / 4);
+ var real = new Float32Array(len);
+ var imag = new Float32Array(len);
+ for (var i = 2; i < complex.length; i += 4) {
+ real[Math.floor(i / 4)] = complex[i];
+ imag[Math.floor(i / 4)] = complex[i + 1];
+ }
+ return { real: real, imag: imag };
+ }
+ /**
+ * Get the map representing a complex value in the given array.
+ * @param complex The complex tensor values.
+ * @param index An index of the target complex value.
+ */
+ function getComplexWithIndex(complex, index) {
+ var real = complex[index * 2];
+ var imag = complex[index * 2 + 1];
+ return { real: real, imag: imag };
+ }
+ /**
+ * Insert a given complex value into the TypedArray.
+ * @param data The array in which the complex value is inserted.
+ * @param c The complex value to be inserted.
+ * @param index An index of the target complex value.
+ */
+ function assignToTypedArray(data, real, imag, index) {
+ data[index * 2] = real;
+ data[index * 2 + 1] = imag;
+ }
+ /**
+ * Make the list of exponent terms used by FFT.
+ */
+ function exponents(n, inverse) {
+ var real = new Float32Array(n / 2);
+ var imag = new Float32Array(n / 2);
+ for (var i = 0; i < Math.ceil(n / 2); i++) {
+ var x = (inverse ? 2 : -2) * Math.PI * (i / n);
+ real[i] = Math.cos(x);
+ imag[i] = Math.sin(x);
+ }
+ return { real: real, imag: imag };
+ }
+ /**
+ * Make the exponent term used by FFT.
+ */
+ function exponent(k, n, inverse) {
+ var x = (inverse ? 2 : -2) * Math.PI * (k / n);
+ var real = Math.cos(x);
+ var imag = Math.sin(x);
+ return { real: real, imag: imag };
+ }
+
    // Tokens used when parsing einsum equations such as "ij,jk->ik".
    var ARROW = '->';
    var ARROW_REGEX = /->/g;
    var COMMA = ',';
    var ELLIPSIS = '...';
    /**
     * Parse an equation for einsum.
     *
     * @param equation The einsum equation (e.g., "ij,jk->ik").
     * @param numTensors Number of tensors provided along with `equation`. Used to
     *     check matching number of input tensors.
     * @returns An object consisting of the following fields:
     *   - allDims: all dimension names as strings, output dims first.
     *   - summedDims: a list of all dimensions being summed over, as indices to
     *     the elements of `allDims`.
     *   - idDims: indices of the dimensions in each input tensor, as indices to
     *     the elements of `allDims`.
     * @throws Error when the equation has no arrow or more than one, uses
     *     ellipsis, has a term count that mismatches `numTensors`, has more than
     *     2 inputs, mentions an output label absent from the inputs, or repeats
     *     an axis within one input term.
     */
    function decodeEinsumEquation(equation, numTensors) {
        equation = equation.replace(/\s/g, ''); // Remove whitespace in equation.
        // Count "->" occurrences by comparing lengths before/after removal;
        // exactly one arrow is required.
        var numArrows = (equation.length - equation.replace(ARROW_REGEX, '').length) /
            ARROW.length;
        if (numArrows < 1) {
            throw new Error('Equations without an arrow are not supported.');
        }
        else if (numArrows > 1) {
            throw new Error("Equation must contain exactly one arrow (\"" + ARROW + "\").");
        }
        var _a = __read(equation.split(ARROW), 2), inputString = _a[0], outputString = _a[1];
        assert(inputString.indexOf(ELLIPSIS) === -1, function () { return "The ellipsis notation (\"" + ELLIPSIS + "\") is not supported yet."; });
        var inputTerms = inputString.split(COMMA);
        var numInputs = inputTerms.length;
        if (numTensors !== numInputs) {
            throw new Error("Expected " + numInputs + " input tensors, received " + numTensors);
        }
        if (numInputs > 2) {
            throw new Error('Support for more than 2 input tensors is not implemented yet.');
        }
        // Collect output dimension names first so output axes precede summed
        // axes in `allDims` (the summedDims computation below relies on this).
        var allDims = [];
        var _loop_1 = function (i) {
            var dimName = outputString[i];
            if (!inputTerms.some(function (inputTerm) { return inputTerm.indexOf(dimName) !== -1; })) {
                throw new Error("Output subscripts contain the label " + dimName + " " +
                    "not present in the input subscripts.");
            }
            if (allDims.indexOf(dimName) === -1) {
                allDims.push(dimName);
            }
        };
        for (var i = 0; i < outputString.length; ++i) {
            _loop_1(i);
        }
        // Then append any input-only (i.e., summed-over) dimension names.
        for (var i = 0; i < inputString.length; ++i) {
            var dimName = inputString[i];
            if (allDims.indexOf(dimName) === -1 && dimName !== COMMA) {
                allDims.push(dimName);
            }
        }
        // Map each input term's characters to indices into `allDims`, rejecting
        // duplicate axes within a single term.
        var idDims = new Array(inputTerms.length);
        for (var i = 0; i < numInputs; ++i) {
            if (new Set(inputTerms[i].split('')).size !== inputTerms[i].length) {
                throw new Error("Found duplicate axes in input component " + inputTerms[i] + ". " +
                    "Support for duplicate axes in input is not implemented yet.");
            }
            idDims[i] = [];
            for (var j = 0; j < inputTerms[i].length; ++j) {
                idDims[i].push(allDims.indexOf(inputTerms[i][j]));
            }
        }
        var numDims = allDims.length; // Number of unique dimensions.
        var numOutDims = outputString.length; // Number of output dimensions.
        var summedDims = []; // Dimensions being summed over.
        for (var i = numOutDims; i < numDims; ++i) {
            summedDims.push(i);
        }
        return { allDims: allDims, summedDims: summedDims, idDims: idDims };
    }
+ /**
+ * Get the permutation for a given input tensor.
+ *
+ * @param nDims Total number of dimension of all tensors involved in the einsum
+ * operation.
+ * @param idDims Dimension indices involve in the tensor in question.
+ * @returns An object consisting of the following fields:
+ * - permutationIndices: Indices to permute the axes of the tensor with.
+ * - expandDims: Indices to the dimension that need to be expanded from the
+ * tensor after permutation.
+ */
+ function getEinsumPermutation(nDims, idDims) {
+ var permutationIndices = new Array(nDims);
+ permutationIndices.fill(-1);
+ for (var i = 0; i < idDims.length; ++i) {
+ permutationIndices[idDims[i]] = i;
+ }
+ var expandDims = [];
+ for (var i = 0; i < nDims; ++i) {
+ if (permutationIndices[i] === -1) {
+ expandDims.push(i);
+ }
+ }
+ permutationIndices = permutationIndices.filter(function (d) { return d !== -1; });
+ return { permutationIndices: permutationIndices, expandDims: expandDims };
+ }
    /**
     * Checks that the dimension sizes from different input tensors match the
     * equation.
     *
     * @param nDims Total number of unique dimensions in the equation.
     * @param idDims Per-input lists of dimension indices (as produced by
     *     `decodeEinsumEquation`).
     * @param tensors The input tensors; only their `shape` fields are read.
     */
    function checkEinsumDimSizes(nDims, idDims, tensors) {
        // First sighting of a dimension records its size; every later sighting
        // must agree with the recorded value.
        var dimSizes = new Array(nDims);
        var _loop_2 = function (i) {
            var shape = tensors[i].shape;
            var _loop_3 = function (j) {
                if (dimSizes[idDims[i][j]] === undefined) {
                    dimSizes[idDims[i][j]] = shape[j];
                }
                else {
                    assert(dimSizes[idDims[i][j]] === shape[j], function () { return "Expected dimension " + dimSizes[idDims[i][j]] + " at axis " + j + " " +
                        ("of input shaped " + JSON.stringify(shape) + ", ") +
                        ("but got dimension " + shape[j]); });
                }
            };
            for (var j = 0; j < idDims[i].length; ++j) {
                _loop_3(j);
            }
        };
        for (var i = 0; i < tensors.length; ++i) {
            _loop_2(i);
        }
    }
    /**
     * Gets path of computation for einsum.
     *
     * @param summedDims indices to the dimensions being summed over.
     * @param idDims A look up table for the dimensions present in each input
     *     tensor. Each constituent array contains indices for the dimensions in
     *     the corresponding input tensor.
     *
     * @return A map with two fields:
     *   - path: The path of computation, with each element indicating the
     *     dimension being summed over after the element-wise multiplication in
     *     that step. NOTE: `path` aliases `summedDims`, so when no summation is
     *     needed the -1 sentinel is pushed onto the caller's array.
     *   - steps: Each element contains the indices to the input tensors being
     *     used for element-wise multiplication in the corresponding step.
     *     (One more step than summed dims is allocated; the extra stays empty
     *     when unused.)
     */
    function getEinsumComputePath(summedDims, idDims) {
        var e_1, _a;
        var path = summedDims;
        var steps = [];
        var nSteps = 0;
        if (summedDims.length === 0) {
            // Einsum that involves no summing: e.g., transpose and outer product.
            path.push(-1);
        }
        nSteps = summedDims.length + 1;
        for (var i = 0; i < nSteps; ++i) {
            steps.push([]);
        }
        var computedTermIndices = [];
        for (var i = 0; i < path.length; ++i) {
            var summedDim = path[i];
            var termIndices = findTermsWithDim(idDims, summedDim);
            try {
                // Transpiled for-of loop (tslib __values helper): assign each
                // matching term to the earliest step that hasn't used it yet.
                for (var termIndices_1 = (e_1 = void 0, __values(termIndices)), termIndices_1_1 = termIndices_1.next(); !termIndices_1_1.done; termIndices_1_1 = termIndices_1.next()) {
                    var termIndex = termIndices_1_1.value;
                    if (computedTermIndices.indexOf(termIndex) === -1) {
                        steps[i].push(termIndex);
                        computedTermIndices.push(termIndex);
                    }
                }
            }
            catch (e_1_1) { e_1 = { error: e_1_1 }; }
            finally {
                try {
                    if (termIndices_1_1 && !termIndices_1_1.done && (_a = termIndices_1.return)) _a.call(termIndices_1);
                }
                finally { if (e_1) throw e_1.error; }
            }
        }
        return { path: path, steps: steps };
    }
+ /** Determines if an axes permutation is the identity permutation. */
+ function isIdentityPermutation(perm) {
+ return perm.every(function (dim, index) { return dim === index; });
+ }
+ function findTermsWithDim(idDims, dim) {
+ var termIndices = [];
+ for (var i = 0; i < idDims.length; ++i) {
+ if (idDims[i].length === 0 || idDims[i].indexOf(dim) !== -1 || dim === -1) {
+ termIndices.push(i);
+ }
+ }
+ return termIndices;
+ }
+
    /**
     * Prepare the split size array. When the input is a number, the axis is evenly
     * divided among the split size. When the input contains the negative value, the
     * rest of the axis is allocated toward that.
     *
     * @param x The tensor being split; only `x.shape` is read here.
     * @param numOrSizeSplits Either the number of equal splits, or an array of
     *     split sizes that may contain a single -1 placeholder.
     *     NOTE(review): when a -1 placeholder is present this array is mutated
     *     in place (the -1 is replaced with the remaining size) and the same
     *     array object is returned.
     * @param axis The axis to split along. Defaults to 0.
     * @returns The resolved array of split sizes.
     */
    function prepareSplitSize(x, numOrSizeSplits, axis) {
        if (axis === void 0) { axis = 0; }
        var splitSizes = [];
        if (typeof (numOrSizeSplits) === 'number') {
            assert(x.shape[axis] % numOrSizeSplits === 0, function () { return 'Number of splits must evenly divide the axis.'; });
            splitSizes =
                new Array(numOrSizeSplits).fill(x.shape[axis] / numOrSizeSplits);
        }
        else {
            // Count the -1 placeholders; at most one is allowed.
            var numOfNegs = numOrSizeSplits.reduce(function (count, value) {
                if (value === -1) {
                    count += 1;
                }
                return count;
            }, 0);
            assert(numOfNegs <= 1, function () { return 'There should be only one negative value in split array.'; });
            var negIndex = numOrSizeSplits.indexOf(-1);
            // Allow the number of split array to be -1, which indicates the rest
            // of dimension is allocated to that split.
            if (negIndex !== -1) {
                var total = numOrSizeSplits.reduce(function (a, b) { return b > 0 ? a + b : a; });
                numOrSizeSplits[negIndex] = x.shape[axis] - total;
            }
            assert(x.shape[axis] === numOrSizeSplits.reduce(function (a, b) { return a + b; }), function () { return 'The sum of sizes must match the size of the axis dimension.'; });
            splitSizes = numOrSizeSplits;
        }
        return splitSizes;
    }
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Generates sparse fill empty rows indices, dense shape mismatch error message.
+ *
+ * @param indicesLength The first dimension of indices.
+ */
+ function getSparseFillEmptyRowsIndicesDenseShapeMismatch(indicesLength) {
+ return "Received SparseTensor with denseShape[0] = 0 but\n indices.shape[0] = " + indicesLength;
+ }
+ /**
+ * Generates sparse fill empty rows negative index error message.
+ *
+ * @param index The index with a negative value.
+ * @param value The negative value.
+ */
+ function getSparseFillEmptyRowsNegativeIndexErrorMessage(index, value) {
+ return "indices(" + index + ", 0) is invalid: " + value + " < 0";
+ }
+ /**
+ * Generates sparse fill empty rows out of range index error message.
+ *
+ * @param index The index with an out of range value.
+ * @param value The out of range value.
+ * @param limit The upper limit for indices.
+ */
+ function getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(index, value, limit) {
+ return "indices(" + index + ", 0) is invalid: " + value + " >= " + limit;
+ }
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ /**
+ * Generates sparse reshape multiple negative 1 output dimension error message.
+ *
+ * @param dim1 The first dimension with a negative 1 value.
+ * @param dim2 The second dimension with a negative 1 value.
+ */
+ function getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(dim1, dim2) {
+ return "only one output dimension may be -1, not both " + dim1 + " and " + dim2;
+ }
+ /**
+ * Generates sparse reshape negative output dimension error message.
+ *
+ * @param dim The dimension with a negative value.
+ * @param value The negative value.
+ */
+ function getSparseReshapeNegativeOutputDimErrorMessage(dim, value) {
+ return "size " + dim + " must be non-negative, not " + value;
+ }
+ /**
+ * Generates sparse reshape empty tensor zero output dimension error message.
+ *
+ */
+ function getSparseReshapeEmptyTensorZeroOutputDimErrorMessage() {
+ return 'reshape cannot infer the missing input size for an empty tensor ' +
+ 'unless all specified input sizes are non-zero';
+ }
    /**
     * Generates sparse reshape input output multiple mismatch error message.
     *
     * @param inputShape the input shape.
     * @param outputShape the requested output shape.
     * @returns The formatted error message (contains an embedded newline).
     */
    function getSparseReshapeInputOutputMultipleErrorMessage(inputShape, outputShape) {
        // sizeFromShape (defined elsewhere in this bundle) flattens a shape to
        // its element count.
        var inputSize = sizeFromShape(inputShape);
        var outputSize = sizeFromShape(outputShape);
        return "Input to reshape is a SparseTensor with " + inputSize + "\n dense values, but the requested shape requires a multiple of " + outputSize + ". inputShape=" + inputShape + " outputShape= " + outputShape;
    }
    /**
     * Generates sparse reshape input output inequality error message.
     *
     * @param inputShape the input shape.
     * @param outputShape the requested output shape.
     * @returns The formatted error message.
     */
    function getSparseReshapeInputOutputMismatchErrorMessage(inputShape, outputShape) {
        // sizeFromShape (defined elsewhere in this bundle) flattens a shape to
        // its element count.
        var inputSize = sizeFromShape(inputShape);
        var outputSize = sizeFromShape(outputShape);
        return "Input to reshape is a tensor with " + inputSize + " dense values, but the requested shape has " + outputSize + ". inputShape=" + inputShape + " outputShape=" + outputShape;
    }
+
+ /**
+ * @license
+ * Copyright 2021 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    /**
     * Generates sparse segment reduction negative segment ids error message.
     *
     * @returns The fixed error message string.
     */
    function getSparseSegmentReductionNegativeSegmentIdsErrorMessage() {
        return "segment ids must be >= 0";
    }
    /**
     * Generates sparse segment reduction non increasing segment ids error message.
     *
     * @returns The fixed error message string.
     */
    function getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage() {
        return "segment ids are not increasing";
    }
+ /**
+ * Generates sparse segment reduction segment id out of range error message.
+ *
+ * @param segmentId The segment id index that is out of range.
+ * @param outputRows Upper bound of valid segment id values.
+ */
+ function getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(segmentId, outputRows) {
+ return "Segment id " + segmentId + " out of range [0, " + outputRows + "), possibly because segmentIds input is not sorted.";
+ }
+ /**
+ * Generates sparse segment reduction input indice out of range error message.
+ *
+ * @param index The index that holds the out of range value.
+ * @param indexValue The value that is out of range.
+ * @param inputRows Upper bound of valid index values.
+ */
+ function getSparseSegmentReductionIndicesOutOfRangeErrorMessage(index, indexValue, inputRows) {
+ return "Bad: indices[" + index + "] == " + indexValue + " out of range [0, " + inputRows + ")";
+ }
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    /**
     * Picks a window size for a segment-op reduction pass.
     *
     * Inputs at or below PARALLELIZE_THRESHOLD are handled in a single window.
     * Otherwise, starts from nearestDivisor(inSize, floor(sqrt(inSize))) and
     * keeps calling nearestDivisor with an incremented hint until the window
     * exceeds numSegments or spans the whole input.
     * NOTE(review): assumes nearestDivisor (defined elsewhere in this bundle)
     * returns a divisor of inSize at or above the hint, so the loop terminates
     * at res === inSize in the worst case — confirm against its definition.
     */
    function segOpComputeOptimalWindowSize(inSize, numSegments) {
        var done = false;
        var res;
        if (inSize <= PARALLELIZE_THRESHOLD) {
            res = inSize;
            done = true;
        }
        else {
            res = nearestDivisor(inSize, Math.floor(Math.sqrt(inSize)));
        }
        while (!done) {
            if (res > numSegments || res === inSize) {
                done = true;
            }
            else {
                res = nearestDivisor(inSize, res + 1);
            }
        }
        return res;
    }
+ function computeOutShape(aShape, axis, numSegments) {
+ var outShape = [];
+ var rank = aShape.length;
+ for (var dim = 0; dim < rank; dim++) {
+ if (dim !== axis) {
+ outShape.push(aShape[dim]);
+ }
+ else {
+ outShape.push(numSegments);
+ }
+ }
+ return outShape;
+ }
    /**
     * Validates the axis/batchDims arguments of a gather op and computes the
     * sizes and output shape it needs.
     *
     * @param x The tensor being gathered from; only `x.shape` is read.
     * @param indices The indices tensor; only `indices.shape` is read.
     * @param axis The axis to gather along (expected >= batchDims).
     * @param batchDims Number of leading batch dimensions; may be negative,
     *     in which case it is counted from the end of indices' rank.
     * @returns batchSize/outerSize/sliceSize products of the corresponding
     *     shape segments, dimSize (= x.shape[axis]) and the full outputShape.
     * @throws Error when batchDims is out of range, exceeds rank(x) or axis,
     *     or when leading batch dimensions of x and indices disagree.
     */
    function collectGatherOpShapeInfo(x, indices, axis, batchDims) {
        var indicesRank = indices.shape.length;
        var xRank = x.shape.length;
        if (batchDims !== 0) {
            if (batchDims < -indicesRank || batchDims > indicesRank) {
                throw new Error("Expect batchDims in the range of [-" + indicesRank + ", " + indicesRank + "], but got " + batchDims);
            }
        }
        // Negative batchDims counts back from the indices rank.
        if (batchDims < 0) {
            batchDims += indicesRank;
        }
        if (batchDims > xRank) {
            throw new Error("batchDims (" + batchDims + ") must be less than rank(x) (\n " + xRank + ").");
        }
        if (axis < batchDims) {
            throw new Error("batchDims (" + batchDims + ") must be less than or equal to axis (" + axis + ").");
        }
        // Leading batch dimensions must match between x and indices.
        for (var i = 0; i < batchDims; ++i) {
            if (x.shape[i] !== indices.shape[i]) {
                throw new Error("x.shape[" + i + "]: " + x.shape[i] + " should be equal to indices.shape[" + i + "]: " + indices.shape[i] + ".");
            }
        }
        var dimSize = x.shape[axis];
        // Output layout: [batch dims][outer dims][indices dims][slice dims].
        var outputShape = [];
        var batchSize = 1;
        var outerSize = 1;
        var sliceSize = 1;
        for (var i = 0; i < batchDims; ++i) {
            outputShape.push(x.shape[i]);
            batchSize *= x.shape[i];
        }
        for (var i = batchDims; i < axis; i++) {
            outputShape.push(x.shape[i]);
            outerSize *= x.shape[i];
        }
        for (var i = batchDims; i < indicesRank; i++) {
            outputShape.push(indices.shape[i]);
        }
        for (var i = axis + 1; i < xRank; i++) {
            outputShape.push(x.shape[i]);
            sliceSize *= x.shape[i];
        }
        return { batchSize: batchSize, sliceSize: sliceSize, outerSize: outerSize, dimSize: dimSize, outputShape: outputShape };
    }
+
    // Namespace object exposed as `backend_util.segment_util`: helpers for
    // segment and gather ops defined above.
    var segment_util = {
        __proto__: null,
        segOpComputeOptimalWindowSize: segOpComputeOptimalWindowSize,
        computeOutShape: computeOutShape,
        collectGatherOpShapeInfo: collectGatherOpShapeInfo
    };
+
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
    /**
     * Decodes an array of byte arrays into JavaScript strings via decodeString
     * (defined elsewhere in this bundle; presumably UTF-8 — see its definition).
     * Any decoding failure is rethrown as a single descriptive Error.
     */
    function fromUint8ToStringArray(vals) {
        try {
            // Decode the bytes into string.
            return vals.map(function (val) { return decodeString(val); });
        }
        catch (err) {
            throw new Error("Failed to decode encoded string bytes into utf-8, error: " + err);
        }
    }
    /**
     * Encodes each string via encodeString (defined elsewhere in this bundle);
     * inverse of fromUint8ToStringArray.
     */
    function fromStringArrayToUint8(strings) {
        return strings.map(function (s) { return encodeString(s); });
    }
+
    // Namespace object re-exported as `tf.backend_util`: the shared helpers
    // backends rely on (shape/conv/reduction utilities, FFT helpers, einsum
    // parsing, split preparation, and sparse-op error-message builders).
    var backend_util = {
        __proto__: null,
        slice_util: slice_util,
        segment_util: segment_util,
        fromUint8ToStringArray: fromUint8ToStringArray,
        fromStringArrayToUint8: fromStringArrayToUint8,
        upcastType: upcastType,
        axesAreInnerMostDims: axesAreInnerMostDims,
        combineLocations: combineLocations,
        computeOutAndReduceShapes: computeOutAndReduceShapes,
        expandShapeToKeepDim: expandShapeToKeepDim,
        assertAxesAreInnerMostDims: assertAxesAreInnerMostDims,
        getAxesPermutation: getAxesPermutation,
        getUndoAxesPermutation: getUndoAxesPermutation,
        getInnerMostAxes: getInnerMostAxes,
        getBroadcastDims: getBroadcastDims,
        getReductionAxes: getReductionAxes,
        assertAndGetBroadcastShape: assertAndGetBroadcastShape,
        assertParamsConsistent: assertParamsConsistent,
        computeOutShape: computeOutShape$1,
        computeDilation2DInfo: computeDilation2DInfo,
        computePool2DInfo: computePool2DInfo,
        computePool3DInfo: computePool3DInfo,
        computeConv2DInfo: computeConv2DInfo,
        computeConv3DInfo: computeConv3DInfo,
        computeDefaultPad: computeDefaultPad,
        tupleValuesAreOne: tupleValuesAreOne,
        eitherStridesOrDilationsAreOne: eitherStridesOrDilationsAreOne,
        convertConv2DDataFormat: convertConv2DDataFormat,
        checkPadOnDimRoundingMode: checkPadOnDimRoundingMode,
        getFusedDyActivation: getFusedDyActivation,
        getFusedBiasGradient: getFusedBiasGradient,
        applyActivation: applyActivation,
        shouldFuse: shouldFuse,
        PARALLELIZE_THRESHOLD: PARALLELIZE_THRESHOLD,
        computeOptimalWindowSize: computeOptimalWindowSize,
        getImageCenter: getImageCenter,
        getReshaped: getReshaped,
        getPermuted: getPermuted,
        getReshapedPermuted: getReshapedPermuted,
        getSliceBeginCoords: getSliceBeginCoords,
        getSliceSize: getSliceSize,
        prepareAndValidate: prepareAndValidate,
        validateUpdateShape: validateUpdateShape,
        validateInput: validateInput$1,
        calculateShapes: calculateShapes,
        SELU_SCALEALPHA: SELU_SCALEALPHA,
        SELU_SCALE: SELU_SCALE,
        ERF_P: ERF_P,
        ERF_A1: ERF_A1,
        ERF_A2: ERF_A2,
        ERF_A3: ERF_A3,
        ERF_A4: ERF_A4,
        ERF_A5: ERF_A5,
        warn: warn,
        log: log$1,
        mergeRealAndImagArrays: mergeRealAndImagArrays,
        splitRealAndImagArrays: splitRealAndImagArrays,
        complexWithEvenIndex: complexWithEvenIndex,
        complexWithOddIndex: complexWithOddIndex,
        getComplexWithIndex: getComplexWithIndex,
        assignToTypedArray: assignToTypedArray,
        exponents: exponents,
        exponent: exponent,
        decodeEinsumEquation: decodeEinsumEquation,
        getEinsumPermutation: getEinsumPermutation,
        checkEinsumDimSizes: checkEinsumDimSizes,
        getEinsumComputePath: getEinsumComputePath,
        isIdentityPermutation: isIdentityPermutation,
        prepareSplitSize: prepareSplitSize,
        getSparseFillEmptyRowsIndicesDenseShapeMismatch: getSparseFillEmptyRowsIndicesDenseShapeMismatch,
        getSparseFillEmptyRowsNegativeIndexErrorMessage: getSparseFillEmptyRowsNegativeIndexErrorMessage,
        getSparseFillEmptyRowsOutOfRangeIndexErrorMessage: getSparseFillEmptyRowsOutOfRangeIndexErrorMessage,
        getSparseReshapeMultipleNegativeOneOutputDimErrorMessage: getSparseReshapeMultipleNegativeOneOutputDimErrorMessage,
        getSparseReshapeNegativeOutputDimErrorMessage: getSparseReshapeNegativeOutputDimErrorMessage,
        getSparseReshapeEmptyTensorZeroOutputDimErrorMessage: getSparseReshapeEmptyTensorZeroOutputDimErrorMessage,
        getSparseReshapeInputOutputMultipleErrorMessage: getSparseReshapeInputOutputMultipleErrorMessage,
        getSparseReshapeInputOutputMismatchErrorMessage: getSparseReshapeInputOutputMismatchErrorMessage,
        getSparseSegmentReductionNegativeSegmentIdsErrorMessage: getSparseSegmentReductionNegativeSegmentIdsErrorMessage,
        getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage: getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage,
        getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage: getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage,
        getSparseSegmentReductionIndicesOutOfRangeErrorMessage: getSparseSegmentReductionIndicesOutOfRangeErrorMessage
    };
+
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
    // Namespace object re-exported as `tf.kernel_impls`: kernel implementation
    // helpers defined elsewhere in this bundle.
    var kernel_impls = {
        __proto__: null,
        nonMaxSuppressionV3Impl: nonMaxSuppressionV3Impl,
        nonMaxSuppressionV4Impl: nonMaxSuppressionV4Impl,
        nonMaxSuppressionV5Impl: nonMaxSuppressionV5Impl,
        whereImpl: whereImpl
    };
+
+ exports.Abs = Abs;
+ exports.Acos = Acos;
+ exports.Acosh = Acosh;
+ exports.AdadeltaOptimizer = AdadeltaOptimizer;
+ exports.AdagradOptimizer = AdagradOptimizer;
+ exports.AdamOptimizer = AdamOptimizer;
+ exports.AdamaxOptimizer = AdamaxOptimizer;
+ exports.Add = Add;
+ exports.AddN = AddN;
+ exports.All = All;
+ exports.Any = Any;
+ exports.ArgMax = ArgMax;
+ exports.ArgMin = ArgMin;
+ exports.Asin = Asin;
+ exports.Asinh = Asinh;
+ exports.Atan = Atan;
+ exports.Atan2 = Atan2;
+ exports.Atanh = Atanh;
+ exports.AvgPool = AvgPool;
+ exports.AvgPool3D = AvgPool3D;
+ exports.AvgPool3DGrad = AvgPool3DGrad;
+ exports.AvgPoolGrad = AvgPoolGrad;
+ exports.BatchMatMul = BatchMatMul;
+ exports.BatchToSpaceND = BatchToSpaceND;
+ exports.Bincount = Bincount;
+ exports.BroadcastArgs = BroadcastArgs;
+ exports.BroadcastTo = BroadcastTo;
+ exports.Cast = Cast;
+ exports.Ceil = Ceil;
+ exports.ClipByValue = ClipByValue;
+ exports.Complex = Complex;
+ exports.ComplexAbs = ComplexAbs;
+ exports.Concat = Concat;
+ exports.Conv2D = Conv2D;
+ exports.Conv2DBackpropFilter = Conv2DBackpropFilter;
+ exports.Conv2DBackpropInput = Conv2DBackpropInput;
+ exports.Conv3D = Conv3D;
+ exports.Conv3DBackpropFilterV2 = Conv3DBackpropFilterV2;
+ exports.Conv3DBackpropInputV2 = Conv3DBackpropInputV2;
+ exports.Cos = Cos;
+ exports.Cosh = Cosh;
+ exports.CropAndResize = CropAndResize;
+ exports.Cumsum = Cumsum;
+ exports.DataStorage = DataStorage;
+ exports.DenseBincount = DenseBincount;
+ exports.DepthToSpace = DepthToSpace;
+ exports.DepthwiseConv2dNative = DepthwiseConv2dNative;
+ exports.DepthwiseConv2dNativeBackpropFilter = DepthwiseConv2dNativeBackpropFilter;
+ exports.DepthwiseConv2dNativeBackpropInput = DepthwiseConv2dNativeBackpropInput;
+ exports.Diag = Diag;
+ exports.Dilation2D = Dilation2D;
+ exports.Dilation2DBackpropFilter = Dilation2DBackpropFilter;
+ exports.Dilation2DBackpropInput = Dilation2DBackpropInput;
+ exports.Einsum = Einsum;
+ exports.Elu = Elu;
+ exports.EluGrad = EluGrad;
+ exports.Environment = Environment;
+ exports.Equal = Equal;
+ exports.Erf = Erf;
+ exports.Exp = Exp;
+ exports.ExpandDims = ExpandDims;
+ exports.Expm1 = Expm1;
+ exports.FFT = FFT;
+ exports.Fill = Fill;
+ exports.FlipLeftRight = FlipLeftRight;
+ exports.Floor = Floor;
+ exports.FloorDiv = FloorDiv;
+ exports.FromPixels = FromPixels;
+ exports.FusedBatchNorm = FusedBatchNorm;
+ exports.FusedConv2D = FusedConv2D;
+ exports.FusedDepthwiseConv2D = FusedDepthwiseConv2D;
+ exports.GatherNd = GatherNd;
+ exports.GatherV2 = GatherV2;
+ exports.Greater = Greater;
+ exports.GreaterEqual = GreaterEqual;
+ exports.IFFT = IFFT;
+ exports.Identity = Identity;
+ exports.Imag = Imag;
+ exports.IsFinite = IsFinite;
+ exports.IsInf = IsInf;
+ exports.IsNan = IsNan;
+ exports.KernelBackend = KernelBackend;
+ exports.LRN = LRN;
+ exports.LRNGrad = LRNGrad;
+ exports.LeakyRelu = LeakyRelu;
+ exports.Less = Less;
+ exports.LessEqual = LessEqual;
+ exports.LinSpace = LinSpace;
+ exports.Log = Log;
+ exports.Log1p = Log1p;
+ exports.LogSoftmax = LogSoftmax;
+ exports.LogicalAnd = LogicalAnd;
+ exports.LogicalNot = LogicalNot;
+ exports.LogicalOr = LogicalOr;
+ exports.Max = Max;
+ exports.MaxPool = MaxPool;
+ exports.MaxPool3D = MaxPool3D;
+ exports.MaxPool3DGrad = MaxPool3DGrad;
+ exports.MaxPoolGrad = MaxPoolGrad;
+ exports.MaxPoolWithArgmax = MaxPoolWithArgmax;
+ exports.Maximum = Maximum;
+ exports.Mean = Mean;
+ exports.Min = Min;
+ exports.Minimum = Minimum;
+ exports.MirrorPad = MirrorPad;
+ exports.Mod = Mod;
+ exports.MomentumOptimizer = MomentumOptimizer;
+ exports.Multinomial = Multinomial;
+ exports.Multiply = Multiply;
+ exports.Neg = Neg;
+ exports.NonMaxSuppressionV3 = NonMaxSuppressionV3;
+ exports.NonMaxSuppressionV4 = NonMaxSuppressionV4;
+ exports.NonMaxSuppressionV5 = NonMaxSuppressionV5;
+ exports.NotEqual = NotEqual;
+ exports.OP_SCOPE_SUFFIX = OP_SCOPE_SUFFIX;
+ exports.OneHot = OneHot;
+ exports.OnesLike = OnesLike;
+ exports.Optimizer = Optimizer;
+ exports.OptimizerConstructors = OptimizerConstructors;
+ exports.Pack = Pack;
+ exports.PadV2 = PadV2;
+ exports.Pool = Pool;
+ exports.Pow = Pow;
+ exports.Prelu = Prelu;
+ exports.Prod = Prod;
+ exports.RMSPropOptimizer = RMSPropOptimizer;
+ exports.Range = Range;
+ exports.Real = Real;
+ exports.RealDiv = RealDiv;
+ exports.Reciprocal = Reciprocal;
+ exports.Relu = Relu;
+ exports.Relu6 = Relu6;
+ exports.Reshape = Reshape;
+ exports.ResizeBilinear = ResizeBilinear;
+ exports.ResizeBilinearGrad = ResizeBilinearGrad;
+ exports.ResizeNearestNeighbor = ResizeNearestNeighbor;
+ exports.ResizeNearestNeighborGrad = ResizeNearestNeighborGrad;
+ exports.Reverse = Reverse;
+ exports.RotateWithOffset = RotateWithOffset;
+ exports.Round = Round;
+ exports.Rsqrt = Rsqrt;
+ exports.SGDOptimizer = SGDOptimizer;
+ exports.ScatterNd = ScatterNd;
+ exports.Select = Select;
+ exports.Selu = Selu;
+ exports.Sigmoid = Sigmoid;
+ exports.Sign = Sign;
+ exports.Sin = Sin;
+ exports.Sinh = Sinh;
+ exports.Slice = Slice;
+ exports.Softmax = Softmax;
+ exports.Softplus = Softplus;
+ exports.SpaceToBatchND = SpaceToBatchND;
+ exports.SparseFillEmptyRows = SparseFillEmptyRows;
+ exports.SparseReshape = SparseReshape;
+ exports.SparseSegmentMean = SparseSegmentMean;
+ exports.SparseSegmentSum = SparseSegmentSum;
+ exports.SparseToDense = SparseToDense;
+ exports.SplitV = SplitV;
+ exports.Sqrt = Sqrt;
+ exports.Square = Square;
+ exports.SquaredDifference = SquaredDifference;
+ exports.Step = Step;
+ exports.StridedSlice = StridedSlice;
+ exports.StringNGrams = StringNGrams;
+ exports.StringSplit = StringSplit;
+ exports.StringToHashBucketFast = StringToHashBucketFast;
+ exports.Sub = Sub;
+ exports.Sum = Sum;
+ exports.Tan = Tan;
+ exports.Tanh = Tanh;
+ exports.Tensor = Tensor;
+ exports.TensorBuffer = TensorBuffer;
+ exports.Tile = Tile;
+ exports.TopK = TopK;
+ exports.Transform = Transform;
+ exports.Transpose = Transpose;
+ exports.Unique = Unique;
+ exports.Unpack = Unpack;
+ exports.UnsortedSegmentSum = UnsortedSegmentSum;
+ exports.Variable = Variable;
+ exports.ZerosLike = ZerosLike;
+ exports._FusedMatMul = _FusedMatMul;
+ exports.abs = abs;
+ exports.acos = acos;
+ exports.acosh = acosh;
+ exports.add = add;
+ exports.addN = addN;
+ exports.all = all;
+ exports.any = any;
+ exports.argMax = argMax;
+ exports.argMin = argMin;
+ exports.asin = asin;
+ exports.asinh = asinh;
+ exports.atan = atan;
+ exports.atan2 = atan2;
+ exports.atanh = atanh;
+ exports.avgPool = avgPool;
+ exports.avgPool3d = avgPool3d;
+ exports.backend = backend;
+ exports.backend_util = backend_util;
+ exports.basicLSTMCell = basicLSTMCell;
+ exports.batchNorm = batchNorm;
+ exports.batchNorm2d = batchNorm2d;
+ exports.batchNorm3d = batchNorm3d;
+ exports.batchNorm4d = batchNorm4d;
+ exports.batchToSpaceND = batchToSpaceND;
+ exports.bincount = bincount;
+ exports.booleanMaskAsync = booleanMaskAsync;
+ exports.broadcastArgs = broadcastArgs;
+ exports.broadcastTo = broadcastTo;
+ exports.broadcast_util = broadcast_util;
+ exports.browser = browser;
+ exports.buffer = buffer;
+ exports.cast = cast;
+ exports.ceil = ceil;
+ exports.clipByValue = clipByValue;
+ exports.clone = clone;
+ exports.complex = complex;
+ exports.concat = concat;
+ exports.concat1d = concat1d;
+ exports.concat2d = concat2d;
+ exports.concat3d = concat3d;
+ exports.concat4d = concat4d;
+ exports.conv1d = conv1d;
+ exports.conv2d = conv2d$1;
+ exports.conv2dTranspose = conv2dTranspose;
+ exports.conv3d = conv3d;
+ exports.conv3dTranspose = conv3dTranspose;
+ exports.copyRegisteredKernels = copyRegisteredKernels;
+ exports.cos = cos;
+ exports.cosh = cosh;
+ exports.cosineWindow = cosineWindow;
+ exports.cumsum = cumsum;
+ exports.customGrad = customGrad;
+ exports.denseBincount = denseBincount;
+ exports.deprecationWarn = deprecationWarn;
+ exports.depthToSpace = depthToSpace;
+ exports.depthwiseConv2d = depthwiseConv2d$1;
+ exports.device_util = device_util;
+ exports.diag = diag;
+ exports.dilation2d = dilation2d;
+ exports.disableDeprecationWarnings = disableDeprecationWarnings;
+ exports.dispose = dispose;
+ exports.disposeVariables = disposeVariables;
+ exports.div = div;
+ exports.divNoNan = divNoNan;
+ exports.dot = dot;
+ exports.dropout = dropout;
+ exports.einsum = einsum;
+ exports.elu = elu;
+ exports.enableDebugMode = enableDebugMode;
+ exports.enableProdMode = enableProdMode;
+ exports.enclosingPowerOfTwo = enclosingPowerOfTwo;
+ exports.engine = engine;
+ exports.env = env;
+ exports.equal = equal;
+ exports.erf = erf;
+ exports.exp = exp;
+ exports.expandDims = expandDims;
+ exports.expm1 = expm1;
+ exports.eye = eye;
+ exports.fft = fft;
+ exports.fill = fill;
+ exports.findBackend = findBackend;
+ exports.findBackendFactory = findBackendFactory;
+ exports.floor = floor;
+ exports.floorDiv = floorDiv;
+ exports.fused = fused_ops;
+ exports.gather = gather;
+ exports.gatherND = gatherND;
+ exports.gather_util = gather_nd_util;
+ exports.getBackend = getBackend;
+ exports.getGradient = getGradient;
+ exports.getKernel = getKernel;
+ exports.getKernelsForBackend = getKernelsForBackend;
+ exports.grad = grad;
+ exports.grads = grads;
+ exports.greater = greater;
+ exports.greaterEqual = greaterEqual;
+ exports.ifft = ifft;
+ exports.imag = imag;
+ exports.image = image;
+ exports.inTopKAsync = inTopKAsync;
+ exports.io = io;
+ exports.irfft = irfft;
+ exports.isFinite = isFinite$1;
+ exports.isInf = isInf;
+ exports.isNaN = isNaN$1;
+ exports.keep = keep;
+ exports.kernel_impls = kernel_impls;
+ exports.leakyRelu = leakyRelu;
+ exports.less = less;
+ exports.lessEqual = lessEqual;
+ exports.linalg = linalg;
+ exports.linspace = linspace;
+ exports.localResponseNormalization = localResponseNormalization;
+ exports.log = log;
+ exports.log1p = log1p;
+ exports.logSigmoid = logSigmoid;
+ exports.logSoftmax = logSoftmax;
+ exports.logSumExp = logSumExp;
+ exports.logicalAnd = logicalAnd;
+ exports.logicalNot = logicalNot;
+ exports.logicalOr = logicalOr;
+ exports.logicalXor = logicalXor;
+ exports.losses = losses;
+ exports.matMul = matMul$1;
+ exports.math = math;
+ exports.max = max;
+ exports.maxPool = maxPool;
+ exports.maxPool3d = maxPool3d;
+ exports.maxPoolWithArgmax = maxPoolWithArgmax;
+ exports.maximum = maximum;
+ exports.mean = mean;
+ exports.memory = memory;
+ exports.meshgrid = meshgrid;
+ exports.min = min;
+ exports.minimum = minimum;
+ exports.mirrorPad = mirrorPad;
+ exports.mod = mod;
+ exports.moments = moments;
+ exports.movingAverage = movingAverage;
+ exports.mul = mul;
+ exports.multiRNNCell = multiRNNCell;
+ exports.multinomial = multinomial;
+ exports.neg = neg;
+ exports.nextFrame = nextFrame;
+ exports.norm = norm;
+ exports.notEqual = notEqual;
+ exports.oneHot = oneHot;
+ exports.ones = ones;
+ exports.onesLike = onesLike;
+ exports.op = op;
+ exports.outerProduct = outerProduct;
+ exports.pad = pad;
+ exports.pad1d = pad1d;
+ exports.pad2d = pad2d;
+ exports.pad3d = pad3d;
+ exports.pad4d = pad4d;
+ exports.pool = pool;
+ exports.pow = pow;
+ exports.prelu = prelu;
+ exports.print = print;
+ exports.prod = prod;
+ exports.profile = profile;
+ exports.rand = rand;
+ exports.randomGamma = randomGamma;
+ exports.randomNormal = randomNormal;
+ exports.randomUniform = randomUniform;
+ exports.range = range;
+ exports.ready = ready;
+ exports.real = real;
+ exports.reciprocal = reciprocal;
+ exports.registerBackend = registerBackend;
+ exports.registerGradient = registerGradient;
+ exports.registerKernel = registerKernel;
+ exports.relu = relu;
+ exports.relu6 = relu6;
+ exports.removeBackend = removeBackend;
+ exports.reshape = reshape;
+ exports.reverse = reverse;
+ exports.reverse1d = reverse1d;
+ exports.reverse2d = reverse2d;
+ exports.reverse3d = reverse3d;
+ exports.reverse4d = reverse4d;
+ exports.rfft = rfft;
+ exports.round = round;
+ exports.rsqrt = rsqrt;
+ exports.scalar = scalar;
+ exports.scatterND = scatterND;
+ exports.scatter_util = scatter_nd_util;
+ exports.selu = selu;
+ exports.separableConv2d = separableConv2d;
+ exports.serialization = serialization;
+ exports.setBackend = setBackend;
+ exports.setPlatform = setPlatform;
+ exports.setdiff1dAsync = setdiff1dAsync;
+ exports.sigmoid = sigmoid;
+ exports.sign = sign;
+ exports.signal = signal;
+ exports.sin = sin;
+ exports.sinh = sinh;
+ exports.slice = slice;
+ exports.slice1d = slice1d;
+ exports.slice2d = slice2d;
+ exports.slice3d = slice3d;
+ exports.slice4d = slice4d;
+ exports.slice_util = slice_util;
+ exports.softmax = softmax;
+ exports.softplus = softplus;
+ exports.spaceToBatchND = spaceToBatchND;
+ exports.sparse = sparse;
+ exports.sparseToDense = sparseToDense;
+ exports.spectral = spectral;
+ exports.split = split;
+ exports.sqrt = sqrt;
+ exports.square = square;
+ exports.squaredDifference = squaredDifference;
+ exports.squeeze = squeeze;
+ exports.stack = stack;
+ exports.step = step;
+ exports.stridedSlice = stridedSlice;
+ exports.string = string;
+ exports.sub = sub;
+ exports.sum = sum;
+ exports.sumOutType = sumOutType;
+ exports.tan = tan;
+ exports.tanh = tanh;
+ exports.tensor = tensor;
+ exports.tensor1d = tensor1d;
+ exports.tensor2d = tensor2d;
+ exports.tensor3d = tensor3d;
+ exports.tensor4d = tensor4d;
+ exports.tensor5d = tensor5d;
+ exports.tensor6d = tensor6d;
+ exports.tensor_util = tensor_util;
+ exports.test_util = test_util;
+ exports.tidy = tidy;
+ exports.tile = tile;
+ exports.time = time;
+ exports.topk = topk;
+ exports.train = train;
+ exports.transpose = transpose;
+ exports.truncatedNormal = truncatedNormal;
+ exports.unique = unique;
+ exports.unregisterGradient = unregisterGradient;
+ exports.unregisterKernel = unregisterKernel;
+ exports.unsortedSegmentSum = unsortedSegmentSum;
+ exports.unstack = unstack;
+ exports.upcastType = upcastType;
+ exports.util = util;
+ exports.valueAndGrad = valueAndGrad;
+ exports.valueAndGrads = valueAndGrads;
+ exports.variable = variable;
+ exports.variableGrads = variableGrads;
+ exports.version_core = version;
+ exports.where = where;
+ exports.whereAsync = whereAsync;
+ exports.zeros = zeros;
+ exports.zerosLike = zerosLike;
+
+ Object.defineProperty(exports, '__esModule', { value: true });
+
+})));
+//# sourceMappingURL=tf-core.js.map