From 34ef967513e7e29d7ece92aa0ac89f97c43a8bde Mon Sep 17 00:00:00 2001
From: Ryan Mullins
Date: Thu, 11 Apr 2024 11:12:57 -0700
Subject: [PATCH 01/50] Add Python 3.11 to CI matrix.
PiperOrigin-RevId: 623882200
---
.github/workflows/ci.yml | 2 +-
lit_nlp/examples/models/tfx_model_test.py | 3 ++-
pyproject.toml | 9 +++++----
requirements_examples.txt | 8 ++++----
4 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3046befe..236359e5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -33,7 +33,7 @@ jobs:
strategy:
matrix:
node-version: [18]
- python-version: ["3.10"]
+ python-version: ["3.10", "3.11"]
defaults:
run:
shell: bash -l {0}
diff --git a/lit_nlp/examples/models/tfx_model_test.py b/lit_nlp/examples/models/tfx_model_test.py
index 4b44b2d4..e7b977c9 100644
--- a/lit_nlp/examples/models/tfx_model_test.py
+++ b/lit_nlp/examples/models/tfx_model_test.py
@@ -12,7 +12,8 @@ def setUp(self):
super(TfxModelTest, self).setUp()
self._path = tempfile.mkdtemp()
input_layer = tf.keras.layers.Input(
- shape=(1), dtype=tf.string, name='example')
+ shape=(1,), dtype=tf.string, name='example'
+ )
parsed_input = tf.io.parse_example(
tf.reshape(input_layer, [-1]),
{'input_0': tf.io.FixedLenFeature([1], dtype=tf.float32)})
diff --git a/pyproject.toml b/pyproject.toml
index fb3dea3f..5bed29b2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -79,10 +79,11 @@ keywords = [
[project.optional-dependencies]
# LINT.IfChange
examples = [
- "gunicorn==20.1.0",
- "tensorflow==2.10.0",
- "tensorflow-datasets==4.8.0",
- "tensorflow-text==2.10.0",
+ "gunicorn>=20.1.0",
+ "sentencepiece==0.1.99",
+ "tensorflow>=2.10.0,<2.16.0",
+ "tensorflow-datasets>=4.9.0",
+ "tensorflow-text>=2.10.0,<2.16.0",
"torch>=2.0.0",
"transformers>=4.27.1",
]
diff --git a/requirements_examples.txt b/requirements_examples.txt
index 7778c6d7..3015bfcb 100644
--- a/requirements_examples.txt
+++ b/requirements_examples.txt
@@ -13,11 +13,11 @@
# limitations under the License.
# ==============================================================================
# LINT.IfChange
-gunicorn==20.1.0
+gunicorn>=20.1.0
sentencepiece==0.1.99
-tensorflow==2.10.0
-tensorflow-datasets==4.8.0
-tensorflow-text==2.10.0
+tensorflow>=2.10.0,<2.16.0
+tensorflow-datasets>=4.9.0
+tensorflow-text>=2.10.0,<2.16.0
torch>=2.0.0
transformers>=4.27.1
# LINT.ThenChange(./pyproject.toml)
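The `shape=(1,)` change in `tfx_model_test.py` above turns on a plain-Python detail: `(1)` is just the integer 1, while the trailing comma makes a one-element tuple, the conventional way to spell a 1-D input shape for a Keras `Input` layer. A minimal sketch of the distinction (assumes TensorFlow 2.x; the layer arguments mirror the test above):

```python
import tensorflow as tf

# Parentheses alone do not make a tuple; the trailing comma does.
assert (1) == 1
assert (1,) == tuple([1])

# The corrected form from the patch: an explicit 1-D shape tuple.
input_layer = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='example')
print(input_layer.shape)  # (None, 1): batch dimension plus one string feature
```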
From 63c21a163407a9b2387a86375473ede1313c4b1e Mon Sep 17 00:00:00 2001
From: Ian Tenney
Date: Mon, 15 Apr 2024 15:47:51 -0700
Subject: [PATCH 02/50] Add link to sequence salience paper
PiperOrigin-RevId: 625108188
---
website/sphinx_src/components.md | 22 ++++++++++++++++++----
website/src/tutorials/sequence-salience.md | 10 ++++++----
2 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/website/sphinx_src/components.md b/website/sphinx_src/components.md
index 7cf577f6..f2366d7c 100644
--- a/website/sphinx_src/components.md
+++ b/website/sphinx_src/components.md
@@ -460,11 +460,27 @@ The UI supports multiple options for analysis, including:
For a walkthrough of how to use sequence salience to debug LLMs, check out the
Responsible Generative AI Toolkit at
-https://ai.google.dev/responsible/model_behavior.
+https://ai.google.dev/responsible/model_behavior; for more on the design of the
+system, see our paper at https://arxiv.org/abs/2404.07498.
+
+If you find this useful in your work, please cite Sequence Salience as:
+
+```
+@article{tenney2024interactive,
+ title={Interactive Prompt Debugging with Sequence Salience},
+ author={Tenney, Ian and Mullins, Ryan and Du, Bin and Pandya, Shree and Kahng, Minsuk and Dixon, Lucas},
+ journal={arXiv preprint arXiv:2404.07498},
+ year={2024}
+}
+```
**Code:**
-* LIT-for-Gemma Colab: [`lit_gemma.ipynb`](https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/lit_gemma.ipynb)
+Currently, this works out-of-the-box with Gemma, Llama 2, Mistral, and GPT-2,
+using either KerasNLP or Transformers.
+
+* LIT-for-Gemma Colab:
+ [`lit_gemma.ipynb`](https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/lit_gemma.ipynb)
* Demo binary:
[`lm_salience_demo.py`](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/lm_salience_demo.py)
* KerasNLP model wrappers:
@@ -472,8 +488,6 @@ https://ai.google.dev/responsible/model_behavior.
* Transformers model wrappers:
[`pretrained_lms.py`](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/models/pretrained_lms.py)
-Currently, this works out-of-the-box
-with Gemma models (using Keras) as well as with GPT-2.
## Salience Clustering
diff --git a/website/src/tutorials/sequence-salience.md b/website/src/tutorials/sequence-salience.md
index 51fca21e..bb045c1e 100644
--- a/website/src/tutorials/sequence-salience.md
+++ b/website/src/tutorials/sequence-salience.md
@@ -55,10 +55,11 @@ LIT supports additional LLMs, including [Llama 2][llama] and [Mistral][mistral],
via the HuggingFace Transformers and KerasNLP libraries.
This tutorial was adapted from and expands upon LIT's contributions to the
-[Responsible Generative AI Tookit][rai_toolkit] and the related paper and
-[video][seqsal_video] submitted to the ACL 2024 Systems Demonstration track.
-This is an active and ongoing research area for the LIT team, so expect changes
-and further expansions to this tutorial over time.
+[Responsible Generative AI Toolkit][rai_toolkit] and the related
+[paper][seqsal_paper] and [video][seqsal_video] submitted to the ACL 2024
+System Demonstrations track. This is an active and ongoing research area for
+the LIT team, so expect changes and further expansions to this tutorial over
+time.
## Case Study 1: Debugging Few-Shot Prompts
@@ -486,6 +487,7 @@ helpful guides that can help you develop better prompts, including:
[salience_research_1]: https://dl.acm.org/doi/full/10.1145/3639372
[salience_research_2]: https://arxiv.org/abs/2402.01761
[seqsal_docs]: ../../documentation/components.html#sequence-salience
+[seqsal_paper]: https://arxiv.org/abs/2404.07498
[seqsal_video]: https://youtu.be/EZgUlnWdh0w
[synapis]: https://scholarspace.manoa.hawaii.edu/items/65312e48-5954-4a5f-a1e8-e5119e6abc0a
[toolformer]: https://arxiv.org/abs/2302.04761
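The demo binary referenced above (`lm_salience_demo.py`) follows LIT's standard server pattern. A minimal sketch of that pattern, with the model and dataset wrappers left as hypothetical placeholders; substitute the real classes from `pretrained_lms.py` (Transformers) or `instrumented_keras_lms.py` (KerasNLP) before running:

```python
from lit_nlp import dev_server
from lit_nlp import server_flags

# Hypothetical placeholders: fill in a wrapped LM from
# lit_nlp/examples/models/pretrained_lms.py or instrumented_keras_lms.py,
# plus a prompt dataset, before serving.
models = {"my_lm": ...}
datasets = {"prompts": ...}

lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
lit_demo.serve()
```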
From 9afdf96867af6ecbb5cd825eec09c9fa4c7c7802 Mon Sep 17 00:00:00 2001
From: Ian Tenney
Date: Wed, 17 Apr 2024 01:51:23 +0000
Subject: [PATCH 03/50] Update website with paper links
---
docs/documentation/_sources/components.md.txt | 22 +++++++++++++++----
docs/documentation/components.html | 19 ++++++++++++----
docs/documentation/searchindex.js | 2 +-
docs/tutorials/sequence-salience/index.html | 9 ++++----
4 files changed, 39 insertions(+), 13 deletions(-)
diff --git a/docs/documentation/_sources/components.md.txt b/docs/documentation/_sources/components.md.txt
index 7cf577f6..f2366d7c 100644
--- a/docs/documentation/_sources/components.md.txt
+++ b/docs/documentation/_sources/components.md.txt
@@ -460,11 +460,27 @@ The UI supports multiple options for analysis, including:
For a walkthrough of how to use sequence salience to debug LLMs, check out the
Responsible Generative AI Toolkit at
-https://ai.google.dev/responsible/model_behavior.
+https://ai.google.dev/responsible/model_behavior; for more on the design of the
+system, see our paper at https://arxiv.org/abs/2404.07498.
+
+If you find this useful in your work, please cite Sequence Salience as:
+
+```
+@article{tenney2024interactive,
+ title={Interactive Prompt Debugging with Sequence Salience},
+ author={Tenney, Ian and Mullins, Ryan and Du, Bin and Pandya, Shree and Kahng, Minsuk and Dixon, Lucas},
+ journal={arXiv preprint arXiv:2404.07498},
+ year={2024}
+}
+```
**Code:**
-* LIT-for-Gemma Colab: [`lit_gemma.ipynb`](https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/lit_gemma.ipynb)
+Currently, this works out-of-the-box with Gemma, Llama 2, Mistral, and GPT-2,
+using either KerasNLP or Transformers.
+
+* LIT-for-Gemma Colab:
+ [`lit_gemma.ipynb`](https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/lit_gemma.ipynb)
* Demo binary:
[`lm_salience_demo.py`](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/lm_salience_demo.py)
* KerasNLP model wrappers:
@@ -472,8 +488,6 @@ https://ai.google.dev/responsible/model_behavior.
* Transformers model wrappers:
[`pretrained_lms.py`](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/models/pretrained_lms.py)
-Currently, this works out-of-the-box
-with Gemma models (using Keras) as well as with GPT-2.
## Salience Clustering
diff --git a/docs/documentation/components.html b/docs/documentation/components.html
index 48454b7e..bfbf62a6 100644
--- a/docs/documentation/components.html
+++ b/docs/documentation/components.html
@@ -637,10 +637,23 @@ Sequence Salience.
+Currently, this works out-of-the-box with Gemma, Llama 2, Mistral, and GPT-2,
+using either KerasNLP or Transformers.
Currently, this works out-of-the-box
-with Gemma models (using Keras) as well as with GPT-2.
Salience Clustering
diff --git a/docs/documentation/searchindex.js b/docs/documentation/searchindex.js
index a50bc241..75ece15f 100644
--- a/docs/documentation/searchindex.js
+++ b/docs/documentation/searchindex.js
@@ -1 +1 @@
-Search.setIndex({"docnames": ["api", "components", "demos", "docker", "faq", "frontend_development", "getting_started", "glossary", "includes/highlight_demos", "index", "ui_guide"], "filenames": ["api.md", "components.md", "demos.md", "docker.md", "faq.md", "frontend_development.md", "getting_started.md", "glossary.md", "includes/highlight_demos.md", "index.md", "ui_guide.md"], "titles": ["LIT Python API", "Components and Features", "Demos", "Running LIT in a Docker container", "Frequently Asked Questions", "Frontend Developer Guide", "Getting Started with LIT", "Glossary", "<no title>", "Learning Interpretability Tool (LIT)", "UI Guide"], "terms": {"i": [0, 1, 3, 5, 6, 7, 10], "modular": 0, "compris": [0, 10], "collect": [0, 4, 5, 10], "backend": [0, 4, 5, 7, 10], "written": [0, 4], "frontend": [0, 4, 7, 9], "modul": [0, 1, 2, 3, 4, 7, 9], "typescript": [0, 5], "most": [0, 1, 4, 5, 6, 7, 10], "user": [0, 1, 3, 4, 5, 6, 9], "develop": [0, 1, 4, 9, 10], "against": [0, 1, 2, 4, 10], "which": [0, 1, 2, 3, 4, 5, 6, 7, 10], "document": [0, 1, 4, 5, 6], "below": [0, 1, 2, 3, 5, 10], "allow": [0, 1, 4, 5, 10], "extend": [0, 5], "counterfactu": [0, 2, 6, 7, 9, 10], "The": [0, 1, 2, 3, 4, 5, 6, 7, 9, 10], "ar": [0, 1, 3, 4, 5, 6, 7, 10], "provid": [0, 1, 3, 4, 5, 6, 7, 10], "librari": [0, 1, 5], "can": [0, 1, 3, 4, 5, 6, 10], "through": [0, 1, 4, 10], "own": [0, 1, 9], "demo": [0, 1, 3, 4, 5, 7, 9], "binari": [0, 2, 6, 7, 10], "via": [0, 1, 4], "colab": [0, 1, 4], "also": [0, 1, 3, 4, 5, 6, 10], "regular": [0, 4, 7], "class": [0, 1, 4, 5, 6, 7, 10], "without": [0, 1, 4, 5, 10], "start": [0, 1, 3, 4, 5, 9], "see": [0, 1, 2, 4, 5, 6, 7, 10], "detail": [0, 1, 3, 4, 5, 6, 9], "serv": [0, 1, 3, 4, 5, 6, 7], "each": [0, 1, 2, 3, 5, 6, 10], "implement": [0, 1, 3, 4, 5, 6, 7], "minim": [0, 1, 3, 7, 10], "reli": [0, 5], "spec": [0, 1, 5], "detect": 0, "field": [0, 1, 4, 10], "verifi": [0, 1, 3, 10], "stateless": 0, "cach": [0, 4, 5, 6], "layer": [0, 1, 2, 4, 10], "predict": [0, 4, 5, 7, 9, 10], "thi": [0, 1, 2, 3, 4, 5, 6, 7, 10], "simplifi": [0, 5], "interact": [0, 1, 2, 4, 5, 7, 9, 10], "larg": [0, 1], "like": [0, 1, 4, 5, 7, 10], "bert": [0, 1, 4, 10], "t5": [0, 1, 4, 10], "state": [0, 4, 7, 10], "singl": [0, 1, 2, 4, 5, 6, 7, 10], "page": [0, 1, 5, 6, 9, 10], "app": [0, 3, 4, 5, 10], "built": [0, 2, 3, 5, 6, 7, 10], "1": [0, 1, 4, 5, 6, 10], "mobx": [0, 5], "manag": [0, 1, 4, 5], "It": [0, 1, 5, 10], "consist": [0, 1, 5, 7, 10], "core": [0, 5], "framework": [0, 4, 5, 6, 7, 9], "set": [0, 1, 2, 3, 4, 5, 6, 7], "share": [0, 4, 5], "servic": [0, 5, 7], "persist": [0, 1, 10], "independ": 0, "render": [0, 1, 5, 6, 7], "visual": [0, 1, 2, 4, 5, 7, 9, 10], "support": [0, 2, 4, 5, 6, 9, 10], "For": [0, 1, 2, 4, 5, 6, 10], "guid": [0, 1, 4, 6, 7, 9], "To": [0, 1, 6, 10], "run": [0, 1, 4, 5, 7, 9, 10], "your": [0, 1, 4, 5, 9, 10], "you": [0, 1, 3, 4, 5, 6, 9, 10], "creat": [0, 1, 3, 4, 5, 7, 10], "py": [0, 1, 2, 3, 4, 5, 6, 7], "script": [0, 3, 4, 6], "pass": [0, 1, 3, 4, 5, 6, 10], "def": [0, 1, 3], "main": [0, 1, 3, 5, 9], "_": 0, "mulitinlidata": 0, "mnli_match": 0, "multinlidata": 0, "path": [0, 1, 3, 4, 5, 6], "dev_match": 0, "tsv": [0, 6], "mnli_mismatch": 0, "dev_mismatch": 0, "nlimodel": 0, "model_foo": 0, "foo": [0, 5, 6], "file": [0, 3, 4, 5, 6], "model_bar": 0, "bar": [0, 1, 6, 7], "lit_demo": [0, 6], "lit_nlp": [0, 3, 5, 6], "dev_serv": [0, 3, 5, 6], "port": [0, 3, 5, 6], "4321": [0, 6], "__name__": 0, "__main__": 0, "conceptu": [0, 5], "just": [0, 4, 5, 6, 10], "list": [0, 1, 3, 4, 6, 10], 
"function": [0, 1, 2, 3, 10], "take": [0, 1, 4, 5, 6, 10], "return": [0, 1, 3, 5, 10], "metadata": [0, 1, 5], "describ": [0, 1, 3, 5, 6, 10], "themselv": 0, "other": [0, 1, 2, 4, 5, 6, 7, 10], "pre": [0, 1, 2, 4, 5, 6, 10], "out": [0, 1, 2, 5, 6, 9, 10], "http": [0, 1, 2, 3, 5, 6, 7, 9], "github": [0, 1, 2, 3, 4, 6, 9], "com": 0, "pair": [0, 1, 2, 3, 6, 9], "code": [0, 1, 2, 3, 4, 5, 6, 9], "tree": 0, "ensur": [0, 1, 3, 5], "match": [0, 4], "valu": [0, 1, 3, 4, 5, 10], "veri": [0, 1, 2, 5], "help": [0, 1, 3, 4, 10], "dure": [0, 5], "new": [0, 1, 4, 5, 10], "wrapper": [0, 1, 2, 4, 7], "correct": [0, 10], "behavior": [0, 1, 5], "At": [0, 10], "startup": 0, "flag": [0, 2, 3, 4, 5, 6, 10], "enabl": [0, 1, 6, 10], "There": [0, 1, 4, 7, 10], "three": [0, 2, 4, 5, 10], "mode": [0, 1, 2, 5, 10], "first": [0, 4, 5, 10], "sampl": [0, 1, 4], "5": [0, 1, 2, 5, 10], "all": [0, 1, 3, 4, 5, 7, 10], "from": [0, 1, 2, 3, 5, 6, 7, 10], "addition": [0, 1, 10], "call": [0, 1, 3, 5, 10], "directli": [0, 1, 4, 5, 6, 10], "associ": [0, 2, 5, 9, 10], "inform": [0, 4, 5, 10], "follow": [0, 3, 5, 10], "": [0, 1, 2, 3, 4, 5, 10], "should": [0, 1, 3, 4, 5, 6], "flat": 0, "dict": [0, 1], "self": [0, 1, 5], "_exampl": 0, "oper": [0, 4], "load": [0, 2, 5, 6, 10], "includ": [0, 1, 2, 3, 4, 5, 7, 10], "therefor": [0, 3], "care": 0, "size": [0, 1, 2], "fit": [0, 4], "memori": [0, 1, 4], "displai": [0, 1, 5, 10], "browser": [0, 4, 5], "note": [0, 1, 3, 5, 6, 10], "faq": [0, 1, 9], "limit": [0, 4], "subclass": [0, 5], "usual": [0, 1, 7], "few": [0, 1, 4, 5, 6, 7], "line": [0, 1, 3, 6, 10], "complet": [0, 1, 3, 10], "multinli": [0, 2], "loader": [0, 1], "nli_label": 0, "entail": 0, "neutral": 0, "contradict": 0, "__init__": [0, 1], "str": [0, 1, 3], "read": [0, 4, 5], "eval": [0, 2], "distribut": [0, 1], "glue": [0, 1, 3], "benchmark": 0, "df": 0, "panda": 0, "read_csv": 0, "sep": 0, "t": [0, 1, 3, 4, 5, 7, 10], "store": [0, 4, 5, 10], "conform": 0, "premis": [0, 2], "row": [0, 1, 10], "sentence1": 0, "hypothesi": [0, 1, 2], "sentence2": 0, "label": [0, 2, 10], "gold_label": 0, "genr": 0, "iterrow": 0, "lit_typ": [0, 1], "textseg": [0, 1], "categorylabel": [0, 1], "vocab": [0, 1], "we": [0, 1, 3, 4, 5, 6, 7, 10], "addit": [0, 1, 3, 4, 5, 6, 10], "don": [0, 1, 4, 5], "have": [0, 1, 3, 5, 10], "four": 0, "string": [0, 1, 4, 5], "semant": [0, 1], "tell": 0, "bit": [0, 5], "about": [0, 1, 5, 10], "how": [0, 1, 3, 5, 6, 10], "them": [0, 1, 4, 5, 10], "treat": [0, 1, 4], "natur": [0, 2, 9], "languag": [0, 1, 7, 9, 10], "text": [0, 1, 2, 4], "categor": [0, 1, 2, 4, 10], "featur": [0, 2, 4, 5, 6, 7, 9, 10], "fix": [0, 1], "known": [0, 7, 10], "possibl": [0, 1, 5], "unknown": 0, "open": [0, 1, 9], "tensorflow": [0, 2], "simpli": [0, 1, 3], "wrap": [0, 1, 4], "freeli": 0, "add": [0, 1, 3, 4, 5, 10], "abov": [0, 1, 3, 4, 5, 6, 10], "mai": [0, 1, 4, 5, 6, 7, 10], "awar": 0, "recogn": [0, 1], "slice": [0, 1, 2, 4, 7, 9], "bin": 0, "etc": [0, 3, 4], "thei": [0, 1, 4, 5, 10], "similar": [0, 3, 10], "tf": [0, 1], "scope": 0, "aim": [0, 5], "quick": [0, 1, 2, 10], "iter": 0, "step": [0, 3, 5], "end": [0, 3, 10], "same": [0, 1, 5], "datapoint": [0, 1, 4, 7, 9], "n": 0, "seed": 0, "42": 0, "random": [0, 1, 10], "remap": 0, "field_map": 0, "renam": 0, "both": [0, 1, 5], "latter": 0, "shortcut": 0, "one": [0, 1, 3, 4, 5, 6, 7, 10], "anoth": [0, 1, 5, 7, 10], "expect": [0, 4], "2": [0, 1, 5], "produc": [0, 1, 3, 10], "method": [0, 1, 2, 3, 4, 5, 6, 7, 9, 10], "input_spec": [0, 1], "necessari": [0, 3, 5], "output_spec": [0, 1], "ani": [0, 1, 3, 4, 
5, 10], "sequenc": [0, 3, 9], "satisfi": 0, "yield": 0, "parallel": 0, "might": [0, 1, 3, 10], "look": [0, 1, 3, 4, 10], "someth": [0, 1, 5, 7], "infer": [0, 1, 2, 4, 5, 6, 10], "model_path": 0, "kw": 0, "so": [0, 1, 3, 4, 10], "re": [0, 4, 5, 7], "readi": 0, "_model": 0, "_load_my_model": 0, "pred": 0, "stream": 0, "convert_dict_input": 0, "d": [0, 3, 10], "preprocess": 0, "predict_exampl": 0, "parent": [0, 1, 5], "keyword": 0, "where": [0, 1, 2, 3, 5, 6, 10], "gold": [0, 1, 2], "when": [0, 1, 4, 5, 7, 10], "comput": [0, 1, 4, 5, 9], "proba": [0, 1], "multiclasspr": [0, 1, 10], "unlik": [0, 1], "incomplet": 0, "ll": [0, 1, 4, 5], "need": [0, 1, 3, 4, 5, 6, 7], "accordingli": [0, 1], "post": [0, 1, 2], "process": [0, 1, 2, 5, 9], "token": [0, 2, 4, 9, 10], "mani": [0, 1, 3, 4, 5, 6, 7, 10], "deep": 0, "learn": [0, 1, 7, 10], "batch": 0, "thu": [0, 5], "batchedmodel": 0, "simpl": [0, 1, 5, 6, 10], "must": [0, 1, 5, 10], "predict_minibatch": 0, "convert": [0, 1], "jsondict": 0, "object": [0, 4], "appropri": [0, 1, 4], "represent": [0, 1, 2], "typic": [0, 1], "map": [0, 1, 2, 3, 5], "align": [0, 1], "tensor": 0, "befor": [0, 3, 4, 6, 10], "want": [0, 1, 3, 4, 5, 6, 9, 10], "overrid": [0, 3, 5], "max_minibatch_s": 0, "determin": [0, 1, 5, 10], "If": [0, 1, 4, 5, 6, 9, 10], "remot": [0, 7], "consid": 0, "batchedremotemodel": 0, "base": [0, 2, 3, 5, 9, 10], "request": [0, 4, 5], "thread": 0, "pool": 0, "defin": [0, 1, 3, 5, 10], "black": [0, 1], "box": [0, 1, 10], "intern": [0, 1, 5], "richer": 0, "view": [0, 1, 4, 10], "correspond": [0, 1], "hidden": [0, 5], "activ": [0, 1, 10], "gradient": [0, 2, 10], "word": [0, 1, 2, 10], "embed": [0, 2, 4, 9], "attent": [0, 2, 9], "sever": [0, 1, 3, 4], "two": [0, 1, 2, 3, 5, 10], "differ": [0, 1, 2, 4, 5, 10], "vector": [0, 1], "easili": [0, 1, 3, 4, 6, 10], "output_emb": 0, "cl": 0, "top": [0, 1, 5], "mean_word_emb": 0, "mean": [0, 1, 10], "than": [0, 1, 4, 5, 10], "segment": [0, 2], "premise_token": 0, "hypothesis_token": 0, "here": [0, 3, 5], "refer": [0, 1, 2, 3, 5, 7, 10], "premise_grad": 0, "tokengradi": [0, 1], "hypothesis_grad": 0, "similarli": [0, 1, 10], "full": [0, 1, 4, 5, 7, 10], "baz": 0, "spam": 0, "egg": 0, "attention_layer0": 0, "attentionhead": [0, 1], "attention_layer1": 0, "attention_layer2": 0, "sinc": [0, 1, 4, 5], "dictionari": [0, 5], "dataclass": 0, "popul": 0, "loop": 0, "would": [0, 1], "entri": [0, 5, 10], "becaus": [0, 1, 3, 5], "often": [0, 1, 7], "tightli": 0, "coupl": [0, 4], "intermedi": 0, "level": [0, 1, 2, 9, 10], "expos": 0, "automat": [0, 1, 5, 10], "projector": [0, 2, 4, 9], "salienc": [0, 2, 6, 7, 9], "multi": [0, 2, 4, 10], "head": [0, 1, 10], "wai": [0, 1, 2, 4, 5, 10], "By": [0, 1, 10], "default": [0, 1, 4, 5, 6, 9, 10], "requir": [0, 1, 3, 5, 10], "howev": [0, 4, 5], "fals": [0, 1, 10], "wish": 0, "accept": [0, 1], "And": [0, 3, 5], "logic": [0, 1, 5], "bypass": 0, "input_token": [0, 1], "ex": 0, "get": [0, 1, 3, 5, 7, 9, 10], "rest": [0, 1], "though": [0, 1, 5, 6, 10], "omit": 0, "entir": [0, 1, 2, 10], "aid": 0, "umap": [0, 1, 2, 7, 10], "plug": [0, 1], "ins": 0, "some": [0, 1, 2, 5, 6, 10], "3": [0, 1, 5, 10], "local": [0, 1, 4], "gradientnorm": 0, "score": [0, 9, 10], "integr": [0, 2, 9], "tokenembed": [0, 1], "well": [0, 1, 2, 4, 5, 6, 10], "target": [0, 2, 10], "pin": [0, 1, 4, 10], "involv": [0, 1, 3, 4, 6], "lit_model": 0, "lit_dataset": 0, "model_output": 0, "none": [0, 1, 5, 10], "config": 0, "runtim": [0, 1, 4], "threshold": [0, 2, 10], "classif": [0, 4, 9, 10], "unconstrain": 0, "up": [0, 1, 4, 5, 
10], "correctli": [0, 10], "particular": [0, 1, 5, 10], "while": [0, 1, 3, 4, 5, 10], "aggreg": [0, 9], "summari": 0, "respons": [0, 1, 2, 5], "what": [0, 1, 3, 5, 10], "A": [0, 1, 3, 5, 7, 10], "over": [0, 1, 7, 10], "relev": 0, "find_field": 0, "find": [0, 1, 2, 3, 10], "grad_field": 0, "util": [0, 10], "find_spec_kei": 0, "f": 0, "tokens_field": 0, "pytyp": 0, "disabl": [0, 5], "attribut": [0, 5, 9], "error": [0, 2, 6, 7, 10], "assert": 0, "isinst": 0, "given": [0, 1, 5, 10], "log": [0, 1, 3, 10], "info": [0, 3], "found": [0, 3, 5, 7, 9, 10], "len": 0, "0": [0, 1, 2, 5, 6, 10], "pylint": 0, "g": [0, 1, 3, 4, 7, 10], "explicit": [0, 4, 5], "length": [0, 1, 4, 10], "test": [0, 1, 2, 5], "do": [0, 1, 4, 5], "work": [0, 1, 2, 4, 5], "dtype": [0, 1], "tokensali": 0, "ha": [0, 1, 4, 5, 10], "overhead": 0, "benefit": 0, "flexibl": [0, 1], "specifi": [0, 1, 2, 4, 5], "complex": [0, 1, 5], "depend": [0, 2, 3, 4, 5, 10], "between": [0, 1, 2, 5, 10], "multipl": [0, 2, 3, 5, 10], "simplemetr": 0, "unpack": 0, "is_compat": 0, "name": [0, 1, 2, 3, 4, 5, 7, 10], "regressionmetr": 0, "standard": [0, 1, 4, 10], "regress": [0, 9, 10], "field_spec": 0, "littyp": 0, "bool": 0, "true": [0, 2, 3, 5, 10], "regressionscor": [0, 1], "float": [0, 1], "label_spec": 0, "scalar": [0, 4], "pred_spec": 0, "del": 0, "mse": 0, "sklearn_metr": 0, "mean_squared_error": 0, "pearsonr": 0, "scipy_stat": 0, "spearmanr": 0, "kei": [0, 1], "compar": [0, 1, 2, 4, 9], "These": [0, 1, 3, 4, 5, 10], "onli": [0, 1, 3, 4, 5, 10], "techniqu": [0, 1, 2], "back": [0, 1, 5, 7, 10], "translat": [0, 1, 2, 4, 5, 10], "feedback": [0, 5, 10], "adversari": [0, 1, 2], "attack": [0, 1], "generate_al": 0, "arg": [0, 3, 5], "current": [0, 1, 4, 5, 10], "belong": [0, 1, 10], "conveni": [0, 4, 5, 10], "As": [0, 1, 10], "argument": [0, 3], "substitut": [0, 1], "replac": [0, 1, 10], "foreign": 0, "sourc": [0, 1, 3, 6, 9, 10], "order": [0, 1, 4, 5, 10], "paraphras": [0, 1, 10], "googl": [0, 1, 2], "cloud": [0, 2], "perform": [0, 1, 5, 10], "those": [0, 1, 3, 4, 10], "project": [0, 1, 5, 7, 10], "doc": [0, 3, 5, 10], "setup": [0, 10], "Then": [0, 1, 3, 6], "download": [0, 10], "applic": [0, 1, 7, 9, 10], "credenti": 0, "google_application_credenti": 0, "environ": [0, 3, 4], "variabl": [0, 3, 5], "point": [0, 1, 4, 5, 10], "With": [0, 5, 10], "make": [0, 1, 4, 5, 10], "backtranl": 0, "constructor": [0, 4, 5, 6], "time": [0, 4, 10], "number": [0, 1, 2, 4, 5, 10], "lime": [0, 2, 10], "pivot": 0, "dsl": 0, "auto": 0, "form": 0, "config_spec": 0, "sparsemultilabel": [0, 1], "bg": 0, "de": 0, "el": 0, "en": [0, 2], "e": [0, 1, 3, 4, 7, 10], "fr": 0, "hi": [0, 2, 10], "ru": 0, "sw": 0, "th": 0, "tr": 0, "ur": 0, "vi": 0, "zh": 0, "give": [0, 1, 6, 10], "its": [0, 1, 5, 10], "interpreter_control": 0, "slider": [0, 1, 10], "numer": [0, 1, 2, 4, 10], "min_val": 0, "max_val": 0, "boolean": [0, 5], "booleanlittyp": 0, "checkbox": [0, 1, 10], "dropdown": [0, 10], "seri": 0, "comma": [0, 10], "separ": [0, 1, 3, 4, 5, 10], "pars": [0, 1, 3, 5], "suppli": 0, "singlefieldmatch": 0, "act": [0, 1], "multifieldmatch": 0, "except": [0, 7], "select": [0, 2, 4, 5, 9], "control": [0, 1, 4, 5, 7, 10], "choos": [0, 1], "perturb": [0, 1, 2], "hotflip": [0, 1, 2, 10], "around": [0, 5], "web": [0, 4, 5, 7, 10], "record": 0, "serial": 0, "json": 0, "commun": [0, 1], "client": [0, 7, 9, 10], "introduc": [0, 5, 10], "understand": [0, 1, 2, 10], "shape": [0, 1], "being": [0, 1, 5, 10], "specif": [0, 1, 2, 5, 7, 10], "properti": [0, 5], "whose": 0, "annot": [0, 1], "hierarch": 0, 
"inherit": [0, 7], "canon": 0, "kind": 0, "v": [0, 10], "meta": 0, "context": 0, "implic": 0, "per": [0, 1, 2, 5, 10], "everi": [0, 1, 5, 10], "init_spec": 0, "instanc": [0, 2, 5, 7, 9], "empti": [0, 3], "noth": 0, "show": [0, 1, 2, 5, 10], "section": [0, 1, 10], "global": [0, 1, 5, 7], "subset": [0, 1, 2, 10], "paramet": [0, 3, 10], "alwai": [0, 1, 3, 7], "meta_spec": 0, "essenti": 0, "ideal": [0, 1], "wherea": 0, "slight": 0, "variat": 0, "tradit": 0, "metricresult": 0, "higher": [0, 1], "lower": [0, 5, 10], "closer": 0, "zero": 0, "better": [0, 1, 10], "encapsul": 0, "impli": 0, "index": [0, 5, 10], "element": [0, 1, 4, 7], "contain": [0, 1, 4, 5, 7, 10], "po": 0, "sequencetag": [0, 1], "item": 0, "indic": [0, 1, 10], "transit": [0, 1], "zip": 0, "strict": 0, "10": [0, 1, 5], "pseudo": 0, "ground": [0, 1, 10], "truth": [0, 1, 10], "repres": [0, 1], "appear": [0, 1, 10], "assum": [0, 3, 10], "pattern": [0, 1, 5], "manipul": [0, 5, 10], "could": [0, 1, 7, 10], "former": [0, 7, 10], "swap": 0, "mask": [0, 1, 2], "feed": [0, 1, 7], "fill": [0, 10], "plai": [0, 6, 9], "critic": 0, "role": 0, "reliabl": 0, "interoper": 0, "robust": [0, 1, 10], "univers": 0, "is_compatible_with_dataset": 0, "wordreplac": 0, "doe": [0, 1, 3, 4, 10], "curv": 0, "gradientdotinput": [0, 1], "shoulddisplaymodul": 0, "suffici": [0, 1], "caus": [0, 5, 10], "jitter": [0, 2, 10], "disappear": 0, "reorder": [0, 5], "resiz": [0, 4, 7], "switch": [0, 1, 2, 10], "heterogen": 0, "findspeckei": 0, "identifi": 0, "interest": [0, 1, 10], "respect": [0, 1, 10], "import": [0, 1, 2, 5, 10], "build": [0, 1, 5, 9], "litmetadata": 0, "litapp": [0, 5], "raw": [0, 1], "jupyt": [0, 6], "repl": 0, "encourag": [0, 1], "explicitli": [0, 1, 5, 10], "avoid": [0, 5], "chase": 0, "red": [0, 10], "her": 0, "mnli": 0, "buffet": 0, "la": 0, "cart": 0, "travel": 0, "task": [0, 1, 2, 5, 6, 10], "notic": 0, "967": 0, "024": 0, "009": 0, "classificationresult": 0, "human": [0, 1], "readabl": 0, "classification_result": 0, "predicted_class": 0, "integ": 0, "predefin": [0, 1], "rang": [0, 1, 2, 10], "overridden": 0, "seen": 0, "summar": [0, 2, 10], "tabl": [0, 1, 4, 5], "bracket": 0, "syntax": 0, "num_token": [0, 1], "numpi": [0, 1], "arrai": [0, 1], "insid": [0, 3, 10], "descript": [0, 3, 5], "untoken": 0, "generatedtext": [0, 1], "seq2seq": 0, "url": [0, 1, 4, 5, 9], "generatedurl": 0, "may": 0, "real": 0, "inappropri": 0, "searchqueri": 0, "search": [0, 1, 10], "queri": [0, 1, 2, 4, 5], "opaqu": 0, "ignor": 0, "referencetext": [0, 1], "mt": 0, "tupl": [0, 1], "generatedtextcandid": [0, 1], "candid": [0, 1, 2, 10], "beam": [0, 1, 2], "tokentopkpr": 0, "32768": 0, "32767": 0, "int": 0, "imagebyt": [0, 1], "imag": [0, 4, 9], "base64": [0, 1], "encod": [0, 1, 4, 10], "jpegbyt": 0, "pngbyte": 0, "referencescor": [0, 1], "vocabulari": 0, "multiclass": [0, 2], "probabl": [0, 1, 2, 4, 10], "num_label": 0, "non": [0, 1, 2, 4, 10], "exclus": [0, 1], "sparsemultilabelpr": [0, 1], "spars": 0, "tag": [0, 1, 3, 5, 10], "spanlabel": [0, 1], "span": [0, 10], "j": [0, 1, 5], "edgelabel": [0, 1], "edg": [0, 2], "structur": [0, 5, 9, 10], "corefer": 0, "srl": [0, 1], "arxiv": 0, "org": [0, 9], "ab": 0, "1905": 0, "06316": 0, "multisegmentannot": [0, 1], "byte": [0, 1], "annotationclust": 0, "emb_dim": [0, 1], "imagegradi": [0, 1], "pixel": [0, 9], "image_height": [0, 1], "image_width": [0, 1], "color_channel": [0, 1], "group": [0, 1, 5, 7, 10], "num_head": [0, 1], "plain": [0, 1, 2], "further": 0, "stringlittyp": 0, "urllittyp": 0, "collis": 0, "protect": 0, 
"page_titl": 0, "titl": [0, 5, 9], "canonical_url": [0, 10], "shortlink": 0, "copi": [0, 4, 5, 10], "link": [0, 6, 10], "default_layout": 0, "demo_mod": [0, 3, 4], "kiosk": 0, "save": [0, 1, 4, 5, 10], "untrust": 0, "inline_doc": 0, "markdown": 0, "panel": [0, 5, 7, 10], "onboard_start_doc": 0, "onboard": 0, "splash": 0, "screen": [0, 5, 10], "onboard_end_doc": 0, "last": [0, 2, 3], "server_flag": [0, 5], "litcanonicallayout": [0, 5], "lm_layout": 0, "upper": [0, 5, 10], "embeddingsmodul": 0, "datatablemodul": [0, 5], "datapointeditormodul": [0, 5], "languagemodelpredictionmodul": 0, "confusionmatrixmodul": 0, "generatormodul": 0, "lm": 0, "get_flag": 0, "lm_demo": [0, 2, 3, 5], "achiev": [0, 2], "major": [0, 10], "content": [0, 4, 10], "area": [0, 10], "left": [0, 1, 2, 5, 10], "right": [0, 1, 2, 5, 10], "shown": [0, 1, 3, 10], "initi": [0, 1, 10], "set_default": [0, 3, 5], "my_layout_nam": 0, "fly": 0, "param": [0, 4, 5], "preced": 0, "comprehens": 0, "remov": [0, 1, 5, 10], "chang": [0, 1, 3, 5, 10], "experiment": [0, 1, 4, 5, 10], "desir": [0, 2, 5, 10], "altern": [0, 1], "connect": [0, 1, 4], "after": [0, 1, 5, 10], "instal": [0, 3, 9], "pip": [0, 6], "litwidget": [0, 6], "height": [0, 5, 10], "cell": [0, 1, 4, 6, 10], "widget": [0, 4, 7], "tab": [0, 1, 4, 5, 10], "open_in_new_tab": 0, "certain": 0, "stop": 0, "shut": 0, "down": [0, 1, 10], "free": 0, "resourc": 0, "plan": 0, "doesn": [0, 7, 10], "still": [0, 1], "standalon": [0, 4], "handi": [0, 1], "reload": [0, 1, 5], "click": [0, 1, 2, 5, 10], "sst2data": 0, "print": 0, "sentenc": [0, 1, 2, 10], "glue_model": [0, 4], "sst2model": 0, "cls_emb": 0, "Or": 0, "lime_explain": 0, "lit_components_exampl": [0, 6], "ipynb": [0, 1, 6], "happi": 0, "coincid": 0, "tool": [0, 1, 5, 7, 10], "relat": [0, 1, 10], "solv": 0, "case": [0, 1, 2, 3, 4, 5, 6, 10], "question": [0, 1], "qa": 0, "trivial": 0, "practic": [0, 1, 5], "endpoint": [0, 1, 5, 7], "lit": [1, 2, 5, 7, 10], "agnost": 1, "compat": [1, 5, 10], "python": [1, 3, 4, 5, 6, 7, 9, 10], "In": [1, 4, 5, 6, 10], "ve": 1, "tf2": [1, 2], "jax": 1, "pytorch": 1, "us": [1, 2, 3, 5, 7, 9, 10], "custom": [1, 2, 4, 7, 9, 10], "c": [1, 10], "clif": 1, "rpc": 1, "aren": [1, 10], "constraint": [1, 2], "beyond": 1, "impos": 1, "platform": 1, "coexist": 1, "underli": [1, 4], "hardwar": 1, "gpu": [1, 4, 6], "scale": [1, 9], "dataset": [1, 2, 5, 6, 7, 9, 10], "kera": [1, 2, 4], "easi": [1, 5, 10], "access": [1, 4, 5, 6, 9, 10], "interpret": [1, 2, 6, 7, 10], "great": [1, 10], "place": [1, 5, 10], "small": [1, 5, 6], "experi": 1, "academ": 1, "more": [1, 3, 4, 5, 6, 7, 10], "due": 1, "graph": [1, 10], "session": 1, "cannot": 1, "invoc": 1, "weight": [1, 5], "export": [1, 5, 10], "savedmodel": [1, 2], "an": [1, 3, 4, 5, 7, 9, 10], "eager": 1, "lead": [1, 5, 10], "much": [1, 5, 10], "simpler": 1, "servomat": 1, "usag": [1, 4, 9], "remain": 1, "server": [1, 3, 4, 5, 6, 7, 9, 10], "stub": [1, 5], "handl": [1, 2, 4, 5, 7, 10], "format": [1, 2, 4, 5, 10], "convers": 1, "purpos": 1, "interfac": [1, 5], "remote_model": [1, 4], "best": [1, 2, 5], "isn": [1, 10], "situat": 1, "staticpredict": 1, "lookup": [1, 10], "quickli": [1, 10], "brows": 1, "retain": 1, "rich": 1, "almost": 1, "tfrecord": [1, 6], "capacitor": 1, "sstabl": 1, "even": 1, "sql": 1, "api": [1, 3, 4, 5, 6, 7, 9, 10], "our": [1, 3, 4, 5, 6, 9, 10], "tfd": 1, "ui": [1, 5, 6, 7, 9], "workflow": [1, 9], "extens": [1, 5, 9], "system": [1, 3, 5, 7, 9, 10], "modal": [1, 4], "common": [1, 2, 5], "nlp": [1, 3, 6, 9], "domain": 1, "ad": [1, 4, 5, 9, 10], 
"explor": [1, 2, 5, 7, 10], "side": [1, 2, 10], "avail": [1, 4, 5, 6, 10], "matric": 1, "basic": [1, 9], "sentiment": 1, "simple_tf2_demo": 1, "evalu": [1, 2, 4, 10], "neg": [1, 10], "design": [1, 4, 9], "null_idx": 1, "commonli": [1, 7, 10], "precis": [1, 10], "recal": 1, "f1": 1, "auc": 1, "aucpr": 1, "comment": [1, 3], "toxic": [1, 10], "posit": [1, 2, 10], "margin": [1, 2, 10], "bia": 1, "space": [1, 2, 10], "bucket": [1, 10], "facet": [1, 5, 7, 10], "scatterplot": 1, "st": 1, "b": [1, 10], "textual": 1, "glue_demo": [1, 2, 3, 6], "image_demo": [1, 2, 3], "quit": 1, "matur": [1, 5], "highlight": [1, 2, 10], "diff": [1, 2], "decod": [1, 2, 10], "emit": 1, "varieti": [1, 2, 4, 6], "part": [1, 2, 7, 10], "speech": 1, "entiti": 1, "ner": 1, "mix": 1, "privileg": 1, "otherwis": [1, 5], "kernel": 1, "shap": 1, "valid": [1, 3], "check": [1, 2, 6, 9, 10], "mark": 1, "option": [1, 3, 5], "least": 1, "multilabel": 1, "penguin": [1, 4], "stat": 1, "penguin_demo": [1, 2], "That": 1, "awai": [1, 2], "move": [1, 5], "individu": [1, 2, 4, 5, 10], "color": [1, 5, 10], "break": 1, "exist": [1, 4, 9, 10], "open_imag": 1, "classifi": [1, 2, 10], "comparison": [1, 5, 7, 10], "onc": [1, 10], "io": [1, 2, 3, 6, 7, 9], "html": [1, 2, 5, 7], "navig": [1, 6, 10], "explan": [1, 10], "proport": 1, "l2": [1, 2], "propto": 1, "nabla_": 1, "x_i": 1, "hat": 1, "y": [1, 10], "_2": 1, "result": [1, 2, 5, 6, 10], "choic": 1, "argmax": 1, "product": [1, 4], "cdot": 1, "grad": [1, 2], "direct": [1, 3], "influenc": [1, 10], "suggest": [1, 4, 10], "stronger": 1, "wa": [1, 5, 10], "grad_for": 1, "normal": [1, 5, 10], "contribut": 1, "along": [1, 4, 7, 10], "sundararajan": 1, "et": [1, 2], "al": [1, 2], "2017": 1, "algorithm": 1, "instrument": 1, "modifi": [1, 5, 10], "plu": 1, "grad_target_field_kei": 1, "convent": [1, 5], "interpol": 1, "token_emb": 1, "input_text": 1, "token_grad": 1, "concret": 1, "contact": 1, "team": [1, 2], "assist": 1, "drop": [1, 10], "train": [1, 2, 10], "linear": 1, "reconstruct": 1, "origin": [1, 5, 10], "trade": 1, "off": [1, 5, 10], "slow": [1, 4], "noisi": 1, "longer": [1, 10], "ablat": 1, "compens": 1, "increas": 1, "explain": [1, 3, 10], "movi": [1, 10], "terribl": [1, 10], "ask": [1, 10], "receiv": 1, "okai": 1, "present": 1, "long": [1, 3, 4, 10], "deriv": [1, 5], "impact": 1, "prompt": [1, 2], "causal": 1, "granular": [1, 2], "sub": [1, 10], "paragraph": [1, 2], "refin": 1, "densiti": 1, "shot": 1, "eaxmpl": 1, "chain": 1, "thought": [1, 5], "walkthrough": [1, 2], "debug": [1, 2, 7], "llm": [1, 2], "ai": [1, 2], "toolkit": [1, 2, 5], "dev": [1, 2, 7], "model_behavior": [1, 2], "gemma": 1, "lit_gemma": 1, "lm_salience_demo": [1, 2], "kerasnlp": [1, 2], "instrumented_keras_lm": 1, "pretrained_lm": 1, "gpt": 1, "ebert": 1, "2022": 1, "k": 1, "bag": 1, "appli": 1, "button": [1, 5, 10], "6": [1, 5], "paper": [1, 9], "seek": 1, "column": [1, 10], "pleas": [1, 5, 9], "tutori": [1, 2], "analyz": [1, 10], "actual": [1, 5, 10], "been": [1, 3, 5, 10], "grad_target": 1, "channel": 1, "2d": [1, 10], "abl": [1, 4], "mobilenet": [1, 4], "blur": 1, "xrai": 1, "imagesali": 1, "layer_0": 1, "align_in": 1, "align_out": 1, "layer_1": 1, "layer_2": 1, "target_token": 1, "version": 1, "latent": [1, 2], "pca": [1, 2, 7, 10], "pan": [1, 10], "zoom": 1, "rotat": [1, 10], "shift": [1, 5, 10], "colormap": 1, "menu": [1, 2, 4, 10], "bleu": [1, 10], "whole": [1, 4], "try": 1, "power": [1, 5], "header": [1, 7, 10], "intersect": 1, "drag": [1, 10], "decis": 1, "boundari": 1, "respond": [1, 4, 5], "either": [1, 5, 10], 
"calcul": [1, 10], "optim": 1, "cost": 1, "ratio": 1, "rel": 1, "penalti": 1, "equal": 1, "costli": 1, "term": [1, 7, 10], "twice": 1, "overal": [1, 5, 10], "fair": [1, 2], "One": [1, 5], "demograph": [1, 2], "pariti": [1, 2], "attempt": 1, "percentag": 1, "accuraci": [1, 10], "opportun": 1, "among": 1, "effect": [1, 5], "minimum": 1, "maximum": [1, 10], "chart": 1, "averag": 1, "chosen": [1, 5], "done": [1, 5, 10], "across": [1, 5, 10], "inspir": 1, "prior": [1, 10], "toolbar": [1, 5, 9], "megaplot": [1, 4], "100k": [1, 4], "mous": 1, "scroll": 1, "reset": [1, 10], "futur": [1, 10], "releas": 1, "contrast": 1, "high": [1, 9, 10], "concept": 1, "gender": 1, "race": [1, 5], "akin": 1, "cav": 1, "intuit": 1, "measur": [1, 10], "sensit": [1, 10], "mention": [1, 2], "rather": [1, 5], "begin": 1, "low": 1, "15": [1, 4, 9], "selector": 1, "actor": 1, "actress": 1, "next": [1, 10], "final": [1, 5], "newli": [1, 4, 10], "split": 1, "second": [1, 5, 10], "9": [1, 5], "blue": [1, 10], "baselin": 1, "technic": 1, "null": [1, 5, 10], "pitfal": 1, "potenti": 1, "meaningless": 1, "randomli": [1, 10], "meaning": 1, "guard": 1, "whether": [1, 5], "possibli": 1, "remaind": 1, "reject": 1, "insignific": 1, "p": [1, 3], "greater": [1, 5], "05": 1, "happen": 1, "warn": 1, "100": [1, 10], "uniqu": 1, "accommod": 1, "cross": [1, 4], "approach": [1, 5], "too": 1, "min": 1, "upcom": 1, "pictur": 1, "insight": 1, "answer": [1, 10], "my": 1, "behav": 1, "under": [1, 3, 6, 10], "delet": [1, 10], "systemat": 1, "editor": 1, "enter": [1, 4], "duplic": [1, 5, 7, 10], "keep": [1, 4, 10], "track": [1, 10], "relationship": 1, "cycl": [1, 10], "scrambl": [1, 10], "regex": [1, 10], "hand": [1, 7, 10], "shelf": 1, "scrambler": [1, 10], "ebrahimi": 1, "tri": 1, "special": 1, "get_embedding_t": 1, "flip": 1, "maintain": [2, 7], "host": [2, 3, 4, 7, 9], "launcher": 2, "type": [2, 5, 6, 9, 10], "publicli": 2, "visibl": [2, 5], "exampl": [2, 3, 5, 7, 9, 10], "analysi": [2, 9], "sst": 2, "input": [2, 4, 5, 9, 10], "huggingfac": 2, "widest": 2, "output": [2, 4, 5, 6, 9], "metric": [2, 6, 7], "plot": [2, 10], "confid": 2, "ones": 2, "cluster": [2, 9, 10], "gener": [2, 4, 6, 7, 9], "tip": [2, 9], "studi": 2, "public": [2, 3], "websit": 2, "xnli_demo": 2, "14": 2, "jp": 2, "fine": 2, "tune": 2, "whitespac": 2, "delimit": 2, "wordpiec": 2, "stsb_dev": 2, "unrel": 2, "wide": 2, "scatter": [2, 10], "filter": [2, 10], "2b": 2, "7b": 2, "dot": 2, "depth": 2, "t5_demo": [2, 3], "hypothes": 2, "veer": 2, "roug": [2, 10], "machin": [2, 3, 4, 7], "cnndm": 2, "wmt": [2, 4], "corpora": 2, "coref_demo": [2, 3], "ontonot": [2, 10], "winogend": [2, 10], "schema": 2, "ruding": 2, "2018": 2, "profess": 2, "stratifi": 2, "quantifi": 2, "pronoun": [2, 10], "bureau": 2, "labor": 2, "statist": 2, "partial": 2, "setter": 2, "imagenet": 2, "deploi": 3, "onto": 3, "face": 3, "container": 3, "prefer": [3, 5], "engin": [3, 5], "dockerfil": 3, "directori": [3, 5, 6], "front": 3, "gunicorn": 3, "invok": [3, 5], "get_wsgi_app": 3, "wsgi": [3, 4, 7], "gunicorn_config": 3, "shell": 3, "command": [3, 6], "rememb": 3, "trail": 3, "know": [3, 10], "now": 3, "rm": 3, "5432": [3, 5], "launch": [3, 10], "demo_nam": 3, "demo_port": 3, "children": 3, "nest": 3, "coref": 3, "is_ev": 3, "is_eval_demo": 3, "Be": 3, "sure": [3, 4, 5], "2345": 3, "bring": 3, "togeth": [3, 10], "combin": 3, "background": [3, 10], "execut": 3, "meet": 3, "locat": 3, "litservertyp": 3, "server_typ": 3, "extern": [3, 4], "unus": 3, "sy": 3, "argv": 3, "known_onli": 3, "rebuild": 3, 
"your_server_script_path_her": 3, "consum": 3, "absl": 3, "stricli": 3, "recommend": [3, 4, 5, 6], "consol": 3, "suppos": 3, "come": 3, "soon": 3, "submit": 4, "bug": 4, "issu": [4, 5], "modern": 4, "ml": 4, "good": [4, 10], "tabular": [4, 9], "palmer": 4, "unicod": 4, "english": 4, "latin": [4, 5], "modif": 4, "xnli": 4, "lingual": 4, "nli": 4, "multilingu": 4, "comfort": 4, "10k": 4, "speed": 4, "caveat": [4, 5], "lot": 4, "larger": [4, 5], "warm_start": [4, 5, 6], "nativ": 4, "resolut": 4, "smaller": [4, 6], "anywai": 4, "256x256": 4, "thing": [4, 5, 7], "webgl": 4, "scattergl": 4, "dive": [4, 10], "older": [4, 10], "thousand": 4, "preload": 4, "wait": [4, 5, 10], "visit": 4, "bewar": 4, "node": [4, 5], "favorit": 4, "lightweight": 4, "alreadi": [4, 10], "ephemer": 4, "window": [4, 7], "manual": [4, 5, 10], "data_dir": [4, 5], "disk": [4, 5], "abil": 4, "edit": [4, 10], "write": [4, 6], "anyon": 4, "address": [4, 10], "restrict": 4, "configur": [4, 5, 6, 9, 10], "thin": [4, 7], "middlewar": 4, "docker": [4, 9], "data_": 4, "fieldnam": 4, "total": 4, "within": [4, 10], "data0": 4, "data1": 4, "data2": 4, "data0_": 4, "csv": [4, 10], "push": 4, "writabl": 4, "notebook": [4, 9], "ui_stat": 4, "primari": [4, 10], "bulk": 4, "pipelin": [4, 7], "littl": 4, "difficult": 4, "refactor": 4, "primarili": 4, "offici": 4, "facilit": 4, "reus": 4, "fact": 4, "exactli": 4, "quickstart_sst_demo": 4, "gotcha": 5, "central": 5, "piec": [5, 10], "tech": 5, "compon": [5, 7, 9, 10], "observ": 5, "orient": 5, "templat": 5, "declar": 5, "isol": 5, "center": 5, "data": [5, 6, 9], "scalabl": 5, "highli": 5, "fairli": 5, "digest": 5, "heavier": 5, "angular": 5, "roughli": [5, 7], "divid": [5, 10], "itself": 5, "coordin": 5, "footer": 5, "lai": 5, "variou": 5, "go": 5, "later": 5, "bundl": 5, "attach": 5, "dom": 5, "kick": 5, "singleton": [5, 7], "arraign": 5, "minimalist": [5, 10], "bottom": [5, 10], "analyt": [5, 10], "put": [5, 10], "exploratori": [5, 10], "inspect": [5, 10], "v1": [5, 10], "appreci": [5, 10], "custom_layout": 5, "updat": [5, 10], "factor": 5, "model": [5, 7, 9], "modules_servic": 5, "fetch": 5, "static": 5, "helper": [5, 7], "outlin": [5, 10], "dummi": 5, "pig": 5, "customel": 5, "demotextmodul": 5, "selectionserviceindex": 5, "shouldreact": 5, "duplicateformodelcomparison": 5, "4": 5, "privat": 5, "readonli": 5, "colorservic": 5, "getservic": 5, "piglatin": 5, "firstupd": 5, "reactimmedi": 5, "selectionservic": [5, 7], "primaryselectedinputdata": 5, "7": [5, 10], "gettransl": 5, "async": 5, "indexedinput": 5, "const": 5, "promis": 5, "apiservic": 5, "getpiglatin": 5, "8": 5, "await": 5, "loadlatest": 5, "renderimpl": 5, "getdatapointcolor": 5, "div": 5, "stylemap": 5, "checkmodul": 5, "modelspec": 5, "modelsmap": 5, "datasetspec": 5, "11": 5, "12": 5, "htmlelementtagnamemap": 5, "illustr": 5, "definit": 5, "css": 5, "shared_styl": 5, "super": 5, "inject": 5, "easier": 5, "mock": 5, "appstat": 5, "u": [5, 10], "decor": 5, "unit": 5, "reactiv": 5, "mobxlitel": 5, "trigger": 5, "excel": 5, "asynchron": 5, "leverag": 5, "machineri": 5, "lifecycl": 5, "reaction": 5, "whatev": [5, 10], "immedi": 5, "whenev": 5, "react": 5, "won": 5, "condit": 5, "rapidli": 5, "supersed": 5, "recent": 5, "rerend": 5, "seem": 5, "pure": 5, "worth": 5, "sometim": [5, 7], "inadequ": 5, "advanc": 5, "suit": 5, "anim": 5, "imper": 5, "draw": 5, "canva": 5, "fortun": 5, "bridg": 5, "queryselector": 5, "shadow": 5, "root": 5, "mess": 5, "reconcili": 5, "shadowroot": 5, "drawcanva": 5, "faceting_control": 5, "construct": 
5, "programmat": 5, "member": 5, "oppos": [5, 10], "destroi": 5, "recreat": 5, "brought": 5, "lose": 5, "previous": [5, 10], "held": 5, "snippet": 5, "examplemodul": 5, "facetingcontrol": 5, "createel": 5, "facetschang": 5, "event": 5, "customev": 5, "contextnam": 5, "addeventlisten": 5, "eventlisten": 5, "clang": 5, "instead": [5, 10], "id": [5, 10], "dosometh": 5, "litel": 5, "reactiveel": 5, "annotated_text_modul": 5, "annotated_text_vi": 5, "On": [5, 10], "regist": 5, "properli": 5, "clean": 5, "leav": 5, "yarn": 5, "termin": 5, "cd": 5, "watch": 5, "m": [5, 6], "example_nam": 5, "localhost": [5, 6], "ctrl": 5, "cmd": 5, "r": 5, "hard": 5, "refresh": 5, "pick": 5, "hot": 5, "subsequ": 5, "conjunct": 5, "stale": 5, "effort": 5, "potato": [5, 7], "theme": 5, "custom_modul": 5, "env": 5, "asset": 5, "client_root": 5, "potato_demo": 5, "parent_dir": 5, "o": 5, "join": 5, "pathlib": 5, "__file__": 5, "absolut": 5, "enum": 5, "litmodulenam": 5, "potato_layout": 5, "classificationmodul": 5, "spud": 5, "tastic": 5, "jump": [6, 9], "short": 6, "video": [6, 10], "rout": 6, "foo_data": 6, "foodataset": 6, "bar_data": 6, "bardataset": 6, "my_model": 6, "mymodel": 6, "wealth": 6, "overview": [6, 9], "lit_sentiment_classifi": 6, "403": 6, "cooki": 6, "site": [6, 9], "outsid": [6, 9], "blaze": 6, "quickstart": 6, "alsologtostderr": 6, "cpu": 6, "overload": 7, "codebas": 7, "folder": 7, "fledg": 7, "job": 7, "fulli": 7, "capit": 7, "accompani": 7, "emoji": 7, "pronounc": 7, "ell": 7, "ey": 7, "tee": 7, "formerli": 7, "polym": 7, "coincident": 7, "cours": 7, "neural": 7, "network": 7, "composit": 7, "strictli": 7, "speak": 7, "litmodul": [7, 9], "noun": 7, "verb": 7, "notabl": 7, "layout": [7, 9], "maxim": 7, "gui": 7, "welcom": 9, "cite": 9, "demonstr": 9, "misc": 9, "tenney2020languag": 9, "author": 9, "ian": 9, "tennei": 9, "jame": 9, "wexler": 9, "jasmijn": 9, "bast": 9, "tolga": 9, "bolukbasi": 9, "andi": 9, "coenen": 9, "sebastian": 9, "gehrmann": 9, "ellen": 9, "jiang": 9, "mahima": 9, "pushkarna": 9, "carei": 9, "radebaugh": 9, "emili": 9, "reif": 9, "ann": 9, "yuan": 9, "booktitl": 9, "proceed": 9, "2020": 9, "confer": 9, "empir": 9, "year": 9, "publish": 9, "linguist": 9, "107": 9, "118": 9, "www": 9, "aclweb": 9, "anthologi": 9, "emnlp": 9, "stand": 9, "alon": 9, "multimod": 9, "journei": 9, "tcav": 9, "architectur": 9, "style": 9, "glossari": 9, "privaci": 9, "secur": 9, "tour": 10, "live": 10, "draggabl": 10, "alloc": 10, "vertic": 10, "horizont": 10, "adopt": 10, "organiz": 10, "scheme": 10, "reflect": 10, "focu": 10, "arrow": 10, "darker": 10, "dialog": 10, "declutt": 10, "someon": 10, "els": 10, "clear": 10, "deselect": 10, "legend": 10, "unpin": 10, "indetermin": 10, "progress": 10, "pend": 10, "fail": 10, "failur": 10, "until": 10, "x": 10, "icon": 10, "messag": 10, "star": 10, "screenshot": 10, "toggl": 10, "dimens": 10, "hover": 10, "lasso": 10, "unselect": 10, "consecut": 10, "light": 10, "sort": 10, "were": 10, "prefix": 10, "hide": 10, "exce": 10, "truncat": 10, "ellipsi": 10, "entireti": 10, "collaps": 10, "film": 10, "again": 10, "unstar": 10, "neighbor": 10, "immut": 10, "simplic": 10, "blank": 10, "creation": 10, "altogeth": 10, "highest": 10, "opposit": 10, "hello": 10, "bad": 10, "exact": 10, "broken": 10, "made": 10, "versu": 10, "ax": 10, "agreement": 10, "disagr": 10, "clickabl": 10, "disagre": 10, "laid": 10, "axi": 10, "methodologi": 10, "capabl": 10, "wise": 10, "autorun": 10, "uncheck": 10, "valuabl": 10, "expens": 10, "attend": 10, "opac": 10, "magnitud": 10, 
"negat": 10, "stanford": 10, "treebank": 10, "56": 10, "surprisingli": 10, "But": 10, "truli": 10, "review": 10, "ultim": 10, "depress": 10, "era": 10, "gangster": 10, "strongli": 10, "worst": 10, "elicit": 10, "mildli": 10, "incorrect": 10, "occup": 10, "technician": 10, "particip": 10, "male": 10, "domin": 10, "25": 10, "femal": 10, "bl": 10, "agre": 10, "stereotyp": 10, "resolv": 10, "83": 10, "37": 10, "cnn": 10, "dm": 10, "middl": 10, "erron": 10, "constitu": 10, "alastair": 10, "cook": 10, "captain": 10, "dig": 10, "deeper": 10, "28": 10, "arriv": 10, "searcher": 10, "fast": 10, "approxim": 10, "nearest": 10, "corpu": 10, "retriev": 10, "34": 10, "16": 10, "occurr": 10, "strong": 10, "toward": 10, "phrase": 10}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"lit": [0, 3, 4, 6, 9], "python": 0, "api": 0, "design": 0, "overview": [0, 5], "ad": 0, "model": [0, 1, 2, 4, 6, 10], "data": [0, 1, 2, 4, 10], "valid": 0, "dataset": [0, 4], "transform": [0, 1], "more": 0, "output": [0, 1, 10], "option": [0, 10], "input": [0, 1], "interpret": [0, 9], "compon": [0, 1, 4, 6], "metric": [0, 1, 10], "gener": [0, 1, 10], "backtransl": 0, "configur": 0, "ui": [0, 4, 10], "type": [0, 1, 4], "system": 0, "convent": 0, "compat": 0, "check": 0, "an": [0, 6], "In": 0, "depth": 0, "exampl": [0, 1, 4, 6], "avail": 0, "server": 0, "custom": [0, 3, 5, 6], "layout": [0, 5, 10], "access": 0, "notebook": [0, 6], "us": [0, 4, 6], "outsid": [0, 4], "featur": 1, "framework": 1, "support": 1, "huggingfac": 1, "tf1": 1, "x": 1, "estim": 1, "remot": 1, "host": [1, 6], "static": 1, "predict": [1, 2], "load": [1, 4], "classif": [1, 2], "regress": [1, 2], "score": [1, 2], "multi": 1, "label": 1, "seq2seq": [1, 2], "span": 1, "structur": [1, 2], "multipl": 1, "segment": 1, "tabular": [1, 2], "imag": [1, 2, 3], "token": 1, "base": 1, "salienc": [1, 10], "gradient": 1, "norm": 1, "dot": 1, "integr": [1, 3, 4], "lime": 1, "target": 1, "select": [1, 10], "sequenc": [1, 2], "cluster": 1, "attribut": 1, "pixel": 1, "attent": [1, 10], "embed": [1, 10], "projector": [1, 10], "aggreg": 1, "analysi": [1, 10], "confus": [1, 10], "matrix": [1, 10], "scalar": [1, 10], "plot": 1, "binari": 1, "threshold": 1, "partial": 1, "depend": 1, "dive": 1, "tcav": 1, "statist": 1, "signific": 1, "sort": 1, "cosin": 1, "similar": [1, 2], "counterfactu": 1, "manual": 1, "edit": 1, "demo": [2, 6], "do": [2, 6], "NOT": [2, 6], "remov": [2, 6], "section": 2, "header": 2, "sentiment": [2, 10], "nli": 2, "glue": 2, "multilingu": 2, "xnli": 2, "textual": 2, "st": 2, "b": 2, "stsb": 2, "gemma": 2, "t5": 2, "languag": [2, 4], "lm": 2, "bert": 2, "gpt": 2, "2": 2, "gender": [2, 10], "bia": [2, 10], "corefer": [2, 10], "coref": 2, "multimod": 2, "penguin": 2, "mobilenet": 2, "run": [3, 6], "docker": 3, "contain": 3, "basic": 3, "usag": 3, "instanc": 3, "default": 3, "build": 3, "your": [3, 6], "own": 3, "frequent": 4, "ask": 4, "question": 4, "scale": 4, "size": 4, "larg": 4, "privaci": 4, "secur": 4, "i": 4, "have": 4, "proprietari": 4, "my": 4, "team": 4, "workflow": 4, "send": 4, "from": 4, "anoth": 4, "tool": [4, 9], "download": 4, "export": 4, "train": 4, "frontend": 5, "develop": 5, "guid": [5, 10], "high": 5, "level": 5, "applic": 5, "architectur": 5, "bootstrap": 5, "initi": 5, "modul": [5, 10], "litmodul": 5, "setup": 5, "function": 5, "escap": 5, "hatch": 5, "state": 5, "child": 5, "element": 5, "style": 5, "tip": 5, "open": 5, "sourc": 5, "client": 5, "get": 6, "start": 6, "instal": 6, "colab": 6, "stand": 6, "alon": 6, "standalon": 
6, "exist": 6, "glossari": 7, "learn": 9, "research": 9, "datapoint": 10, "toolbar": 10, "top": 10, "bar": 10, "global": 10, "set": 10, "url": 10, "share": 10, "main": 10, "statu": 10, "compar": 10, "slice": 10, "detail": 10, "tabl": 10, "editor": 10, "map": 10, "user": 10, "journei": 10, "debug": 10, "text": 10}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 60}, "alltitles": {"LIT Python API": [[0, "lit-python-api"]], "Design Overview": [[0, "design-overview"]], "Adding Models and Data": [[0, "adding-models-and-data"]], "Validating Models and Data": [[0, "validating-models-and-data"]], "Datasets": [[0, "datasets"]], "Transformations": [[0, "transformations"]], "Models": [[0, "models"]], "Adding more outputs": [[0, "adding-more-outputs"]], "Optional inputs": [[0, "optional-inputs"]], "Interpretation Components": [[0, "interpretation-components"]], "Metrics": [[0, "metrics"], [1, "metrics"]], "Generators": [[0, "generators"], [1, "generators"]], "Backtranslator Generator": [[0, "backtranslator-generator"]], "Configuration UI": [[0, "configuration-ui"]], "Type System": [[0, "type-system"]], "Conventions": [[0, "conventions"]], "Compatibility Checks": [[0, "compatibility-checks"]], "An In-Depth Example": [[0, "an-in-depth-example"]], "Available types": [[0, "available-types"]], "Server Configuration": [[0, "server-configuration"]], "Customizing the Layout": [[0, "customizing-the-layout"]], "Accessing the LIT UI in Notebooks": [[0, "accessing-the-lit-ui-in-notebooks"]], "Using LIT components outside of LIT": [[0, "using-lit-components-outside-of-lit"]], "Components and Features": [[1, "components-and-features"]], "Framework and Model Support": [[1, "framework-and-model-support"]], "HuggingFace Transformers": [[1, "huggingface-transformers"]], "TF1.x Estimator": [[1, "tf1-x-estimator"]], "Remote or hosted models": [[1, "remote-or-hosted-models"]], "Static predictions": [[1, "static-predictions"]], "Data loading": [[1, "data-loading"]], "Input and Output Types": [[1, "input-and-output-types"]], "Classification": [[1, "classification"]], "Regression / Scoring": [[1, "regression-scoring"]], "Multi-label classification": [[1, "multi-label-classification"]], "Seq2Seq / Generation": [[1, "seq2seq-generation"]], "Span Labeling and Structured Prediction": [[1, "span-labeling-and-structured-prediction"]], "Multiple input segments": [[1, "multiple-input-segments"]], "Tabular data": [[1, "tabular-data"]], "Images": [[1, "images"]], "Token-based Salience": [[1, "token-based-salience"]], "Gradient Norm": [[1, "gradient-norm"]], "Gradient-dot-Input": [[1, "gradient-dot-input"]], "Integrated Gradients": [[1, "integrated-gradients"]], "LIME": [[1, "lime"]], "Target Selection on Classification Output": [[1, "target-selection-on-classification-output"]], "Sequence Salience": [[1, "sequence-salience"]], "Salience Clustering": [[1, "salience-clustering"]], "Tabular Feature Attribution": [[1, "tabular-feature-attribution"]], "Pixel-based Salience": [[1, "pixel-based-salience"]], "Attention": [[1, "attention"], [10, "attention"]], "Embedding Projector": [[1, "embedding-projector"], [10, "embedding-projector"]], "Aggregate Analysis": [[1, "aggregate-analysis"]], "Confusion Matrix": [[1, "confusion-matrix"], [10, "confusion-matrix"]], "Scalar Plots": [[1, 
"scalar-plots"]], "Binary Classification Thresholds": [[1, "binary-classification-thresholds"]], "Partial Dependence Plots": [[1, "partial-dependence-plots"]], "Dive": [[1, "dive"]], "TCAV": [[1, "tcav"]], "Example": [[1, "example"]], "Statistical Significance": [[1, "statistical-significance"]], "Sorting by Cosine Similarity": [[1, "sorting-by-cosine-similarity"]], "Counterfactual Analysis": [[1, "counterfactual-analysis"]], "Manual Editing": [[1, "manual-editing"]], "Demos": [[2, "demos"]], "Classification ": [[2, "classification"]], "Sentiment and NLI ": [[2, "sentiment-and-nli"]], "Multilingual (XNLI) ": [[2, "multilingual-xnli"]], "Regression / Scoring ": [[2, "regression-scoring"]], "Textual Similarity (STS-B) ": [[2, "textual-similarity-sts-b"]], "Sequence-to-Sequence ": [[2, "sequence-to-sequence"]], "Gemma ": [[2, "gemma"]], "T5 ": [[2, "t5"]], "Language Modeling ": [[2, "language-modeling"]], "BERT and GPT-2 ": [[2, "bert-and-gpt-2"]], "Structured Prediction ": [[2, "structured-prediction"]], "Gender Bias in Coreference ": [[2, "gender-bias-in-coreference"]], "Multimodal ": [[2, "multimodal"]], "Tabular Data: Penguin Classification ": [[2, "tabular-data-penguin-classification"]], "Image Classification with MobileNet ": [[2, "image-classification-with-mobilenet"]], "Running LIT in a Docker container": [[3, "running-lit-in-a-docker-container"]], "Basic Usage": [[3, "basic-usage"]], "Integrating Custom LIT Instances with the Default Docker Image": [[3, "integrating-custom-lit-instances-with-the-default-docker-image"]], "Building Your Own Image": [[3, "building-your-own-image"]], "Frequently Asked Questions": [[4, "frequently-asked-questions"]], "Model and Data Types": [[4, "model-and-data-types"]], "Languages": [[4, "languages"]], "Scale": [[4, "scale"]], "Dataset Size": [[4, "dataset-size"]], "Large Models": [[4, "large-models"]], "Privacy and Security": [[4, "privacy-and-security"]], "I have proprietary data. 
Is LIT secure for my team to use?": [[4, "i-have-proprietary-data-is-lit-secure-for-my-team-to-use"]], "Workflow and Integrations": [[4, "workflow-and-integrations"]], "Sending examples from another tool": [[4, "sending-examples-from-another-tool"]], "Downloading or exporting data": [[4, "downloading-or-exporting-data"]], "Loading data from the UI": [[4, "loading-data-from-the-ui"]], "Using components outside the LIT UI": [[4, "using-components-outside-the-lit-ui"]], "Training models with LIT": [[4, "training-models-with-lit"]], "Frontend Developer Guide": [[5, "frontend-developer-guide"]], "High Level Overview": [[5, "high-level-overview"]], "Application Architecture": [[5, "application-architecture"]], "Bootstrapping": [[5, "bootstrapping"]], "Layout": [[5, "layout"]], "Initialization": [[5, "initialization"]], "Modules (LitModule)": [[5, "modules-litmodule"]], "Setup": [[5, "setup"]], "Functionality": [[5, "functionality"]], "Escape Hatches": [[5, "escape-hatches"]], "Stateful Child Elements": [[5, "stateful-child-elements"]], "Style Guide": [[5, "style-guide"]], "Development Tips (open-source)": [[5, "development-tips-open-source"]], "Custom Client / Modules": [[5, "custom-client-modules"]], "Getting Started with LIT": [[6, "getting-started-with-lit"]], "Installation": [[6, "installation"]], "Hosted demos": [[6, "hosted-demos"]], "LIT with your model ": [[6, "lit-with-your-model"]], "Using LIT in notebooks ": [[6, "using-lit-in-notebooks"]], "Stand-alone components ": [[6, "stand-alone-components"]], "Run an existing example ": [[6, "run-an-existing-example"]], "Glossary": [[7, "glossary"]], "Learning Interpretability Tool (LIT)": [[9, "learning-interpretability-tool-lit"]], "Research": [[9, "research"]], "UI Guide": [[10, "ui-guide"]], "General Layout": [[10, "general-layout"]], "Layout Options": [[10, "layout-options"]], "Datapoint Selections": [[10, "datapoint-selections"]], "Toolbars": [[10, "toolbars"]], "Top Bar": [[10, "top-bar"]], "Global Settings": [[10, "global-settings"]], "URL Sharing": [[10, "url-sharing"]], "Main Toolbar": [[10, "main-toolbar"]], "Status Bar": [[10, "status-bar"]], "Comparing Models": [[10, "comparing-models"]], "Comparing Datapoints": [[10, "comparing-datapoints"]], "Slices": [[10, "slices"]], "Module Details": [[10, "module-details"]], "Data Table": [[10, "data-table"]], "Datapoint Editor": [[10, "datapoint-editor"]], "Datapoint Generator": [[10, "datapoint-generator"]], "Metrics Table": [[10, "metrics-table"]], "Scalars": [[10, "scalars"]], "Model Output": [[10, "model-output"]], "Salience Maps": [[10, "salience-maps"]], "User Journeys": [[10, "user-journeys"]], "Sentiment Analysis": [[10, "sentiment-analysis"]], "Gender Bias in Coreference": [[10, "gender-bias-in-coreference"]], "Debugging Text Generation": [[10, "debugging-text-generation"]]}, "indexentries": {}})
\ No newline at end of file
+Search.setIndex({"docnames": ["api", "components", "demos", "docker", "faq", "frontend_development", "getting_started", "glossary", "includes/highlight_demos", "index", "ui_guide"], "filenames": ["api.md", "components.md", "demos.md", "docker.md", "faq.md", "frontend_development.md", "getting_started.md", "glossary.md", "includes/highlight_demos.md", "index.md", "ui_guide.md"], "titles": ["LIT Python API", "Components and Features", "Demos", "Running LIT in a Docker container", "Frequently Asked Questions", "Frontend Developer Guide", "Getting Started with LIT", "Glossary", "<no title>", "Learning Interpretability Tool (LIT)", "UI Guide"], "terms": {"i": [0, 1, 3, 5, 6, 7, 10], "modular": 0, "compris": [0, 10], "collect": [0, 4, 5, 10], "backend": [0, 4, 5, 7, 10], "written": [0, 4], "frontend": [0, 4, 7, 9], "modul": [0, 1, 2, 3, 4, 7, 9], "typescript": [0, 5], "most": [0, 1, 4, 5, 6, 7, 10], "user": [0, 1, 3, 4, 5, 6, 9], "develop": [0, 1, 4, 9, 10], "against": [0, 1, 2, 4, 10], "which": [0, 1, 2, 3, 4, 5, 6, 7, 10], "document": [0, 1, 4, 5, 6], "below": [0, 1, 2, 3, 5, 10], "allow": [0, 1, 4, 5, 10], "extend": [0, 5], "counterfactu": [0, 2, 6, 7, 9, 10], "The": [0, 1, 2, 3, 4, 5, 6, 7, 9, 10], "ar": [0, 1, 3, 4, 5, 6, 7, 10], "provid": [0, 1, 3, 4, 5, 6, 7, 10], "librari": [0, 1, 5], "can": [0, 1, 3, 4, 5, 6, 10], "through": [0, 1, 4, 10], "own": [0, 1, 9], "demo": [0, 1, 3, 4, 5, 7, 9], "binari": [0, 2, 6, 7, 10], "via": [0, 1, 4], "colab": [0, 1, 4], "also": [0, 1, 3, 4, 5, 6, 10], "regular": [0, 4, 7], "class": [0, 1, 4, 5, 6, 7, 10], "without": [0, 1, 4, 5, 10], "start": [0, 1, 3, 4, 5, 9], "see": [0, 1, 2, 4, 5, 6, 7, 10], "detail": [0, 1, 3, 4, 5, 6, 9], "serv": [0, 1, 3, 4, 5, 6, 7], "each": [0, 1, 2, 3, 5, 6, 10], "implement": [0, 1, 3, 4, 5, 6, 7], "minim": [0, 1, 3, 7, 10], "reli": [0, 5], "spec": [0, 1, 5], "detect": 0, "field": [0, 1, 4, 10], "verifi": [0, 1, 3, 10], "stateless": 0, "cach": [0, 4, 5, 6], "layer": [0, 1, 2, 4, 10], "predict": [0, 4, 5, 7, 9, 10], "thi": [0, 1, 2, 3, 4, 5, 6, 7, 10], "simplifi": [0, 5], "interact": [0, 1, 2, 4, 5, 7, 9, 10], "larg": [0, 1], "like": [0, 1, 4, 5, 7, 10], "bert": [0, 1, 4, 10], "t5": [0, 1, 4, 10], "state": [0, 4, 7, 10], "singl": [0, 1, 2, 4, 5, 6, 7, 10], "page": [0, 1, 5, 6, 9, 10], "app": [0, 3, 4, 5, 10], "built": [0, 2, 3, 5, 6, 7, 10], "1": [0, 1, 4, 5, 6, 10], "mobx": [0, 5], "manag": [0, 1, 4, 5], "It": [0, 1, 5, 10], "consist": [0, 1, 5, 7, 10], "core": [0, 5], "framework": [0, 4, 5, 6, 7, 9], "set": [0, 1, 2, 3, 4, 5, 6, 7], "share": [0, 4, 5], "servic": [0, 5, 7], "persist": [0, 1, 10], "independ": 0, "render": [0, 1, 5, 6, 7], "visual": [0, 1, 2, 4, 5, 7, 9, 10], "support": [0, 2, 4, 5, 6, 9, 10], "For": [0, 1, 2, 4, 5, 6, 10], "guid": [0, 1, 4, 6, 7, 9], "To": [0, 1, 6, 10], "run": [0, 1, 4, 5, 7, 9, 10], "your": [0, 1, 4, 5, 9, 10], "you": [0, 1, 3, 4, 5, 6, 9, 10], "creat": [0, 1, 3, 4, 5, 7, 10], "py": [0, 1, 2, 3, 4, 5, 6, 7], "script": [0, 3, 4, 6], "pass": [0, 1, 3, 4, 5, 6, 10], "def": [0, 1, 3], "main": [0, 1, 3, 5, 9], "_": 0, "mulitinlidata": 0, "mnli_match": 0, "multinlidata": 0, "path": [0, 1, 3, 4, 5, 6], "dev_match": 0, "tsv": [0, 6], "mnli_mismatch": 0, "dev_mismatch": 0, "nlimodel": 0, "model_foo": 0, "foo": [0, 5, 6], "file": [0, 3, 4, 5, 6], "model_bar": 0, "bar": [0, 1, 6, 7], "lit_demo": [0, 6], "lit_nlp": [0, 3, 5, 6], "dev_serv": [0, 3, 5, 6], "port": [0, 3, 5, 6], "4321": [0, 6], "__name__": 0, "__main__": 0, "conceptu": [0, 5], "just": [0, 4, 5, 6, 10], "list": [0, 1, 3, 4, 6, 10], 
"function": [0, 1, 2, 3, 10], "take": [0, 1, 4, 5, 6, 10], "return": [0, 1, 3, 5, 10], "metadata": [0, 1, 5], "describ": [0, 1, 3, 5, 6, 10], "themselv": 0, "other": [0, 1, 2, 4, 5, 6, 7, 10], "pre": [0, 1, 2, 4, 5, 6, 10], "out": [0, 1, 2, 5, 6, 9, 10], "http": [0, 1, 2, 3, 5, 6, 7, 9], "github": [0, 1, 2, 3, 4, 6, 9], "com": 0, "pair": [0, 1, 2, 3, 6, 9], "code": [0, 1, 2, 3, 4, 5, 6, 9], "tree": 0, "ensur": [0, 1, 3, 5], "match": [0, 4], "valu": [0, 1, 3, 4, 5, 10], "veri": [0, 1, 2, 5], "help": [0, 1, 3, 4, 10], "dure": [0, 5], "new": [0, 1, 4, 5, 10], "wrapper": [0, 1, 2, 4, 7], "correct": [0, 10], "behavior": [0, 1, 5], "At": [0, 10], "startup": 0, "flag": [0, 2, 3, 4, 5, 6, 10], "enabl": [0, 1, 6, 10], "There": [0, 1, 4, 7, 10], "three": [0, 2, 4, 5, 10], "mode": [0, 1, 2, 5, 10], "first": [0, 4, 5, 10], "sampl": [0, 1, 4], "5": [0, 1, 2, 5, 10], "all": [0, 1, 3, 4, 5, 7, 10], "from": [0, 1, 2, 3, 5, 6, 7, 10], "addition": [0, 1, 10], "call": [0, 1, 3, 5, 10], "directli": [0, 1, 4, 5, 6, 10], "associ": [0, 2, 5, 9, 10], "inform": [0, 4, 5, 10], "follow": [0, 3, 5, 10], "": [0, 1, 2, 3, 4, 5, 10], "should": [0, 1, 3, 4, 5, 6], "flat": 0, "dict": [0, 1], "self": [0, 1, 5], "_exampl": 0, "oper": [0, 4], "load": [0, 2, 5, 6, 10], "includ": [0, 1, 2, 3, 4, 5, 7, 10], "therefor": [0, 3], "care": 0, "size": [0, 1, 2], "fit": [0, 4], "memori": [0, 1, 4], "displai": [0, 1, 5, 10], "browser": [0, 4, 5], "note": [0, 1, 3, 5, 6, 10], "faq": [0, 1, 9], "limit": [0, 4], "subclass": [0, 5], "usual": [0, 1, 7], "few": [0, 1, 4, 5, 6, 7], "line": [0, 1, 3, 6, 10], "complet": [0, 1, 3, 10], "multinli": [0, 2], "loader": [0, 1], "nli_label": 0, "entail": 0, "neutral": 0, "contradict": 0, "__init__": [0, 1], "str": [0, 1, 3], "read": [0, 4, 5], "eval": [0, 2], "distribut": [0, 1], "glue": [0, 1, 3], "benchmark": 0, "df": 0, "panda": 0, "read_csv": 0, "sep": 0, "t": [0, 1, 3, 4, 5, 7, 10], "store": [0, 4, 5, 10], "conform": 0, "premis": [0, 2], "row": [0, 1, 10], "sentence1": 0, "hypothesi": [0, 1, 2], "sentence2": 0, "label": [0, 2, 10], "gold_label": 0, "genr": 0, "iterrow": 0, "lit_typ": [0, 1], "textseg": [0, 1], "categorylabel": [0, 1], "vocab": [0, 1], "we": [0, 1, 3, 4, 5, 6, 7, 10], "addit": [0, 1, 3, 4, 5, 6, 10], "don": [0, 1, 4, 5], "have": [0, 1, 3, 5, 10], "four": 0, "string": [0, 1, 4, 5], "semant": [0, 1], "tell": 0, "bit": [0, 5], "about": [0, 1, 5, 10], "how": [0, 1, 3, 5, 6, 10], "them": [0, 1, 4, 5, 10], "treat": [0, 1, 4], "natur": [0, 2, 9], "languag": [0, 1, 7, 9, 10], "text": [0, 1, 2, 4], "categor": [0, 1, 2, 4, 10], "featur": [0, 2, 4, 5, 6, 7, 9, 10], "fix": [0, 1], "known": [0, 7, 10], "possibl": [0, 1, 5], "unknown": 0, "open": [0, 1, 9], "tensorflow": [0, 2], "simpli": [0, 1, 3], "wrap": [0, 1, 4], "freeli": 0, "add": [0, 1, 3, 4, 5, 10], "abov": [0, 1, 3, 4, 5, 6, 10], "mai": [0, 1, 4, 5, 6, 7, 10], "awar": 0, "recogn": [0, 1], "slice": [0, 1, 2, 4, 7, 9], "bin": [0, 1], "etc": [0, 3, 4], "thei": [0, 1, 4, 5, 10], "similar": [0, 3, 10], "tf": [0, 1], "scope": 0, "aim": [0, 5], "quick": [0, 1, 2, 10], "iter": 0, "step": [0, 3, 5], "end": [0, 3, 10], "same": [0, 1, 5], "datapoint": [0, 1, 4, 7, 9], "n": 0, "seed": 0, "42": 0, "random": [0, 1, 10], "remap": 0, "field_map": 0, "renam": 0, "both": [0, 1, 5], "latter": 0, "shortcut": 0, "one": [0, 1, 3, 4, 5, 6, 7, 10], "anoth": [0, 1, 5, 7, 10], "expect": [0, 4], "2": [0, 1, 5], "produc": [0, 1, 3, 10], "method": [0, 1, 2, 3, 4, 5, 6, 7, 9, 10], "input_spec": [0, 1], "necessari": [0, 3, 5], "output_spec": [0, 1], "ani": [0, 1, 
3, 4, 5, 10], "sequenc": [0, 3, 9], "satisfi": 0, "yield": 0, "parallel": 0, "might": [0, 1, 3, 10], "look": [0, 1, 3, 4, 10], "someth": [0, 1, 5, 7], "infer": [0, 1, 2, 4, 5, 6, 10], "model_path": 0, "kw": 0, "so": [0, 1, 3, 4, 10], "re": [0, 4, 5, 7], "readi": 0, "_model": 0, "_load_my_model": 0, "pred": 0, "stream": 0, "convert_dict_input": 0, "d": [0, 3, 10], "preprocess": 0, "predict_exampl": 0, "parent": [0, 1, 5], "keyword": 0, "where": [0, 1, 2, 3, 5, 6, 10], "gold": [0, 1, 2], "when": [0, 1, 4, 5, 7, 10], "comput": [0, 1, 4, 5, 9], "proba": [0, 1], "multiclasspr": [0, 1, 10], "unlik": [0, 1], "incomplet": 0, "ll": [0, 1, 4, 5], "need": [0, 1, 3, 4, 5, 6, 7], "accordingli": [0, 1], "post": [0, 1, 2], "process": [0, 1, 2, 5, 9], "token": [0, 2, 4, 9, 10], "mani": [0, 1, 3, 4, 5, 6, 7, 10], "deep": 0, "learn": [0, 1, 7, 10], "batch": 0, "thu": [0, 5], "batchedmodel": 0, "simpl": [0, 1, 5, 6, 10], "must": [0, 1, 5, 10], "predict_minibatch": 0, "convert": [0, 1], "jsondict": 0, "object": [0, 4], "appropri": [0, 1, 4], "represent": [0, 1, 2], "typic": [0, 1], "map": [0, 1, 2, 3, 5], "align": [0, 1], "tensor": 0, "befor": [0, 3, 4, 6, 10], "want": [0, 1, 3, 4, 5, 6, 9, 10], "overrid": [0, 3, 5], "max_minibatch_s": 0, "determin": [0, 1, 5, 10], "If": [0, 1, 4, 5, 6, 9, 10], "remot": [0, 7], "consid": 0, "batchedremotemodel": 0, "base": [0, 2, 3, 5, 9, 10], "request": [0, 4, 5], "thread": 0, "pool": 0, "defin": [0, 1, 3, 5, 10], "black": [0, 1], "box": [0, 1, 10], "intern": [0, 1, 5], "richer": 0, "view": [0, 1, 4, 10], "correspond": [0, 1], "hidden": [0, 5], "activ": [0, 1, 10], "gradient": [0, 2, 10], "word": [0, 1, 2, 10], "embed": [0, 2, 4, 9], "attent": [0, 2, 9], "sever": [0, 1, 3, 4], "two": [0, 1, 2, 3, 5, 10], "differ": [0, 1, 2, 4, 5, 10], "vector": [0, 1], "easili": [0, 1, 3, 4, 6, 10], "output_emb": 0, "cl": 0, "top": [0, 1, 5], "mean_word_emb": 0, "mean": [0, 1, 10], "than": [0, 1, 4, 5, 10], "segment": [0, 2], "premise_token": 0, "hypothesis_token": 0, "here": [0, 3, 5], "refer": [0, 1, 2, 3, 5, 7, 10], "premise_grad": 0, "tokengradi": [0, 1], "hypothesis_grad": 0, "similarli": [0, 1, 10], "full": [0, 1, 4, 5, 7, 10], "baz": 0, "spam": 0, "egg": 0, "attention_layer0": 0, "attentionhead": [0, 1], "attention_layer1": 0, "attention_layer2": 0, "sinc": [0, 1, 4, 5], "dictionari": [0, 5], "dataclass": 0, "popul": 0, "loop": 0, "would": [0, 1], "entri": [0, 5, 10], "becaus": [0, 1, 3, 5], "often": [0, 1, 7], "tightli": 0, "coupl": [0, 4], "intermedi": 0, "level": [0, 1, 2, 9, 10], "expos": 0, "automat": [0, 1, 5, 10], "projector": [0, 2, 4, 9], "salienc": [0, 2, 6, 7, 9], "multi": [0, 2, 4, 10], "head": [0, 1, 10], "wai": [0, 1, 2, 4, 5, 10], "By": [0, 1, 10], "default": [0, 1, 4, 5, 6, 9, 10], "requir": [0, 1, 3, 5, 10], "howev": [0, 4, 5], "fals": [0, 1, 10], "wish": 0, "accept": [0, 1], "And": [0, 3, 5], "logic": [0, 1, 5], "bypass": 0, "input_token": [0, 1], "ex": 0, "get": [0, 1, 3, 5, 7, 9, 10], "rest": [0, 1], "though": [0, 1, 5, 6, 10], "omit": 0, "entir": [0, 1, 2, 10], "aid": 0, "umap": [0, 1, 2, 7, 10], "plug": [0, 1], "ins": 0, "some": [0, 1, 2, 5, 6, 10], "3": [0, 1, 5, 10], "local": [0, 1, 4], "gradientnorm": 0, "score": [0, 9, 10], "integr": [0, 2, 9], "tokenembed": [0, 1], "well": [0, 1, 2, 4, 5, 6, 10], "target": [0, 2, 10], "pin": [0, 1, 4, 10], "involv": [0, 1, 3, 4, 6], "lit_model": 0, "lit_dataset": 0, "model_output": 0, "none": [0, 1, 5, 10], "config": 0, "runtim": [0, 1, 4], "threshold": [0, 2, 10], "classif": [0, 4, 9, 10], "unconstrain": 0, "up": [0, 1, 4, 
5, 10], "correctli": [0, 10], "particular": [0, 1, 5, 10], "while": [0, 1, 3, 4, 5, 10], "aggreg": [0, 9], "summari": 0, "respons": [0, 1, 2, 5], "what": [0, 1, 3, 5, 10], "A": [0, 1, 3, 5, 7, 10], "over": [0, 1, 7, 10], "relev": 0, "find_field": 0, "find": [0, 1, 2, 3, 10], "grad_field": 0, "util": [0, 10], "find_spec_kei": 0, "f": 0, "tokens_field": 0, "pytyp": 0, "disabl": [0, 5], "attribut": [0, 5, 9], "error": [0, 2, 6, 7, 10], "assert": 0, "isinst": 0, "given": [0, 1, 5, 10], "log": [0, 1, 3, 10], "info": [0, 3], "found": [0, 3, 5, 7, 9, 10], "len": 0, "0": [0, 1, 2, 5, 6, 10], "pylint": 0, "g": [0, 1, 3, 4, 7, 10], "explicit": [0, 4, 5], "length": [0, 1, 4, 10], "test": [0, 1, 2, 5], "do": [0, 1, 4, 5], "work": [0, 1, 2, 4, 5], "dtype": [0, 1], "tokensali": 0, "ha": [0, 1, 4, 5, 10], "overhead": 0, "benefit": 0, "flexibl": [0, 1], "specifi": [0, 1, 2, 4, 5], "complex": [0, 1, 5], "depend": [0, 2, 3, 4, 5, 10], "between": [0, 1, 2, 5, 10], "multipl": [0, 2, 3, 5, 10], "simplemetr": 0, "unpack": 0, "is_compat": 0, "name": [0, 1, 2, 3, 4, 5, 7, 10], "regressionmetr": 0, "standard": [0, 1, 4, 10], "regress": [0, 9, 10], "field_spec": 0, "littyp": 0, "bool": 0, "true": [0, 2, 3, 5, 10], "regressionscor": [0, 1], "float": [0, 1], "label_spec": 0, "scalar": [0, 4], "pred_spec": 0, "del": 0, "mse": 0, "sklearn_metr": 0, "mean_squared_error": 0, "pearsonr": 0, "scipy_stat": 0, "spearmanr": 0, "kei": [0, 1], "compar": [0, 1, 2, 4, 9], "These": [0, 1, 3, 4, 5, 10], "onli": [0, 1, 3, 4, 5, 10], "techniqu": [0, 1, 2], "back": [0, 1, 5, 7, 10], "translat": [0, 1, 2, 4, 5, 10], "feedback": [0, 5, 10], "adversari": [0, 1, 2], "attack": [0, 1], "generate_al": 0, "arg": [0, 3, 5], "current": [0, 1, 4, 5, 10], "belong": [0, 1, 10], "conveni": [0, 4, 5, 10], "As": [0, 1, 10], "argument": [0, 3], "substitut": [0, 1], "replac": [0, 1, 10], "foreign": 0, "sourc": [0, 1, 3, 6, 9, 10], "order": [0, 1, 4, 5, 10], "paraphras": [0, 1, 10], "googl": [0, 1, 2], "cloud": [0, 2], "perform": [0, 1, 5, 10], "those": [0, 1, 3, 4, 10], "project": [0, 1, 5, 7, 10], "doc": [0, 3, 5, 10], "setup": [0, 10], "Then": [0, 1, 3, 6], "download": [0, 10], "applic": [0, 1, 7, 9, 10], "credenti": 0, "google_application_credenti": 0, "environ": [0, 3, 4], "variabl": [0, 3, 5], "point": [0, 1, 4, 5, 10], "With": [0, 5, 10], "make": [0, 1, 4, 5, 10], "backtranl": 0, "constructor": [0, 4, 5, 6], "time": [0, 4, 10], "number": [0, 1, 2, 4, 5, 10], "lime": [0, 2, 10], "pivot": 0, "dsl": 0, "auto": 0, "form": 0, "config_spec": 0, "sparsemultilabel": [0, 1], "bg": 0, "de": 0, "el": 0, "en": [0, 2], "e": [0, 1, 3, 4, 7, 10], "fr": 0, "hi": [0, 2, 10], "ru": 0, "sw": 0, "th": 0, "tr": 0, "ur": 0, "vi": 0, "zh": 0, "give": [0, 1, 6, 10], "its": [0, 1, 5, 10], "interpreter_control": 0, "slider": [0, 1, 10], "numer": [0, 1, 2, 4, 10], "min_val": 0, "max_val": 0, "boolean": [0, 5], "booleanlittyp": 0, "checkbox": [0, 1, 10], "dropdown": [0, 10], "seri": 0, "comma": [0, 10], "separ": [0, 1, 3, 4, 5, 10], "pars": [0, 1, 3, 5], "suppli": 0, "singlefieldmatch": 0, "act": [0, 1], "multifieldmatch": 0, "except": [0, 7], "select": [0, 2, 4, 5, 9], "control": [0, 1, 4, 5, 7, 10], "choos": [0, 1], "perturb": [0, 1, 2], "hotflip": [0, 1, 2, 10], "around": [0, 5], "web": [0, 4, 5, 7, 10], "record": 0, "serial": 0, "json": 0, "commun": [0, 1], "client": [0, 7, 9, 10], "introduc": [0, 5, 10], "understand": [0, 1, 2, 10], "shape": [0, 1], "being": [0, 1, 5, 10], "specif": [0, 1, 2, 5, 7, 10], "properti": [0, 5], "whose": 0, "annot": [0, 1], "hierarch": 0, 
"inherit": [0, 7], "canon": 0, "kind": 0, "v": [0, 10], "meta": 0, "context": 0, "implic": 0, "per": [0, 1, 2, 5, 10], "everi": [0, 1, 5, 10], "init_spec": 0, "instanc": [0, 2, 5, 7, 9], "empti": [0, 3], "noth": 0, "show": [0, 1, 2, 5, 10], "section": [0, 1, 10], "global": [0, 1, 5, 7], "subset": [0, 1, 2, 10], "paramet": [0, 3, 10], "alwai": [0, 1, 3, 7], "meta_spec": 0, "essenti": 0, "ideal": [0, 1], "wherea": 0, "slight": 0, "variat": 0, "tradit": 0, "metricresult": 0, "higher": [0, 1], "lower": [0, 5, 10], "closer": 0, "zero": 0, "better": [0, 1, 10], "encapsul": 0, "impli": 0, "index": [0, 5, 10], "element": [0, 1, 4, 7], "contain": [0, 1, 4, 5, 7, 10], "po": 0, "sequencetag": [0, 1], "item": 0, "indic": [0, 1, 10], "transit": [0, 1], "zip": 0, "strict": 0, "10": [0, 1, 5], "pseudo": 0, "ground": [0, 1, 10], "truth": [0, 1, 10], "repres": [0, 1], "appear": [0, 1, 10], "assum": [0, 3, 10], "pattern": [0, 1, 5], "manipul": [0, 5, 10], "could": [0, 1, 7, 10], "former": [0, 7, 10], "swap": 0, "mask": [0, 1, 2], "feed": [0, 1, 7], "fill": [0, 10], "plai": [0, 6, 9], "critic": 0, "role": 0, "reliabl": 0, "interoper": 0, "robust": [0, 1, 10], "univers": 0, "is_compatible_with_dataset": 0, "wordreplac": 0, "doe": [0, 1, 3, 4, 10], "curv": 0, "gradientdotinput": [0, 1], "shoulddisplaymodul": 0, "suffici": [0, 1], "caus": [0, 5, 10], "jitter": [0, 2, 10], "disappear": 0, "reorder": [0, 5], "resiz": [0, 4, 7], "switch": [0, 1, 2, 10], "heterogen": 0, "findspeckei": 0, "identifi": 0, "interest": [0, 1, 10], "respect": [0, 1, 10], "import": [0, 1, 2, 5, 10], "build": [0, 1, 5, 9], "litmetadata": 0, "litapp": [0, 5], "raw": [0, 1], "jupyt": [0, 6], "repl": 0, "encourag": [0, 1], "explicitli": [0, 1, 5, 10], "avoid": [0, 5], "chase": 0, "red": [0, 10], "her": 0, "mnli": 0, "buffet": 0, "la": 0, "cart": 0, "travel": 0, "task": [0, 1, 2, 5, 6, 10], "notic": 0, "967": 0, "024": 0, "009": 0, "classificationresult": 0, "human": [0, 1], "readabl": 0, "classification_result": 0, "predicted_class": 0, "integ": 0, "predefin": [0, 1], "rang": [0, 1, 2, 10], "overridden": 0, "seen": 0, "summar": [0, 2, 10], "tabl": [0, 1, 4, 5], "bracket": 0, "syntax": 0, "num_token": [0, 1], "numpi": [0, 1], "arrai": [0, 1], "insid": [0, 3, 10], "descript": [0, 3, 5], "untoken": 0, "generatedtext": [0, 1], "seq2seq": 0, "url": [0, 1, 4, 5, 9], "generatedurl": 0, "may": 0, "real": 0, "inappropri": 0, "searchqueri": 0, "search": [0, 1, 10], "queri": [0, 1, 2, 4, 5], "opaqu": 0, "ignor": 0, "referencetext": [0, 1], "mt": 0, "tupl": [0, 1], "generatedtextcandid": [0, 1], "candid": [0, 1, 2, 10], "beam": [0, 1, 2], "tokentopkpr": 0, "32768": 0, "32767": 0, "int": 0, "imagebyt": [0, 1], "imag": [0, 4, 9], "base64": [0, 1], "encod": [0, 1, 4, 10], "jpegbyt": 0, "pngbyte": 0, "referencescor": [0, 1], "vocabulari": 0, "multiclass": [0, 2], "probabl": [0, 1, 2, 4, 10], "num_label": 0, "non": [0, 1, 2, 4, 10], "exclus": [0, 1], "sparsemultilabelpr": [0, 1], "spars": 0, "tag": [0, 1, 3, 5, 10], "spanlabel": [0, 1], "span": [0, 10], "j": [0, 1, 5], "edgelabel": [0, 1], "edg": [0, 2], "structur": [0, 5, 9, 10], "corefer": 0, "srl": [0, 1], "arxiv": [0, 1], "org": [0, 1, 9], "ab": [0, 1], "1905": 0, "06316": 0, "multisegmentannot": [0, 1], "byte": [0, 1], "annotationclust": 0, "emb_dim": [0, 1], "imagegradi": [0, 1], "pixel": [0, 9], "image_height": [0, 1], "image_width": [0, 1], "color_channel": [0, 1], "group": [0, 1, 5, 7, 10], "num_head": [0, 1], "plain": [0, 1, 2], "further": 0, "stringlittyp": 0, "urllittyp": 0, "collis": 0, 
"protect": 0, "page_titl": 0, "titl": [0, 1, 5, 9], "canonical_url": [0, 10], "shortlink": 0, "copi": [0, 4, 5, 10], "link": [0, 6, 10], "default_layout": 0, "demo_mod": [0, 3, 4], "kiosk": 0, "save": [0, 1, 4, 5, 10], "untrust": 0, "inline_doc": 0, "markdown": 0, "panel": [0, 5, 7, 10], "onboard_start_doc": 0, "onboard": 0, "splash": 0, "screen": [0, 5, 10], "onboard_end_doc": 0, "last": [0, 2, 3], "server_flag": [0, 5], "litcanonicallayout": [0, 5], "lm_layout": 0, "upper": [0, 5, 10], "embeddingsmodul": 0, "datatablemodul": [0, 5], "datapointeditormodul": [0, 5], "languagemodelpredictionmodul": 0, "confusionmatrixmodul": 0, "generatormodul": 0, "lm": 0, "get_flag": 0, "lm_demo": [0, 2, 3, 5], "achiev": [0, 2], "major": [0, 10], "content": [0, 4, 10], "area": [0, 10], "left": [0, 1, 2, 5, 10], "right": [0, 1, 2, 5, 10], "shown": [0, 1, 3, 10], "initi": [0, 1, 10], "set_default": [0, 3, 5], "my_layout_nam": 0, "fly": 0, "param": [0, 4, 5], "preced": 0, "comprehens": 0, "remov": [0, 1, 5, 10], "chang": [0, 1, 3, 5, 10], "experiment": [0, 1, 4, 5, 10], "desir": [0, 2, 5, 10], "altern": [0, 1], "connect": [0, 1, 4], "after": [0, 1, 5, 10], "instal": [0, 3, 9], "pip": [0, 6], "litwidget": [0, 6], "height": [0, 5, 10], "cell": [0, 1, 4, 6, 10], "widget": [0, 4, 7], "tab": [0, 1, 4, 5, 10], "open_in_new_tab": 0, "certain": 0, "stop": 0, "shut": 0, "down": [0, 1, 10], "free": 0, "resourc": 0, "plan": 0, "doesn": [0, 7, 10], "still": [0, 1], "standalon": [0, 4], "handi": [0, 1], "reload": [0, 1, 5], "click": [0, 1, 2, 5, 10], "sst2data": 0, "print": 0, "sentenc": [0, 1, 2, 10], "glue_model": [0, 4], "sst2model": 0, "cls_emb": 0, "Or": 0, "lime_explain": 0, "lit_components_exampl": [0, 6], "ipynb": [0, 1, 6], "happi": 0, "coincid": 0, "tool": [0, 1, 5, 7, 10], "relat": [0, 1, 10], "solv": 0, "case": [0, 1, 2, 3, 4, 5, 6, 10], "question": [0, 1], "qa": 0, "trivial": 0, "practic": [0, 1, 5], "endpoint": [0, 1, 5, 7], "lit": [1, 2, 5, 7, 10], "agnost": 1, "compat": [1, 5, 10], "python": [1, 3, 4, 5, 6, 7, 9, 10], "In": [1, 4, 5, 6, 10], "ve": 1, "tf2": [1, 2], "jax": 1, "pytorch": 1, "us": [1, 2, 3, 5, 7, 9, 10], "custom": [1, 2, 4, 7, 9, 10], "c": [1, 10], "clif": 1, "rpc": 1, "aren": [1, 10], "constraint": [1, 2], "beyond": 1, "impos": 1, "platform": 1, "coexist": 1, "underli": [1, 4], "hardwar": 1, "gpu": [1, 4, 6], "scale": [1, 9], "dataset": [1, 2, 5, 6, 7, 9, 10], "kera": [1, 2, 4], "easi": [1, 5, 10], "access": [1, 4, 5, 6, 9, 10], "interpret": [1, 2, 6, 7, 10], "great": [1, 10], "place": [1, 5, 10], "small": [1, 5, 6], "experi": 1, "academ": 1, "more": [1, 3, 4, 5, 6, 7, 10], "due": 1, "graph": [1, 10], "session": 1, "cannot": 1, "invoc": 1, "weight": [1, 5], "export": [1, 5, 10], "savedmodel": [1, 2], "an": [1, 3, 4, 5, 7, 9, 10], "eager": 1, "lead": [1, 5, 10], "much": [1, 5, 10], "simpler": 1, "servomat": 1, "usag": [1, 4, 9], "remain": 1, "server": [1, 3, 4, 5, 6, 7, 9, 10], "stub": [1, 5], "handl": [1, 2, 4, 5, 7, 10], "format": [1, 2, 4, 5, 10], "convers": 1, "purpos": 1, "interfac": [1, 5], "remote_model": [1, 4], "best": [1, 2, 5], "isn": [1, 10], "situat": 1, "staticpredict": 1, "lookup": [1, 10], "quickli": [1, 10], "brows": 1, "retain": 1, "rich": 1, "almost": 1, "tfrecord": [1, 6], "capacitor": 1, "sstabl": 1, "even": 1, "sql": 1, "api": [1, 3, 4, 5, 6, 7, 9, 10], "our": [1, 3, 4, 5, 6, 9, 10], "tfd": 1, "ui": [1, 5, 6, 7, 9], "workflow": [1, 9], "extens": [1, 5, 9], "system": [1, 3, 5, 7, 9, 10], "modal": [1, 4], "common": [1, 2, 5], "nlp": [1, 3, 6, 9], "domain": 1, "ad": [1, 4, 
5, 9, 10], "explor": [1, 2, 5, 7, 10], "side": [1, 2, 10], "avail": [1, 4, 5, 6, 10], "matric": 1, "basic": [1, 9], "sentiment": 1, "simple_tf2_demo": 1, "evalu": [1, 2, 4, 10], "neg": [1, 10], "design": [1, 4, 9], "null_idx": 1, "commonli": [1, 7, 10], "precis": [1, 10], "recal": 1, "f1": 1, "auc": 1, "aucpr": 1, "comment": [1, 3], "toxic": [1, 10], "posit": [1, 2, 10], "margin": [1, 2, 10], "bia": 1, "space": [1, 2, 10], "bucket": [1, 10], "facet": [1, 5, 7, 10], "scatterplot": 1, "st": 1, "b": [1, 10], "textual": 1, "glue_demo": [1, 2, 3, 6], "image_demo": [1, 2, 3], "quit": 1, "matur": [1, 5], "highlight": [1, 2, 10], "diff": [1, 2], "decod": [1, 2, 10], "emit": 1, "varieti": [1, 2, 4, 6], "part": [1, 2, 7, 10], "speech": 1, "entiti": 1, "ner": 1, "mix": 1, "privileg": 1, "otherwis": [1, 5], "kernel": 1, "shap": 1, "valid": [1, 3], "check": [1, 2, 6, 9, 10], "mark": 1, "option": [1, 3, 5], "least": 1, "multilabel": 1, "penguin": [1, 4], "stat": 1, "penguin_demo": [1, 2], "That": 1, "awai": [1, 2], "move": [1, 5], "individu": [1, 2, 4, 5, 10], "color": [1, 5, 10], "break": 1, "exist": [1, 4, 9, 10], "open_imag": 1, "classifi": [1, 2, 10], "comparison": [1, 5, 7, 10], "onc": [1, 10], "io": [1, 2, 3, 6, 7, 9], "html": [1, 2, 5, 7], "navig": [1, 6, 10], "explan": [1, 10], "proport": 1, "l2": [1, 2], "propto": 1, "nabla_": 1, "x_i": 1, "hat": 1, "y": [1, 10], "_2": 1, "result": [1, 2, 5, 6, 10], "choic": 1, "argmax": 1, "product": [1, 4], "cdot": 1, "grad": [1, 2], "direct": [1, 3], "influenc": [1, 10], "suggest": [1, 4, 10], "stronger": 1, "wa": [1, 5, 10], "grad_for": 1, "normal": [1, 5, 10], "contribut": 1, "along": [1, 4, 7, 10], "sundararajan": 1, "et": [1, 2], "al": [1, 2], "2017": 1, "algorithm": 1, "instrument": 1, "modifi": [1, 5, 10], "plu": 1, "grad_target_field_kei": 1, "convent": [1, 5], "interpol": 1, "token_emb": 1, "input_text": 1, "token_grad": 1, "concret": 1, "contact": 1, "team": [1, 2], "assist": 1, "drop": [1, 10], "train": [1, 2, 10], "linear": 1, "reconstruct": 1, "origin": [1, 5, 10], "trade": 1, "off": [1, 5, 10], "slow": [1, 4], "noisi": 1, "longer": [1, 10], "ablat": 1, "compens": 1, "increas": 1, "explain": [1, 3, 10], "movi": [1, 10], "terribl": [1, 10], "ask": [1, 10], "receiv": 1, "okai": 1, "present": 1, "long": [1, 3, 4, 10], "deriv": [1, 5], "impact": 1, "prompt": [1, 2], "causal": 1, "granular": [1, 2], "sub": [1, 10], "paragraph": [1, 2], "refin": 1, "densiti": 1, "shot": 1, "eaxmpl": 1, "chain": 1, "thought": [1, 5], "walkthrough": [1, 2], "debug": [1, 2, 7], "llm": [1, 2], "ai": [1, 2], "toolkit": [1, 2, 5], "dev": [1, 2, 7], "model_behavior": [1, 2], "paper": [1, 9], "2404": 1, "07498": 1, "pleas": [1, 5, 9], "cite": [1, 9], "articl": 1, "tenney2024interact": 1, "author": [1, 9], "tennei": [1, 9], "ian": [1, 9], "mullin": 1, "ryan": 1, "du": 1, "pandya": 1, "shree": 1, "kahng": 1, "minsuk": 1, "dixon": 1, "luca": 1, "journal": 1, "preprint": 1, "year": [1, 9], "2024": 1, "gemma": 1, "llama": 1, "mistral": 1, "gpt": 1, "either": [1, 5, 10], "kerasnlp": [1, 2], "lit_gemma": 1, "lm_salience_demo": [1, 2], "instrumented_keras_lm": 1, "pretrained_lm": 1, "ebert": 1, "2022": 1, "k": 1, "bag": 1, "appli": 1, "button": [1, 5, 10], "6": [1, 5], "seek": 1, "column": [1, 10], "tutori": [1, 2], "analyz": [1, 10], "actual": [1, 5, 10], "been": [1, 3, 5, 10], "grad_target": 1, "channel": 1, "2d": [1, 10], "abl": [1, 4], "mobilenet": [1, 4], "blur": 1, "xrai": 1, "imagesali": 1, "layer_0": 1, "align_in": 1, "align_out": 1, "layer_1": 1, "layer_2": 1, "target_token": 
1, "version": 1, "latent": [1, 2], "pca": [1, 2, 7, 10], "pan": [1, 10], "zoom": 1, "rotat": [1, 10], "shift": [1, 5, 10], "colormap": 1, "menu": [1, 2, 4, 10], "bleu": [1, 10], "whole": [1, 4], "try": 1, "power": [1, 5], "header": [1, 7, 10], "intersect": 1, "drag": [1, 10], "decis": 1, "boundari": 1, "respond": [1, 4, 5], "calcul": [1, 10], "optim": 1, "cost": 1, "ratio": 1, "rel": 1, "penalti": 1, "equal": 1, "costli": 1, "term": [1, 7, 10], "twice": 1, "overal": [1, 5, 10], "fair": [1, 2], "One": [1, 5], "demograph": [1, 2], "pariti": [1, 2], "attempt": 1, "percentag": 1, "accuraci": [1, 10], "opportun": 1, "among": 1, "effect": [1, 5], "minimum": 1, "maximum": [1, 10], "chart": 1, "averag": 1, "chosen": [1, 5], "done": [1, 5, 10], "across": [1, 5, 10], "inspir": 1, "prior": [1, 10], "toolbar": [1, 5, 9], "megaplot": [1, 4], "100k": [1, 4], "mous": 1, "scroll": 1, "reset": [1, 10], "futur": [1, 10], "releas": 1, "contrast": 1, "high": [1, 9, 10], "concept": 1, "gender": 1, "race": [1, 5], "akin": 1, "cav": 1, "intuit": 1, "measur": [1, 10], "sensit": [1, 10], "mention": [1, 2], "rather": [1, 5], "begin": 1, "low": 1, "15": [1, 4, 9], "selector": 1, "actor": 1, "actress": 1, "next": [1, 10], "final": [1, 5], "newli": [1, 4, 10], "split": 1, "second": [1, 5, 10], "9": [1, 5], "blue": [1, 10], "baselin": 1, "technic": 1, "null": [1, 5, 10], "pitfal": 1, "potenti": 1, "meaningless": 1, "randomli": [1, 10], "meaning": 1, "guard": 1, "whether": [1, 5], "possibli": 1, "remaind": 1, "reject": 1, "insignific": 1, "p": [1, 3], "greater": [1, 5], "05": 1, "happen": 1, "warn": 1, "100": [1, 10], "uniqu": 1, "accommod": 1, "cross": [1, 4], "approach": [1, 5], "too": 1, "min": 1, "upcom": 1, "pictur": 1, "insight": 1, "answer": [1, 10], "my": 1, "behav": 1, "under": [1, 3, 6, 10], "delet": [1, 10], "systemat": 1, "editor": 1, "enter": [1, 4], "duplic": [1, 5, 7, 10], "keep": [1, 4, 10], "track": [1, 10], "relationship": 1, "cycl": [1, 10], "scrambl": [1, 10], "regex": [1, 10], "hand": [1, 7, 10], "shelf": 1, "scrambler": [1, 10], "ebrahimi": 1, "tri": 1, "special": 1, "get_embedding_t": 1, "flip": 1, "maintain": [2, 7], "host": [2, 3, 4, 7, 9], "launcher": 2, "type": [2, 5, 6, 9, 10], "publicli": 2, "visibl": [2, 5], "exampl": [2, 3, 5, 7, 9, 10], "analysi": [2, 9], "sst": 2, "input": [2, 4, 5, 9, 10], "huggingfac": 2, "widest": 2, "output": [2, 4, 5, 6, 9], "metric": [2, 6, 7], "plot": [2, 10], "confid": 2, "ones": 2, "cluster": [2, 9, 10], "gener": [2, 4, 6, 7, 9], "tip": [2, 9], "studi": 2, "public": [2, 3], "websit": 2, "xnli_demo": 2, "14": 2, "jp": 2, "fine": 2, "tune": 2, "whitespac": 2, "delimit": 2, "wordpiec": 2, "stsb_dev": 2, "unrel": 2, "wide": 2, "scatter": [2, 10], "filter": [2, 10], "2b": 2, "7b": 2, "dot": 2, "depth": 2, "t5_demo": [2, 3], "hypothes": 2, "veer": 2, "roug": [2, 10], "machin": [2, 3, 4, 7], "cnndm": 2, "wmt": [2, 4], "corpora": 2, "coref_demo": [2, 3], "ontonot": [2, 10], "winogend": [2, 10], "schema": 2, "ruding": 2, "2018": 2, "profess": 2, "stratifi": 2, "quantifi": 2, "pronoun": [2, 10], "bureau": 2, "labor": 2, "statist": 2, "partial": 2, "setter": 2, "imagenet": 2, "deploi": 3, "onto": 3, "face": 3, "container": 3, "prefer": [3, 5], "engin": [3, 5], "dockerfil": 3, "directori": [3, 5, 6], "front": 3, "gunicorn": 3, "invok": [3, 5], "get_wsgi_app": 3, "wsgi": [3, 4, 7], "gunicorn_config": 3, "shell": 3, "command": [3, 6], "rememb": 3, "trail": 3, "know": [3, 10], "now": 3, "rm": 3, "5432": [3, 5], "launch": [3, 10], "demo_nam": 3, "demo_port": 3, "children": 3, 
"nest": 3, "coref": 3, "is_ev": 3, "is_eval_demo": 3, "Be": 3, "sure": [3, 4, 5], "2345": 3, "bring": 3, "togeth": [3, 10], "combin": 3, "background": [3, 10], "execut": 3, "meet": 3, "locat": 3, "litservertyp": 3, "server_typ": 3, "extern": [3, 4], "unus": 3, "sy": 3, "argv": 3, "known_onli": 3, "rebuild": 3, "your_server_script_path_her": 3, "consum": 3, "absl": 3, "stricli": 3, "recommend": [3, 4, 5, 6], "consol": 3, "suppos": 3, "come": 3, "soon": 3, "submit": 4, "bug": 4, "issu": [4, 5], "modern": 4, "ml": 4, "good": [4, 10], "tabular": [4, 9], "palmer": 4, "unicod": 4, "english": 4, "latin": [4, 5], "modif": 4, "xnli": 4, "lingual": 4, "nli": 4, "multilingu": 4, "comfort": 4, "10k": 4, "speed": 4, "caveat": [4, 5], "lot": 4, "larger": [4, 5], "warm_start": [4, 5, 6], "nativ": 4, "resolut": 4, "smaller": [4, 6], "anywai": 4, "256x256": 4, "thing": [4, 5, 7], "webgl": 4, "scattergl": 4, "dive": [4, 10], "older": [4, 10], "thousand": 4, "preload": 4, "wait": [4, 5, 10], "visit": 4, "bewar": 4, "node": [4, 5], "favorit": 4, "lightweight": 4, "alreadi": [4, 10], "ephemer": 4, "window": [4, 7], "manual": [4, 5, 10], "data_dir": [4, 5], "disk": [4, 5], "abil": 4, "edit": [4, 10], "write": [4, 6], "anyon": 4, "address": [4, 10], "restrict": 4, "configur": [4, 5, 6, 9, 10], "thin": [4, 7], "middlewar": 4, "docker": [4, 9], "data_": 4, "fieldnam": 4, "total": 4, "within": [4, 10], "data0": 4, "data1": 4, "data2": 4, "data0_": 4, "csv": [4, 10], "push": 4, "writabl": 4, "notebook": [4, 9], "ui_stat": 4, "primari": [4, 10], "bulk": 4, "pipelin": [4, 7], "littl": 4, "difficult": 4, "refactor": 4, "primarili": 4, "offici": 4, "facilit": 4, "reus": 4, "fact": 4, "exactli": 4, "quickstart_sst_demo": 4, "gotcha": 5, "central": 5, "piec": [5, 10], "tech": 5, "compon": [5, 7, 9, 10], "observ": 5, "orient": 5, "templat": 5, "declar": 5, "isol": 5, "center": 5, "data": [5, 6, 9], "scalabl": 5, "highli": 5, "fairli": 5, "digest": 5, "heavier": 5, "angular": 5, "roughli": [5, 7], "divid": [5, 10], "itself": 5, "coordin": 5, "footer": 5, "lai": 5, "variou": 5, "go": 5, "later": 5, "bundl": 5, "attach": 5, "dom": 5, "kick": 5, "singleton": [5, 7], "arraign": 5, "minimalist": [5, 10], "bottom": [5, 10], "analyt": [5, 10], "put": [5, 10], "exploratori": [5, 10], "inspect": [5, 10], "v1": [5, 10], "appreci": [5, 10], "custom_layout": 5, "updat": [5, 10], "factor": 5, "model": [5, 7, 9], "modules_servic": 5, "fetch": 5, "static": 5, "helper": [5, 7], "outlin": [5, 10], "dummi": 5, "pig": 5, "customel": 5, "demotextmodul": 5, "selectionserviceindex": 5, "shouldreact": 5, "duplicateformodelcomparison": 5, "4": 5, "privat": 5, "readonli": 5, "colorservic": 5, "getservic": 5, "piglatin": 5, "firstupd": 5, "reactimmedi": 5, "selectionservic": [5, 7], "primaryselectedinputdata": 5, "7": [5, 10], "gettransl": 5, "async": 5, "indexedinput": 5, "const": 5, "promis": 5, "apiservic": 5, "getpiglatin": 5, "8": 5, "await": 5, "loadlatest": 5, "renderimpl": 5, "getdatapointcolor": 5, "div": 5, "stylemap": 5, "checkmodul": 5, "modelspec": 5, "modelsmap": 5, "datasetspec": 5, "11": 5, "12": 5, "htmlelementtagnamemap": 5, "illustr": 5, "definit": 5, "css": 5, "shared_styl": 5, "super": 5, "inject": 5, "easier": 5, "mock": 5, "appstat": 5, "u": [5, 10], "decor": 5, "unit": 5, "reactiv": 5, "mobxlitel": 5, "trigger": 5, "excel": 5, "asynchron": 5, "leverag": 5, "machineri": 5, "lifecycl": 5, "reaction": 5, "whatev": [5, 10], "immedi": 5, "whenev": 5, "react": 5, "won": 5, "condit": 5, "rapidli": 5, "supersed": 5, "recent": 5, 
"rerend": 5, "seem": 5, "pure": 5, "worth": 5, "sometim": [5, 7], "inadequ": 5, "advanc": 5, "suit": 5, "anim": 5, "imper": 5, "draw": 5, "canva": 5, "fortun": 5, "bridg": 5, "queryselector": 5, "shadow": 5, "root": 5, "mess": 5, "reconcili": 5, "shadowroot": 5, "drawcanva": 5, "faceting_control": 5, "construct": 5, "programmat": 5, "member": 5, "oppos": [5, 10], "destroi": 5, "recreat": 5, "brought": 5, "lose": 5, "previous": [5, 10], "held": 5, "snippet": 5, "examplemodul": 5, "facetingcontrol": 5, "createel": 5, "facetschang": 5, "event": 5, "customev": 5, "contextnam": 5, "addeventlisten": 5, "eventlisten": 5, "clang": 5, "instead": [5, 10], "id": [5, 10], "dosometh": 5, "litel": 5, "reactiveel": 5, "annotated_text_modul": 5, "annotated_text_vi": 5, "On": [5, 10], "regist": 5, "properli": 5, "clean": 5, "leav": 5, "yarn": 5, "termin": 5, "cd": 5, "watch": 5, "m": [5, 6], "example_nam": 5, "localhost": [5, 6], "ctrl": 5, "cmd": 5, "r": 5, "hard": 5, "refresh": 5, "pick": 5, "hot": 5, "subsequ": 5, "conjunct": 5, "stale": 5, "effort": 5, "potato": [5, 7], "theme": 5, "custom_modul": 5, "env": 5, "asset": 5, "client_root": 5, "potato_demo": 5, "parent_dir": 5, "o": 5, "join": 5, "pathlib": 5, "__file__": 5, "absolut": 5, "enum": 5, "litmodulenam": 5, "potato_layout": 5, "classificationmodul": 5, "spud": 5, "tastic": 5, "jump": [6, 9], "short": 6, "video": [6, 10], "rout": 6, "foo_data": 6, "foodataset": 6, "bar_data": 6, "bardataset": 6, "my_model": 6, "mymodel": 6, "wealth": 6, "overview": [6, 9], "lit_sentiment_classifi": 6, "403": 6, "cooki": 6, "site": [6, 9], "outsid": [6, 9], "blaze": 6, "quickstart": 6, "alsologtostderr": 6, "cpu": 6, "overload": 7, "codebas": 7, "folder": 7, "fledg": 7, "job": 7, "fulli": 7, "capit": 7, "accompani": 7, "emoji": 7, "pronounc": 7, "ell": 7, "ey": 7, "tee": 7, "formerli": 7, "polym": 7, "coincident": 7, "cours": 7, "neural": 7, "network": 7, "composit": 7, "strictli": 7, "speak": 7, "litmodul": [7, 9], "noun": 7, "verb": 7, "notabl": 7, "layout": [7, 9], "maxim": 7, "gui": 7, "welcom": 9, "demonstr": 9, "misc": 9, "tenney2020languag": 9, "jame": 9, "wexler": 9, "jasmijn": 9, "bast": 9, "tolga": 9, "bolukbasi": 9, "andi": 9, "coenen": 9, "sebastian": 9, "gehrmann": 9, "ellen": 9, "jiang": 9, "mahima": 9, "pushkarna": 9, "carei": 9, "radebaugh": 9, "emili": 9, "reif": 9, "ann": 9, "yuan": 9, "booktitl": 9, "proceed": 9, "2020": 9, "confer": 9, "empir": 9, "publish": 9, "linguist": 9, "107": 9, "118": 9, "www": 9, "aclweb": 9, "anthologi": 9, "emnlp": 9, "stand": 9, "alon": 9, "multimod": 9, "journei": 9, "tcav": 9, "architectur": 9, "style": 9, "glossari": 9, "privaci": 9, "secur": 9, "tour": 10, "live": 10, "draggabl": 10, "alloc": 10, "vertic": 10, "horizont": 10, "adopt": 10, "organiz": 10, "scheme": 10, "reflect": 10, "focu": 10, "arrow": 10, "darker": 10, "dialog": 10, "declutt": 10, "someon": 10, "els": 10, "clear": 10, "deselect": 10, "legend": 10, "unpin": 10, "indetermin": 10, "progress": 10, "pend": 10, "fail": 10, "failur": 10, "until": 10, "x": 10, "icon": 10, "messag": 10, "star": 10, "screenshot": 10, "toggl": 10, "dimens": 10, "hover": 10, "lasso": 10, "unselect": 10, "consecut": 10, "light": 10, "sort": 10, "were": 10, "prefix": 10, "hide": 10, "exce": 10, "truncat": 10, "ellipsi": 10, "entireti": 10, "collaps": 10, "film": 10, "again": 10, "unstar": 10, "neighbor": 10, "immut": 10, "simplic": 10, "blank": 10, "creation": 10, "altogeth": 10, "highest": 10, "opposit": 10, "hello": 10, "bad": 10, "exact": 10, "broken": 10, "made": 10, 
"versu": 10, "ax": 10, "agreement": 10, "disagr": 10, "clickabl": 10, "disagre": 10, "laid": 10, "axi": 10, "methodologi": 10, "capabl": 10, "wise": 10, "autorun": 10, "uncheck": 10, "valuabl": 10, "expens": 10, "attend": 10, "opac": 10, "magnitud": 10, "negat": 10, "stanford": 10, "treebank": 10, "56": 10, "surprisingli": 10, "But": 10, "truli": 10, "review": 10, "ultim": 10, "depress": 10, "era": 10, "gangster": 10, "strongli": 10, "worst": 10, "elicit": 10, "mildli": 10, "incorrect": 10, "occup": 10, "technician": 10, "particip": 10, "male": 10, "domin": 10, "25": 10, "femal": 10, "bl": 10, "agre": 10, "stereotyp": 10, "resolv": 10, "83": 10, "37": 10, "cnn": 10, "dm": 10, "middl": 10, "erron": 10, "constitu": 10, "alastair": 10, "cook": 10, "captain": 10, "dig": 10, "deeper": 10, "28": 10, "arriv": 10, "searcher": 10, "fast": 10, "approxim": 10, "nearest": 10, "corpu": 10, "retriev": 10, "34": 10, "16": 10, "occurr": 10, "strong": 10, "toward": 10, "phrase": 10}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"lit": [0, 3, 4, 6, 9], "python": 0, "api": 0, "design": 0, "overview": [0, 5], "ad": 0, "model": [0, 1, 2, 4, 6, 10], "data": [0, 1, 2, 4, 10], "valid": 0, "dataset": [0, 4], "transform": [0, 1], "more": 0, "output": [0, 1, 10], "option": [0, 10], "input": [0, 1], "interpret": [0, 9], "compon": [0, 1, 4, 6], "metric": [0, 1, 10], "gener": [0, 1, 10], "backtransl": 0, "configur": 0, "ui": [0, 4, 10], "type": [0, 1, 4], "system": 0, "convent": 0, "compat": 0, "check": 0, "an": [0, 6], "In": 0, "depth": 0, "exampl": [0, 1, 4, 6], "avail": 0, "server": 0, "custom": [0, 3, 5, 6], "layout": [0, 5, 10], "access": 0, "notebook": [0, 6], "us": [0, 4, 6], "outsid": [0, 4], "featur": 1, "framework": 1, "support": 1, "huggingfac": 1, "tf1": 1, "x": 1, "estim": 1, "remot": 1, "host": [1, 6], "static": 1, "predict": [1, 2], "load": [1, 4], "classif": [1, 2], "regress": [1, 2], "score": [1, 2], "multi": 1, "label": 1, "seq2seq": [1, 2], "span": 1, "structur": [1, 2], "multipl": 1, "segment": 1, "tabular": [1, 2], "imag": [1, 2, 3], "token": 1, "base": 1, "salienc": [1, 10], "gradient": 1, "norm": 1, "dot": 1, "integr": [1, 3, 4], "lime": 1, "target": 1, "select": [1, 10], "sequenc": [1, 2], "cluster": 1, "attribut": 1, "pixel": 1, "attent": [1, 10], "embed": [1, 10], "projector": [1, 10], "aggreg": 1, "analysi": [1, 10], "confus": [1, 10], "matrix": [1, 10], "scalar": [1, 10], "plot": 1, "binari": 1, "threshold": 1, "partial": 1, "depend": 1, "dive": 1, "tcav": 1, "statist": 1, "signific": 1, "sort": 1, "cosin": 1, "similar": [1, 2], "counterfactu": 1, "manual": 1, "edit": 1, "demo": [2, 6], "do": [2, 6], "NOT": [2, 6], "remov": [2, 6], "section": 2, "header": 2, "sentiment": [2, 10], "nli": 2, "glue": 2, "multilingu": 2, "xnli": 2, "textual": 2, "st": 2, "b": 2, "stsb": 2, "gemma": 2, "t5": 2, "languag": [2, 4], "lm": 2, "bert": 2, "gpt": 2, "2": 2, "gender": [2, 10], "bia": [2, 10], "corefer": [2, 10], "coref": 2, "multimod": 2, "penguin": 2, "mobilenet": 2, "run": [3, 6], "docker": 3, "contain": 3, "basic": 3, "usag": 3, "instanc": 3, "default": 3, "build": 3, "your": [3, 6], "own": 3, "frequent": 4, "ask": 4, "question": 4, "scale": 4, "size": 4, "larg": 4, "privaci": 4, "secur": 4, "i": 4, "have": 4, "proprietari": 4, "my": 4, "team": 4, "workflow": 4, "send": 4, "from": 4, "anoth": 4, "tool": [4, 9], "download": 4, "export": 4, "train": 4, "frontend": 5, "develop": 5, "guid": [5, 10], "high": 5, "level": 5, "applic": 5, "architectur": 5, "bootstrap": 5, "initi": 5, "modul": 
[5, 10], "litmodul": 5, "setup": 5, "function": 5, "escap": 5, "hatch": 5, "state": 5, "child": 5, "element": 5, "style": 5, "tip": 5, "open": 5, "sourc": 5, "client": 5, "get": 6, "start": 6, "instal": 6, "colab": 6, "stand": 6, "alon": 6, "standalon": 6, "exist": 6, "glossari": 7, "learn": 9, "research": 9, "datapoint": 10, "toolbar": 10, "top": 10, "bar": 10, "global": 10, "set": 10, "url": 10, "share": 10, "main": 10, "statu": 10, "compar": 10, "slice": 10, "detail": 10, "tabl": 10, "editor": 10, "map": 10, "user": 10, "journei": 10, "debug": 10, "text": 10}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 60}, "alltitles": {"LIT Python API": [[0, "lit-python-api"]], "Design Overview": [[0, "design-overview"]], "Adding Models and Data": [[0, "adding-models-and-data"]], "Validating Models and Data": [[0, "validating-models-and-data"]], "Datasets": [[0, "datasets"]], "Transformations": [[0, "transformations"]], "Models": [[0, "models"]], "Adding more outputs": [[0, "adding-more-outputs"]], "Optional inputs": [[0, "optional-inputs"]], "Interpretation Components": [[0, "interpretation-components"]], "Metrics": [[0, "metrics"], [1, "metrics"]], "Generators": [[0, "generators"], [1, "generators"]], "Backtranslator Generator": [[0, "backtranslator-generator"]], "Configuration UI": [[0, "configuration-ui"]], "Type System": [[0, "type-system"]], "Conventions": [[0, "conventions"]], "Compatibility Checks": [[0, "compatibility-checks"]], "An In-Depth Example": [[0, "an-in-depth-example"]], "Available types": [[0, "available-types"]], "Server Configuration": [[0, "server-configuration"]], "Customizing the Layout": [[0, "customizing-the-layout"]], "Accessing the LIT UI in Notebooks": [[0, "accessing-the-lit-ui-in-notebooks"]], "Using LIT components outside of LIT": [[0, "using-lit-components-outside-of-lit"]], "Components and Features": [[1, "components-and-features"]], "Framework and Model Support": [[1, "framework-and-model-support"]], "HuggingFace Transformers": [[1, "huggingface-transformers"]], "TF1.x Estimator": [[1, "tf1-x-estimator"]], "Remote or hosted models": [[1, "remote-or-hosted-models"]], "Static predictions": [[1, "static-predictions"]], "Data loading": [[1, "data-loading"]], "Input and Output Types": [[1, "input-and-output-types"]], "Classification": [[1, "classification"]], "Regression / Scoring": [[1, "regression-scoring"]], "Multi-label classification": [[1, "multi-label-classification"]], "Seq2Seq / Generation": [[1, "seq2seq-generation"]], "Span Labeling and Structured Prediction": [[1, "span-labeling-and-structured-prediction"]], "Multiple input segments": [[1, "multiple-input-segments"]], "Tabular data": [[1, "tabular-data"]], "Images": [[1, "images"]], "Token-based Salience": [[1, "token-based-salience"]], "Gradient Norm": [[1, "gradient-norm"]], "Gradient-dot-Input": [[1, "gradient-dot-input"]], "Integrated Gradients": [[1, "integrated-gradients"]], "LIME": [[1, "lime"]], "Target Selection on Classification Output": [[1, "target-selection-on-classification-output"]], "Sequence Salience": [[1, "sequence-salience"]], "Salience Clustering": [[1, "salience-clustering"]], "Tabular Feature Attribution": [[1, "tabular-feature-attribution"]], "Pixel-based Salience": [[1, "pixel-based-salience"]], "Attention": [[1, "attention"], [10, 
"attention"]], "Embedding Projector": [[1, "embedding-projector"], [10, "embedding-projector"]], "Aggregate Analysis": [[1, "aggregate-analysis"]], "Confusion Matrix": [[1, "confusion-matrix"], [10, "confusion-matrix"]], "Scalar Plots": [[1, "scalar-plots"]], "Binary Classification Thresholds": [[1, "binary-classification-thresholds"]], "Partial Dependence Plots": [[1, "partial-dependence-plots"]], "Dive": [[1, "dive"]], "TCAV": [[1, "tcav"]], "Example": [[1, "example"]], "Statistical Significance": [[1, "statistical-significance"]], "Sorting by Cosine Similarity": [[1, "sorting-by-cosine-similarity"]], "Counterfactual Analysis": [[1, "counterfactual-analysis"]], "Manual Editing": [[1, "manual-editing"]], "Demos": [[2, "demos"]], "Classification ": [[2, "classification"]], "Sentiment and NLI ": [[2, "sentiment-and-nli"]], "Multilingual (XNLI) ": [[2, "multilingual-xnli"]], "Regression / Scoring ": [[2, "regression-scoring"]], "Textual Similarity (STS-B) ": [[2, "textual-similarity-sts-b"]], "Sequence-to-Sequence ": [[2, "sequence-to-sequence"]], "Gemma ": [[2, "gemma"]], "T5 ": [[2, "t5"]], "Language Modeling ": [[2, "language-modeling"]], "BERT and GPT-2 ": [[2, "bert-and-gpt-2"]], "Structured Prediction ": [[2, "structured-prediction"]], "Gender Bias in Coreference ": [[2, "gender-bias-in-coreference"]], "Multimodal ": [[2, "multimodal"]], "Tabular Data: Penguin Classification ": [[2, "tabular-data-penguin-classification"]], "Image Classification with MobileNet ": [[2, "image-classification-with-mobilenet"]], "Running LIT in a Docker container": [[3, "running-lit-in-a-docker-container"]], "Basic Usage": [[3, "basic-usage"]], "Integrating Custom LIT Instances with the Default Docker Image": [[3, "integrating-custom-lit-instances-with-the-default-docker-image"]], "Building Your Own Image": [[3, "building-your-own-image"]], "Frequently Asked Questions": [[4, "frequently-asked-questions"]], "Model and Data Types": [[4, "model-and-data-types"]], "Languages": [[4, "languages"]], "Scale": [[4, "scale"]], "Dataset Size": [[4, "dataset-size"]], "Large Models": [[4, "large-models"]], "Privacy and Security": [[4, "privacy-and-security"]], "I have proprietary data. 
Is LIT secure for my team to use?": [[4, "i-have-proprietary-data-is-lit-secure-for-my-team-to-use"]], "Workflow and Integrations": [[4, "workflow-and-integrations"]], "Sending examples from another tool": [[4, "sending-examples-from-another-tool"]], "Downloading or exporting data": [[4, "downloading-or-exporting-data"]], "Loading data from the UI": [[4, "loading-data-from-the-ui"]], "Using components outside the LIT UI": [[4, "using-components-outside-the-lit-ui"]], "Training models with LIT": [[4, "training-models-with-lit"]], "Frontend Developer Guide": [[5, "frontend-developer-guide"]], "High Level Overview": [[5, "high-level-overview"]], "Application Architecture": [[5, "application-architecture"]], "Bootstrapping": [[5, "bootstrapping"]], "Layout": [[5, "layout"]], "Initialization": [[5, "initialization"]], "Modules (LitModule)": [[5, "modules-litmodule"]], "Setup": [[5, "setup"]], "Functionality": [[5, "functionality"]], "Escape Hatches": [[5, "escape-hatches"]], "Stateful Child Elements": [[5, "stateful-child-elements"]], "Style Guide": [[5, "style-guide"]], "Development Tips (open-source)": [[5, "development-tips-open-source"]], "Custom Client / Modules": [[5, "custom-client-modules"]], "Getting Started with LIT": [[6, "getting-started-with-lit"]], "Installation": [[6, "installation"]], "Hosted demos": [[6, "hosted-demos"]], "LIT with your model ": [[6, "lit-with-your-model"]], "Using LIT in notebooks ": [[6, "using-lit-in-notebooks"]], "Stand-alone components ": [[6, "stand-alone-components"]], "Run an existing example ": [[6, "run-an-existing-example"]], "Glossary": [[7, "glossary"]], "Learning Interpretability Tool (LIT)": [[9, "learning-interpretability-tool-lit"]], "Research": [[9, "research"]], "UI Guide": [[10, "ui-guide"]], "General Layout": [[10, "general-layout"]], "Layout Options": [[10, "layout-options"]], "Datapoint Selections": [[10, "datapoint-selections"]], "Toolbars": [[10, "toolbars"]], "Top Bar": [[10, "top-bar"]], "Global Settings": [[10, "global-settings"]], "URL Sharing": [[10, "url-sharing"]], "Main Toolbar": [[10, "main-toolbar"]], "Status Bar": [[10, "status-bar"]], "Comparing Models": [[10, "comparing-models"]], "Comparing Datapoints": [[10, "comparing-datapoints"]], "Slices": [[10, "slices"]], "Module Details": [[10, "module-details"]], "Data Table": [[10, "data-table"]], "Datapoint Editor": [[10, "datapoint-editor"]], "Datapoint Generator": [[10, "datapoint-generator"]], "Metrics Table": [[10, "metrics-table"]], "Scalars": [[10, "scalars"]], "Model Output": [[10, "model-output"]], "Salience Maps": [[10, "salience-maps"]], "User Journeys": [[10, "user-journeys"]], "Sentiment Analysis": [[10, "sentiment-analysis"]], "Gender Bias in Coreference": [[10, "gender-bias-in-coreference"]], "Debugging Text Generation": [[10, "debugging-text-generation"]]}, "indexentries": {}})
\ No newline at end of file
diff --git a/docs/tutorials/sequence-salience/index.html b/docs/tutorials/sequence-salience/index.html
index 5c0c6cfc..9e76d614 100644
--- a/docs/tutorials/sequence-salience/index.html
+++ b/docs/tutorials/sequence-salience/index.html
@@ -112,10 +112,11 @@ Prompt Engineering with Sequence Salience
LIT supports additional LLMs, including Llama 2 and Mistral,
via the HuggingFace Transformers and KerasNLP libraries.
This tutorial was adapted from and expands upon LIT's contributions to the
-Responsible Generative AI Tookit and the related paper and
-video submitted to the ACL 2024 Systems Demonstration track.
-This is an active and ongoing research area for the LIT team, so expect changes
-and further expansions to this tutorial over time.
+Responsible Generative AI Toolkit and the related
+paper and video submitted to the ACL 2024
+System Demonstrations track. This is an active and ongoing research area for
+the LIT team, so expect changes and further expansions to this tutorial over
+time.
Case Study 1: Debugging Few-Shot Prompts
Few-shot prompting was introduced with GPT-2: an ML developer provides
examples of how to perform a task in a prompt, affixes user-provided content at
From fbd88746263fec0f72f2f01bcc382e88e902ab50 Mon Sep 17 00:00:00 2001
From: Ramesh Sampath
Date: Fri, 19 Apr 2024 04:39:34 -0700
Subject: [PATCH 04/50] Update keras import to fix build error.
Both `from keras import ops` and `import keras` are valid, but `import keras.ops` is no longer valid because `ops` is not a module in the `keras` namespace; it points to the public API.
PiperOrigin-RevId: 626326912
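For illustration, a minimal sketch of the two import styles the message describes as valid. This is a hedged example, not part of the patch; the specific `keras.ops` functions shown (`convert_to_tensor`, `sum`) are assumed from the Keras 3 public API.

```python
import keras

# After a plain `import keras`, ops are reached through the public API namespace.
x = keras.ops.convert_to_tensor([1.0, 2.0, 3.0])

# Importing the ops API directly also works.
from keras import ops

y = ops.sum(x)
# Per the message above, `import keras.ops` is no longer a valid import form.
```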
---
lit_nlp/examples/models/instrumented_keras_lms.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/lit_nlp/examples/models/instrumented_keras_lms.py b/lit_nlp/examples/models/instrumented_keras_lms.py
index 44236311..f9b4cbcd 100644
--- a/lit_nlp/examples/models/instrumented_keras_lms.py
+++ b/lit_nlp/examples/models/instrumented_keras_lms.py
@@ -6,8 +6,7 @@
from typing import Sequence
from absl import logging
-import keras.backend
-import keras.ops
+import keras
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.lib import utils as lit_utils
From 483082dcb0beb39795c0fc093fe93036bb6a274c Mon Sep 17 00:00:00 2001
From: Ryan Mullins
Date: Fri, 26 Apr 2024 13:12:40 -0700
Subject: [PATCH 05/50] LIT: Refactor DataService reactions.
PiperOrigin-RevId: 628490534
---
lit_nlp/client/services/data_service.ts | 90 ++++++++++++++-----------
1 file changed, 50 insertions(+), 40 deletions(-)
diff --git a/lit_nlp/client/services/data_service.ts b/lit_nlp/client/services/data_service.ts
index 114fae3d..354702bf 100644
--- a/lit_nlp/client/services/data_service.ts
+++ b/lit_nlp/client/services/data_service.ts
@@ -95,40 +95,39 @@ export class DataService extends LitService {
this.columnData.clear();
});
- // Run classification interpreter when the inputs or margins change.
- const getClassificationInputs = () =>
- [this.appState.currentInputData, this.appState.currentModels,
- this.classificationService.allMarginSettings];
- reaction(getClassificationInputs, () => {
- if (this.appState.currentInputData == null ||
- this.appState.currentInputData.length === 0 ||
- this.appState.currentModels.length === 0 ||
- !this.settingsService.isValidCurrentDataAndModels) {
- return;
- }
- for (const model of this.appState.currentModels) {
- this.runClassification(model, this.appState.currentInputData);
- }
- }, {fireImmediately: true});
-
- // Run other prediction interpreters when necessary.
- const getPredictionInputs =
- () => [this.appState.currentInputData, this.appState.currentModels];
- reaction(getPredictionInputs, () => {
- if (this.appState.currentInputData == null ||
- this.appState.currentInputData.length === 0 ||
- this.appState.currentModels.length === 0 ||
- !this.settingsService.isDatasetValidForModels(
- this.appState.currentDataset, this.appState.currentModels)) {
- return;
- }
- for (const model of this.appState.currentModels) {
- this.runGeneratedTextPreds(model, this.appState.currentInputData);
- this.runRegression(model, this.appState.currentInputData);
- this.runScalarPreds(model, this.appState.currentInputData);
- this.runMultiLabelPreds(model, this.appState.currentInputData);
- }
- }, {fireImmediately: true});
+    // Fetch predictions when the current dataset or model changes.
+ reaction(
+ () => [
+ this.appState.currentInputData, this.appState.currentModels
+ ] as const,
+ ([currentInputData, currentModels]) => {
+ if (!currentInputData?.length ||
+ !currentModels.length ||
+ !this.settingsService.isValidCurrentDataAndModels) {
+ return;
+ }
+ for (const model of currentModels) {
+ this.runClassification(model, currentInputData);
+ this.runGeneratedTextPreds(model, currentInputData);
+ this.runMultiLabelPreds(model, currentInputData);
+ this.runRegression(model, currentInputData);
+ this.runScalarPreds(model, currentInputData);
+ }
+ },
+ {fireImmediately: true}
+ );
+
+    // Additionally, run the classification interpreter when the margins change.
+ reaction(
+ () => this.classificationService.allMarginSettings,
+ () => {
+ if (!this.settingsService.isValidCurrentDataAndModels) {return;}
+ for (const model of this.appState.currentModels) {
+ this.runClassification(model, this.appState.currentInputData);
+ }
+ },
+ {fireImmediately: true}
+ );
this.appState.addNewDatapointsCallback(async (newDatapoints) =>
this.setValuesForNewDatapoints(newDatapoints));
@@ -403,9 +402,14 @@ export class DataService extends LitService {
*/
@action
addColumn(
- columnVals: ColumnData, key: string, name: string, dataType: LitType,
- source: Source, getValueFn: ValueFn = () => null,
- colorRange?: ColorRange) {
+ columnVals: ColumnData,
+ key: string,
+ name: string,
+ dataType: LitType,
+ source: Source,
+ getValueFn: ValueFn = () => null,
+ colorRange?: ColorRange
+ ) {
if (!this.columnHeaders.has(name)) {
this.columnHeaders.set(
name, {dataType, source, name, key, getValueFn, colorRange});
@@ -426,9 +430,15 @@ export class DataService extends LitService {
*/
@action
addColumnFromList(
- values: ValueType[], data: IndexedInput[], key: string, name: string,
- dataType: LitType, source: Source, getValueFn: ValueFn = () => null,
- colorRange?: ColorRange) {
+ values: ValueType[],
+ data: IndexedInput[],
+ key: string,
+ name: string,
+ dataType: LitType,
+ source: Source,
+ getValueFn: ValueFn = () => null,
+ colorRange?: ColorRange
+ ) {
if (values.length !== data.length) {
throw new Error(`Attempted to add data column ${
name} with incorrect number of values.`);
From a5265a4feeb701b878986f79665d5fdf9ddc244c Mon Sep 17 00:00:00 2001
From: Ryan Mullins
Date: Fri, 26 Apr 2024 13:27:39 -0700
Subject: [PATCH 06/50] LIT: Add warm_start option to LitWidget
PiperOrigin-RevId: 628494490
---
lit_nlp/notebook.py | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/lit_nlp/notebook.py b/lit_nlp/notebook.py
index e3996529..aae2eb9a 100644
--- a/lit_nlp/notebook.py
+++ b/lit_nlp/notebook.py
@@ -95,13 +95,16 @@ def _encode(v):
class LitWidget(object):
"""Class for using LIT inside notebooks."""
- def __init__(self,
- *args,
- height=1000,
- render=False,
- proxy_url=None,
- layouts: Optional[layout.LitComponentLayouts] = None,
- **kw):
+ def __init__(
+ self,
+ *args,
+ height=1000,
+ render=False,
+ proxy_url=None,
+ layouts: Optional[layout.LitComponentLayouts] = None,
+ warm_start: bool = False,
+ **kw,
+ ):
"""Start LIT server and optionally render the UI immediately.
Args:
@@ -111,15 +114,16 @@ def __init__(self,
to False.
proxy_url: Optional proxy URL, if using in a notebook with a server proxy.
Defaults to None.
- layouts: Optional custom UI layouts. TODO(lit-dev): support simple module
- lists here as well.
+ layouts: Optional custom UI layouts.
+ warm_start: If true, run predictions for every model on every compatible
+ dataset before returning a renderable widget.
**kw: Keyword arguments for the LitApp.
"""
app_flags = dict(server_config.get_flags())
app_flags['server_type'] = 'notebook'
app_flags['host'] = 'localhost'
app_flags['port'] = None
- app_flags['warm_start'] = 1
+ app_flags['warm_start'] = 1 if warm_start else 0
app_flags['warm_start_progress_indicator'] = progress_indicator
app_flags['sync_state'] = True
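As a usage note for the patch above, a hedged sketch of how the new `warm_start` flag might be passed from a notebook. The `models` and `datasets` placeholders are assumptions standing in for real LIT Model and Dataset instances; they are not defined by this patch.

```python
from lit_nlp import notebook

# Placeholders: in practice these map names to LIT Model / Dataset instances.
models = {}
datasets = {}

# warm_start=True asks the server to precompute predictions for every model on
# every compatible dataset before the widget is returned.
widget = notebook.LitWidget(models, datasets, height=800, warm_start=True)
widget.render()
```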
From 4fb3bde897c68fdeb3bd829f6e5a88223bc131a4 Mon Sep 17 00:00:00 2001
From: Ian Tenney
Date: Tue, 30 Apr 2024 17:09:01 -0700
Subject: [PATCH 07/50] Pretty-printing of Model objects
PiperOrigin-RevId: 629571604
---
lit_nlp/api/model.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/lit_nlp/api/model.py b/lit_nlp/api/model.py
index ce034a20..49a68649 100644
--- a/lit_nlp/api/model.py
+++ b/lit_nlp/api/model.py
@@ -89,6 +89,18 @@ def description(self) -> str:
"""
return inspect.getdoc(self) or ''
+ def __str__(self) -> str:
+ classname = self.__class__.__module__ + '.' + self.__class__.__qualname__
+ indented_description = ' ' + self.description().replace('\n', '\n ')
+ return f'{classname}(...):\n{indented_description}'
+
+ def _repr_pretty_(self, p, cycle):
+ """Pretty-printing for IPython environments, both notebooks and repl."""
+ if not cycle:
+ p.text(str(self))
+ else:
+ p.text('...')
+
@classmethod
def init_spec(cls) -> Optional[Spec]:
"""Attempts to infer a Spec describing a Model's constructor parameters.
From 675ca2de21b68dc62e4909c80a2cd57d8ee8b601 Mon Sep 17 00:00:00 2001
From: Bin Du
Date: Fri, 10 May 2024 14:56:23 -0700
Subject: [PATCH 08/50] Add a helper function in the Keras setup to clean up
 special tokens in GPT2 and SentencePiece tokenizers.
PiperOrigin-RevId: 632611251
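To make the intent concrete, a hedged illustration of the substitutions performed by the helper added below. The sample tokens are typical GPT-2 and SentencePiece artifacts chosen for this sketch; they are assumptions, not taken from the patch.

```python
def clean_subword_token(tok: str) -> str:
    # Mirrors the helper added below: GPT-2 markers first, then SentencePiece.
    tok = tok.replace("Ċ", "\n")       # GPT-2 newline marker
    tok = tok.replace("Ġ", "▁")        # GPT-2 start-of-word -> magic underscore
    tok = tok.replace("<0x0A>", "\n")  # SentencePiece newline byte token
    return tok

print(clean_subword_token("Ġhello"))   # prints '▁hello'
print(clean_subword_token("<0x0A>"))   # prints a newline
```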
---
.../examples/models/instrumented_keras_lms.py | 33 +++++++++++++++----
1 file changed, 26 insertions(+), 7 deletions(-)
diff --git a/lit_nlp/examples/models/instrumented_keras_lms.py b/lit_nlp/examples/models/instrumented_keras_lms.py
index f9b4cbcd..b3bdb41c 100644
--- a/lit_nlp/examples/models/instrumented_keras_lms.py
+++ b/lit_nlp/examples/models/instrumented_keras_lms.py
@@ -10,7 +10,6 @@
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.lib import utils as lit_utils
-import numpy as np
# pylint: disable=g-import-not-at-top
@@ -87,10 +86,6 @@ def __init__(
self.max_length = max_length
self.dynamic_sequence_length = dynamic_sequence_length
- self.ids_to_tokens = np.vectorize(
- self.model.preprocessor.tokenizer.id_to_token
- )
-
# map ids: [batch_size, num_tokens]
# to embs: [batch_size, num_tokens, emb_dim]
self.embedder = self.model.backbone.token_embedding
@@ -141,6 +136,30 @@ def encode_inputs(self, texts: Sequence[str]):
# Actually trim the input tensors.
return {k: v[:, :longest_sequence] for k, v in encoded_inputs.items()}
+ def clean_subword_token(self, tok: str) -> str:
+ """Clean up special subword token from the tokenizers if necessary.
+
+ Args:
+ tok: the token to clean up.
+ Returns:
+ The replaced token if the provided token matches the special subword token
+ below; otherwise, the original token is returned.
+ """
+ # For GPT2 tokenizer.
+ tok = tok.replace("Ċ", "\n") # newlines
+ tok = tok.replace("Ġ", "▁") # start of word -> magic underscore
+ # For SentencePiece Tokenizer.
+ tok = tok.replace("<0x0A>", "\n") # newlines
+ return tok
+
+ def ids_to_clean_tokens(self, ids: Sequence[int]) -> Sequence[str]:
+ return [
+ self.clean_subword_token(
+ self.model.preprocessor.tokenizer.id_to_token(id)
+ )
+ for id in ids
+ ]
+
@classmethod
def from_loaded(cls, existing: "_KerasBaseModel", *args, **kw):
"""Share weights and underlying Keras model with another instance."""
@@ -419,7 +438,7 @@ def _postprocess(self, preds):
"""Post-process single-example preds. Operates on numpy arrays."""
mask = preds.pop("padding_mask").astype(bool)
ids = preds.pop("input_ids")[mask]
- preds[FieldNames.TOKENS] = self.ids_to_tokens(ids)
+ preds[FieldNames.TOKENS] = self.ids_to_clean_tokens(ids)
for key in lit_utils.find_spec_keys(
self.output_spec(), lit_types.TokenScores
):
@@ -479,7 +498,7 @@ def _postprocess(self, preds):
# rather than acting as a boolean mask.
mask = preds.pop("padding_mask").astype(bool)
ids = preds.pop("token_ids")[mask]
- preds[FieldNames.TOKENS] = self.ids_to_tokens(ids)
+ preds[FieldNames.TOKENS] = self.ids_to_clean_tokens(ids)
return preds
def predict_minibatch(self, inputs):
From b16059fbd0320d411298009c0226489e1f548a69 Mon Sep 17 00:00:00 2001
From: Ryan Mullins
Date: Wed, 22 May 2024 13:35:37 -0700
Subject: [PATCH 09/50] Rename lm_salience_module to sequence_salience_module
PiperOrigin-RevId: 636277468
---
lit_nlp/api/layout.py | 22 +++++++-------
...odule.css => sequence_salience_module.css} | 0
..._module.ts => sequence_salience_module.ts} | 30 +++++++++----------
lit_nlp/examples/lm_salience_demo.py | 21 ++++++++-----
requirements_core.txt | 2 +-
5 files changed, 41 insertions(+), 34 deletions(-)
rename lit_nlp/client/modules/{lm_salience_module.css => sequence_salience_module.css} (100%)
rename lit_nlp/client/modules/{lm_salience_module.ts => sequence_salience_module.ts} (97%)
diff --git a/lit_nlp/api/layout.py b/lit_nlp/api/layout.py
index 55e41e6d..be67215e 100644
--- a/lit_nlp/api/layout.py
+++ b/lit_nlp/api/layout.py
@@ -29,19 +29,15 @@ class LitModuleName(dtypes.EnumSerializableAsValues, enum.Enum):
Entries should map the TypeScript class name to the HTML element name,
as declared in HTMLElementTagNameMap in the .ts file defining each LitModule.
"""
- AnnotatedTextModule = 'annotated-text-module'
+ # keep-sorted start
AnnotatedTextGoldModule = 'annotated-text-gold-module'
+ AnnotatedTextModule = 'annotated-text-module'
AttentionModule = 'attention-module'
ClassificationModule = 'classification-module'
ConfusionMatrixModule = 'confusion-matrix-module'
CurvesModule = 'curves-module'
DataTableModule = 'data-table-module'
- SimpleDataTableModule = 'simple-data-table-module'
DatapointEditorModule = 'datapoint-editor-module'
- # Non-replicating version of Datapoint Editor
- SingleDatapointEditorModule = 'single-datapoint-editor-module'
- # Simplified, non-replicating version of Datapoint Editor
- SimpleDatapointEditorModule = 'simple-datapoint-editor-module'
DiveModule = 'dive-module'
DocumentationModule = 'documentation-module'
EmbeddingsModule = 'embeddings-module'
@@ -50,7 +46,7 @@ class LitModuleName(dtypes.EnumSerializableAsValues, enum.Enum):
GeneratedTextModule = 'generated-text-module'
GeneratorModule = 'generator-module'
LanguageModelPredictionModule = 'lm-prediction-module'
- LMSalienceModule = 'lm-salience-module'
+ LegacySequenceSalienceModule = 'legacy-sequence-salience-module'
MetricsModule = 'metrics-module'
MultilabelModule = 'multilabel-module'
PdpModule = 'pdp-module'
@@ -58,14 +54,20 @@ class LitModuleName(dtypes.EnumSerializableAsValues, enum.Enum):
SalienceClusteringModule = 'salience-clustering-module'
SalienceMapModule = 'salience-map-module'
ScalarModule = 'scalar-module'
- LegacySequenceSalienceModule = 'legacy-sequence-salience-module'
+ SequenceSalienceModule = 'sequence-salience-module'
+ SimpleDataTableModule = 'simple-data-table-module'
+ # Simplified, non-replicating version of Datapoint Editor
+ SimpleDatapointEditorModule = 'simple-datapoint-editor-module'
+ # Non-replicating version of Datapoint Editor
+ SingleDatapointEditorModule = 'single-datapoint-editor-module'
SpanGraphGoldModule = 'span-graph-gold-module'
- SpanGraphModule = 'span-graph-module'
SpanGraphGoldModuleVertical = 'span-graph-gold-module-vertical'
+ SpanGraphModule = 'span-graph-module'
SpanGraphModuleVertical = 'span-graph-module-vertical'
TCAVModule = 'tcav-module'
- TrainingDataAttributionModule = 'tda-module'
ThresholderModule = 'thresholder-module'
+ TrainingDataAttributionModule = 'tda-module'
+ # keep-sorted end
def __call__(self, **kw):
return ModuleConfig(self.value, **kw)
diff --git a/lit_nlp/client/modules/lm_salience_module.css b/lit_nlp/client/modules/sequence_salience_module.css
similarity index 100%
rename from lit_nlp/client/modules/lm_salience_module.css
rename to lit_nlp/client/modules/sequence_salience_module.css
diff --git a/lit_nlp/client/modules/lm_salience_module.ts b/lit_nlp/client/modules/sequence_salience_module.ts
similarity index 97%
rename from lit_nlp/client/modules/lm_salience_module.ts
rename to lit_nlp/client/modules/sequence_salience_module.ts
index 40f9a8e6..68f38450 100644
--- a/lit_nlp/client/modules/lm_salience_module.ts
+++ b/lit_nlp/client/modules/sequence_salience_module.ts
@@ -1,5 +1,5 @@
/**
- * @fileoverview Custom viz module for causal LM salience.
+ * @fileoverview Custom viz module for sequence salience with causal LMs.
*/
import '@material/mwc-icon';
@@ -26,7 +26,7 @@ import {cleanSpmText, groupTokensByRegexPrefix, groupTokensByRegexSeparator} fro
import {type IndexedInput, type Preds, SCROLL_SYNC_CSS_CLASS, type Spec} from '../lib/types';
import {cumSumArray, filterToKeys, findSpecKeys, groupAlike, makeModifiedInput, sumArray} from '../lib/utils';
-import {styles} from './lm_salience_module.css';
+import {styles} from './sequence_salience_module.css';
/**
* Max of absolute value
@@ -133,10 +133,10 @@ export class SingleExampleSingleModelModule extends LitModule {
}
/**
- * Custom styled version of for rendering LM salience tokens.
+ * Custom styled chips for rendering sequence salience tokens.
*/
-@customElement('lm-salience-chips')
-class LMSalienceChips extends TextChips {
+@customElement('sequence-salience-chips')
+class SequenceSalienceChips extends TextChips {
@property({type: Boolean}) underline = false;
override holderClass() {
@@ -205,8 +205,8 @@ const CMAP_DEFAULT_RANGE = 0.4;
const DEFAULT_CUSTOM_SEGMENTATION_REGEX = '\\n+';
/** LIT module for model output. */
-@customElement('lm-salience-module')
-export class LMSalienceModule extends SingleExampleSingleModelModule {
+@customElement('sequence-salience-module')
+export class SequenceSalienceModule extends SingleExampleSingleModelModule {
static override title = 'Sequence Salience';
static override numCols = 6; // 60% of screen width if DataTable on left
static override duplicateAsRow = true;
@@ -215,9 +215,9 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
model: string,
selectionServiceIndex: number,
shouldReact: number,
- ) => html` html`
- `;
+ `;
static override get styles() {
return [sharedStyles, styles];
@@ -561,7 +561,7 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
});
// If selected example OR selected target string change.
- // NOTE: you may see a console warning: "Element lm-salience-module
+ // NOTE: you may see a console warning: "Element sequence-salience-module
// scheduled an update (generally because a property was set) after an
// update completed, causing a new update to be scheduled."
// This is okay here: this.modifiedData will be updated after
@@ -994,12 +994,12 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
// prettier-ignore
return html`
-
-
+
`;
}
@@ -1081,7 +1081,7 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
declare global {
interface HTMLElementTagNameMap {
- 'lm-salience-chips': LMSalienceChips;
- 'lm-salience-module': LMSalienceModule;
+ 'sequence-salience-chips': SequenceSalienceChips;
+ 'sequence-salience-module': SequenceSalienceModule;
}
-}
\ No newline at end of file
+}
diff --git a/lit_nlp/examples/lm_salience_demo.py b/lit_nlp/examples/lm_salience_demo.py
index 510d90e7..37a68d1f 100644
--- a/lit_nlp/examples/lm_salience_demo.py
+++ b/lit_nlp/examples/lm_salience_demo.py
@@ -54,7 +54,10 @@
from lit_nlp import server_flags
from lit_nlp.api import layout
from lit_nlp.examples.datasets import lm as lm_data
-from lit_nlp.lib import file_cache
+
+# TODO(b/333698148): file_cache doesn't work well with certain HF and KerasNLP
+# preset names. Disabling until resolved.
+# from lit_nlp.lib import file_cache
# NOTE: additional flags defined in server_flags.py
@@ -132,7 +135,7 @@
"Editor": [modules.SingleDatapointEditorModule],
},
upper={ # if 'lower' not specified, this fills the right side
- "Salience": [modules.LMSalienceModule],
+ "Salience": [modules.SequenceSalienceModule],
},
layoutSettings=layout.LayoutSettings(leftWidth=40),
description="Left/right layout for language model salience.",
@@ -143,7 +146,7 @@
"Editor": [modules.SimpleDatapointEditorModule],
},
lower={
- "Salience": [modules.LMSalienceModule],
+ "Salience": [modules.SequenceSalienceModule],
},
layoutSettings=layout.LayoutSettings(
hideToolbar=True,
@@ -162,7 +165,7 @@
"Datapoint Generators": [modules.GeneratorModule],
},
lower={
- "Salience": [modules.LMSalienceModule],
+ "Salience": [modules.SequenceSalienceModule],
"Metrics": [modules.MetricsModule],
},
layoutSettings=layout.LayoutSettings(
@@ -279,10 +282,12 @@ def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
model_name, path = model_string.split(":", 1)
logging.info("Loading model '%s' from '%s'", model_name, path)
- path = file_cache.cached_path(
- path,
- extract_compressed_file=path.endswith(".tar.gz"),
- )
+ # TODO(b/333698148): file_cache doesn't work well with certain HF and
+ # KerasNLP preset names. Disabling until resolved.
+ # path = file_cache.cached_path(
+ # path,
+ # extract_compressed_file=path.endswith(".tar.gz"),
+ # )
if _DL_FRAMEWORK.value == "kerasnlp":
# pylint: disable=g-import-not-at-top
diff --git a/requirements_core.txt b/requirements_core.txt
index 2ceb6ed4..bacdd68a 100644
--- a/requirements_core.txt
+++ b/requirements_core.txt
@@ -21,7 +21,7 @@ filelock>=3.12.3
google-cloud-translate>=3.11.1
ipython>=7.34.0
Levenshtein>=0.21.1
-matplotlib>=3.7.1
+matplotlib>=3.6.0,<3.9.0
ml-collections>=0.1.1
numpy>=1.24.1
pandas>=2.0.3
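After this rename, layouts refer to the module through the now-sorted `LitModuleName` enum, whose `__call__` wraps the element name in a `ModuleConfig`. A minimal sketch of how a custom layout references the renamed module, mirroring the layouts updated in `lm_salience_demo.py` above (the layout itself is hypothetical, not from the patch):

```
from lit_nlp.api import layout

modules = layout.LitModuleName

# Hypothetical layout referencing the renamed sequence salience module.
EXAMPLE_LAYOUT = layout.LitCanonicalLayout(
    upper={"Editor": [modules.SingleDatapointEditorModule]},
    lower={"Salience": [modules.SequenceSalienceModule]},
    description="Example layout using the sequence salience module.",
)
```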
From b3c120b22138fb03a712f11778197cf4966d0c3a Mon Sep 17 00:00:00 2001
From: Ryan Mullins
Date: Fri, 24 May 2024 13:28:10 -0700
Subject: [PATCH 10/50] LIT: Relax min Python version to 3.9
PiperOrigin-RevId: 637017601
---
.github/workflows/ci.yml | 2 +-
lit_nlp/api/dataset_test.py | 1 -
lit_nlp/components/ablation_flip.py | 2 +-
lit_nlp/components/backtranslator.py | 2 +-
lit_nlp/components/classification_results_test.py | 5 -----
lit_nlp/components/shap_explainer.py | 5 ++---
lit_nlp/components/tcav.py | 2 +-
lit_nlp/lib/caching.py | 2 +-
lit_nlp/lib/utils.py | 2 +-
lit_nlp/lib/utils_test.py | 2 +-
pyproject.toml | 4 ++--
requirements_core.txt | 2 +-
12 files changed, 12 insertions(+), 19 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 236359e5..270b0666 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -33,7 +33,7 @@ jobs:
strategy:
matrix:
node-version: [18]
- python-version: ["3.10", "3.11"]
+ python-version: ["3.9", "3.10", "3.11"]
defaults:
run:
shell: bash -l {0}
diff --git a/lit_nlp/api/dataset_test.py b/lit_nlp/api/dataset_test.py
index 7463e28e..ab77f323 100644
--- a/lit_nlp/api/dataset_test.py
+++ b/lit_nlp/api/dataset_test.py
@@ -179,7 +179,6 @@ def test_init_from_examples_without_ids(self):
examples=examples
)
- # TODO(b/266681945): Enabled zip(..., strict=true) once updated to Py3.10
for indexed_example, example, original in zip(
dataset.indexed_examples, dataset.examples, examples
):
diff --git a/lit_nlp/components/ablation_flip.py b/lit_nlp/components/ablation_flip.py
index f6d41e11..b83be51b 100644
--- a/lit_nlp/components/ablation_flip.py
+++ b/lit_nlp/components/ablation_flip.py
@@ -209,7 +209,7 @@ def _generate_leave_one_out_ablation_score(
self._create_cf(example, input_spec, [(field, idx)])
for idx in idxs
])
- for idx, cf_output in zip(idxs, cf_outputs, strict=True):
+ for idx, cf_output in zip(idxs, cf_outputs):
loo_score = cf_utils.prediction_difference(
cf_output, orig_output, output_spec, pred_key
)
diff --git a/lit_nlp/components/backtranslator.py b/lit_nlp/components/backtranslator.py
index 4da15647..96d8d97c 100644
--- a/lit_nlp/components/backtranslator.py
+++ b/lit_nlp/components/backtranslator.py
@@ -94,7 +94,7 @@ def generate_all(self,
# Generate by substituting in each field.
# TODO(lit-team): substitute on a combination of fields?
for field_name, candidates in candidates_by_field.items():
- for i, (inp, cands) in enumerate(zip(inputs, candidates, strict=True)):
+ for i, (inp, cands) in enumerate(zip(inputs, candidates)):
for cand in cands:
all_outputs[i].append(utils.make_modified_input(
inp, {field_name: cand}, 'Backtranslator'
diff --git a/lit_nlp/components/classification_results_test.py b/lit_nlp/components/classification_results_test.py
index 64514115..27cdd985 100644
--- a/lit_nlp/components/classification_results_test.py
+++ b/lit_nlp/components/classification_results_test.py
@@ -53,7 +53,6 @@ def test_no_label(self):
{'probas': dtypes.ClassificationResult([0.2, 0.8], '1', None)},
]
self.assertListEqual(['probas'], list(results[0].keys()))
- # TODO(b/266681945): Add strict=True to ensure equal lengths
for result, expected in zip(results, expected_results):
np.testing.assert_array_equal(
expected['probas'].scores, result['probas'].scores
@@ -77,7 +76,6 @@ def test_no_margins(self):
{'probas': dtypes.ClassificationResult([0.2, 0.8], '1', False)},
]
self.assertListEqual(['probas'], list(results[0].keys()))
- # TODO(b/266681945): Add strict=True to ensure equal lengths
for result, expected in zip(results, expected_results):
np.testing.assert_array_equal(
expected['probas'].scores, result['probas'].scores
@@ -105,7 +103,6 @@ def test_single_margin(self):
{'probas': dtypes.ClassificationResult([0.2, 0.8], '0', True)},
]
self.assertListEqual(['probas'], list(results[0].keys()))
- # TODO(b/266681945): Add strict=True to ensure equal lengths
for result, expected in zip(results, expected_results):
np.testing.assert_array_equal(
expected['probas'].scores, result['probas'].scores
@@ -145,7 +142,6 @@ def test_faceted_margins_text(self):
{'probas': dtypes.ClassificationResult([0.2, 0.8], '1', False)},
]
self.assertListEqual(['probas'], list(results[0].keys()))
- # TODO(b/266681945): Add strict=True to ensure equal lengths
for result, expected in zip(results, expected_results):
np.testing.assert_array_equal(
expected['probas'].scores, result['probas'].scores
@@ -188,7 +184,6 @@ def test_faceted_margins_num(self):
{'probas': dtypes.ClassificationResult([0.2, 0.8], '1', False)},
]
self.assertListEqual(['probas'], list(results[0].keys()))
- # TODO(b/266681945): Add strict=True to ensure equal lengths
for result, expected in zip(results, expected_results):
np.testing.assert_array_equal(
expected['probas'].scores, result['probas'].scores
diff --git a/lit_nlp/components/shap_explainer.py b/lit_nlp/components/shap_explainer.py
index 4df68535..f4434fe4 100644
--- a/lit_nlp/components/shap_explainer.py
+++ b/lit_nlp/components/shap_explainer.py
@@ -163,8 +163,7 @@ def run(
def prediction_fn(examples):
dict_examples: list[JsonDict] = [
- dict(zip(input_feats, feature_values, strict=True))
- for feature_values in examples
+ dict(zip(input_feats, feature_values)) for feature_values in examples
]
preds: list[Union[int, float]] = []
@@ -189,7 +188,7 @@ def prediction_fn(examples):
explainer = shap.KernelExplainer(prediction_fn, background)
shap_values_by_example = explainer.shap_values(inputs_to_use)
salience = [
- dict(zip(input_feats, example_shap_values, strict=True))
+ dict(zip(input_feats, example_shap_values))
for example_shap_values in shap_values_by_example
]
return [{'saliency': dtypes.FeatureSalience(s)} for s in salience]
diff --git a/lit_nlp/components/tcav.py b/lit_nlp/components/tcav.py
index 32b7a189..dacab22b 100644
--- a/lit_nlp/components/tcav.py
+++ b/lit_nlp/components/tcav.py
@@ -142,7 +142,7 @@ def get_predictions(
t for t in target_fields if getattr(output_spec[t], 'parent')
]
modified_inputs = []
- for ex, preds in zip(inputs, predictions, strict=True):
+ for ex, preds in zip(inputs, predictions):
overrides = {}
for field in valid_target_fields:
label_idx = np.argmax(preds[field])
diff --git a/lit_nlp/lib/caching.py b/lit_nlp/lib/caching.py
index ab42885f..169dfd6d 100644
--- a/lit_nlp/lib/caching.py
+++ b/lit_nlp/lib/caching.py
@@ -269,7 +269,7 @@ def fit_transform(self, inputs: Iterable[JsonDict]):
)
outputs = list(wrapped.fit_transform(inputs_as_list))
with self._cache.lock:
- for cache_key, output in zip(cache_keys, outputs, strict=True):
+ for cache_key, output in zip(cache_keys, outputs):
self._cache.put(output, cache_key)
return outputs
diff --git a/lit_nlp/lib/utils.py b/lit_nlp/lib/utils.py
index 4351465d..6f5ff7b1 100644
--- a/lit_nlp/lib/utils.py
+++ b/lit_nlp/lib/utils.py
@@ -203,7 +203,7 @@ def pad1d(
min_len: int,
pad_val: T,
pad_left: bool = False,
- max_len: int | None = None,
+ max_len: Optional[int] = None,
) -> list[T]:
"""Pad a list to the target length."""
if pad_left:
diff --git a/lit_nlp/lib/utils_test.py b/lit_nlp/lib/utils_test.py
index 5646181f..52b3011d 100644
--- a/lit_nlp/lib/utils_test.py
+++ b/lit_nlp/lib/utils_test.py
@@ -293,7 +293,7 @@ def test_pad1d(
pad_val: T,
expected: list[T],
pad_left: bool = False,
- max_len: int | None = None,
+ max_len: Optional[int] = None,
):
self.assertEqual(
utils.pad1d(
diff --git a/pyproject.toml b/pyproject.toml
index 5bed29b2..b93fe7c5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,13 +11,13 @@ authors = [
description = "🔥LIT: The Learning Interpretability Tool"
readme = "README.md"
license = { file = "LICENSE" }
-requires-python = ">=3.10"
+requires-python = ">=3.9"
# LINT.IfChange
dependencies = [
"absl-py>=1.4.0",
"annoy>=1.17.3",
"attrs>=22.1.0",
- "etils[epath]>=1.7.0",
+ "etils[epath]>=1.5.0",
"filelock>=3.12.3",
"google-cloud-translate>=3.11.1",
"ipython>=7.34.0",
diff --git a/requirements_core.txt b/requirements_core.txt
index bacdd68a..8863d139 100644
--- a/requirements_core.txt
+++ b/requirements_core.txt
@@ -16,7 +16,7 @@
absl-py>=1.4.0
annoy>=1.17.3
attrs>=22.1.0
-etils[epath]>=1.7.0
+etils[epath]>=1.5.0
filelock>=3.12.3
google-cloud-translate>=3.11.1
ipython>=7.34.0
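The `strict=True` arguments are removed above because that keyword was only added to `zip` in Python 3.10, while this patch relaxes the minimum supported version to 3.9. A small illustration of the difference (the sample lists are hypothetical):

```
import sys

a, b = [1, 2, 3], ["x", "y", "z"]
if sys.version_info >= (3, 10):
  # Raises ValueError if the iterables have different lengths.
  pairs = list(zip(a, b, strict=True))
else:
  # Python 3.9: no strict keyword; zip silently truncates on length mismatch.
  pairs = list(zip(a, b))
```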
From 5188c8c835328efcc9dff5a0a4cf4cd79fabe099 Mon Sep 17 00:00:00 2001
From: Ryan Mullins
Date: Wed, 29 May 2024 11:21:19 -0700
Subject: [PATCH 11/50] Relaxing SHAP version.
PiperOrigin-RevId: 638351905
---
pyproject.toml | 2 +-
requirements_core.txt | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index b93fe7c5..97e19939 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,7 +34,7 @@ dependencies = [
"saliency>=0.1.3",
"scikit-learn>=1.0.2",
"scipy>=1.10.1",
- "shap==0.42.0",
+ "shap>=0.42.0,<0.46.0",
"six>=1.16.0",
"termcolor>=2.3.0",
"tqdm>=4.64.0",
diff --git a/requirements_core.txt b/requirements_core.txt
index 8863d139..c1bb2434 100644
--- a/requirements_core.txt
+++ b/requirements_core.txt
@@ -33,7 +33,7 @@ sacrebleu>=2.3.1
saliency>=0.1.3
scikit-learn>=1.0.2
scipy>=1.10.1
-shap==0.42.0
+shap>=0.42.0,<0.46.0
six>=1.16.0
termcolor>=2.3.0
tqdm>=4.64.0
From 15eccb1197366c925a5beff310fb5d7d369bde0c Mon Sep 17 00:00:00 2001
From: Ian Tenney
Date: Thu, 30 May 2024 09:43:37 -0700
Subject: [PATCH 12/50] Simplify file_cache logic and re-enable for sequence
salience demo.
PiperOrigin-RevId: 638684231
---
lit_nlp/examples/lm_salience_demo.py | 19 +++++++++----------
lit_nlp/lib/file_cache.py | 1 +
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/lit_nlp/examples/lm_salience_demo.py b/lit_nlp/examples/lm_salience_demo.py
index 37a68d1f..fb555bec 100644
--- a/lit_nlp/examples/lm_salience_demo.py
+++ b/lit_nlp/examples/lm_salience_demo.py
@@ -54,10 +54,7 @@
from lit_nlp import server_flags
from lit_nlp.api import layout
from lit_nlp.examples.datasets import lm as lm_data
-
-# TODO(b/333698148): file_cache doesn't work well with certain HF and KerasNLP
-# preset names. Disabling until resolved.
-# from lit_nlp.lib import file_cache
+from lit_nlp.lib import file_cache
# NOTE: additional flags defined in server_flags.py
@@ -282,12 +279,14 @@ def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
model_name, path = model_string.split(":", 1)
logging.info("Loading model '%s' from '%s'", model_name, path)
- # TODO(b/333698148): file_cache doesn't work well with certain HF and
- # KerasNLP preset names. Disabling until resolved.
- # path = file_cache.cached_path(
- # path,
- # extract_compressed_file=path.endswith(".tar.gz"),
- # )
+ # Limit scope of caching to archive files and remote paths, as some preset
+ # names like "google/gemma-1.1-7b-it" look like file paths but should not
+ # be handled as such.
+ if path.endswith(".tar.gz") or file_cache.is_remote(path):
+ path = file_cache.cached_path(
+ path,
+ extract_compressed_file=path.endswith(".tar.gz"),
+ )
if _DL_FRAMEWORK.value == "kerasnlp":
# pylint: disable=g-import-not-at-top
diff --git a/lit_nlp/lib/file_cache.py b/lit_nlp/lib/file_cache.py
index a8417ce0..88ab3934 100644
--- a/lit_nlp/lib/file_cache.py
+++ b/lit_nlp/lib/file_cache.py
@@ -217,6 +217,7 @@ def filename_fom_url(url: str, etag: Optional[str] = None) -> str:
def is_remote(url_of_filepath: str) -> bool:
+ """Check if a path represents a remote URL or non-local file."""
parsed = urllib_parse.urlparse(url_of_filepath)
return parsed.scheme in ('http', 'https')
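In effect, the demo now only routes a model path through `file_cache` when it is an archive or a remote URL; preset names such as `google/gemma-1.1-7b-it` pass through unchanged. A minimal sketch of that guard as a helper (the helper name is hypothetical; `cached_path` and `is_remote` are the functions touched above):

```
from lit_nlp.lib import file_cache

def maybe_cache(path: str) -> str:
  """Hypothetical helper mirroring the guard added in lm_salience_demo.py."""
  if path.endswith(".tar.gz") or file_cache.is_remote(path):
    return file_cache.cached_path(
        path, extract_compressed_file=path.endswith(".tar.gz")
    )
  return path
```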
From f4c099082f0e89986aad162cc3cd0ac9bc2214c7 Mon Sep 17 00:00:00 2001
From: Googler
Date: Mon, 3 Jun 2024 12:29:54 -0700
Subject: [PATCH 13/50] No public description
PiperOrigin-RevId: 639874888
---
lit_nlp/examples/blank_slate_demo.py | 4 ++--
.../examples/{datasets/penguin_data.py => penguin/data.py} | 0
lit_nlp/examples/{penguin_demo.py => penguin/demo.py} | 6 +++---
.../examples/{models/penguin_model.py => penguin/model.py} | 2 +-
.../penguin_model_int_test.py => penguin/model_int_test.py} | 2 +-
website/sphinx_src/components.md | 2 +-
website/sphinx_src/demos.md | 2 +-
7 files changed, 9 insertions(+), 9 deletions(-)
rename lit_nlp/examples/{datasets/penguin_data.py => penguin/data.py} (100%)
rename lit_nlp/examples/{penguin_demo.py => penguin/demo.py} (93%)
rename lit_nlp/examples/{models/penguin_model.py => penguin/model.py} (97%)
rename lit_nlp/examples/{models/penguin_model_int_test.py => penguin/model_int_test.py} (93%)
diff --git a/lit_nlp/examples/blank_slate_demo.py b/lit_nlp/examples/blank_slate_demo.py
index b64db1c0..9d235dcb 100644
--- a/lit_nlp/examples/blank_slate_demo.py
+++ b/lit_nlp/examples/blank_slate_demo.py
@@ -35,13 +35,13 @@
from lit_nlp.examples.datasets import imagenette
from lit_nlp.examples.datasets import lm
from lit_nlp.examples.datasets import mt
-from lit_nlp.examples.datasets import penguin_data
from lit_nlp.examples.datasets import summarization
from lit_nlp.examples.models import glue_models
from lit_nlp.examples.models import mobilenet
-from lit_nlp.examples.models import penguin_model
from lit_nlp.examples.models import pretrained_lms
from lit_nlp.examples.models import t5
+from lit_nlp.examples.penguin import data as penguin_data
+from lit_nlp.examples.penguin import model as penguin_model
# NOTE: additional flags defined in server_flags.py
diff --git a/lit_nlp/examples/datasets/penguin_data.py b/lit_nlp/examples/penguin/data.py
similarity index 100%
rename from lit_nlp/examples/datasets/penguin_data.py
rename to lit_nlp/examples/penguin/data.py
diff --git a/lit_nlp/examples/penguin_demo.py b/lit_nlp/examples/penguin/demo.py
similarity index 93%
rename from lit_nlp/examples/penguin_demo.py
rename to lit_nlp/examples/penguin/demo.py
index 79cc077c..c48fd538 100644
--- a/lit_nlp/examples/penguin_demo.py
+++ b/lit_nlp/examples/penguin/demo.py
@@ -1,7 +1,7 @@
"""🐧 LIT demo for tabular data using penguin classification.
To run:
- python -m lit_nlp.examples.penguin_demo --port=5432
+ blaze run -c opt --config=cuda examples/penguin:demo -- --port=5432
Then navigate to localhost:5432 to access the demo UI.
"""
@@ -17,8 +17,8 @@
from lit_nlp import server_flags
from lit_nlp.api import layout
from lit_nlp.components import minimal_targeted_counterfactuals
-from lit_nlp.examples.datasets import penguin_data
-from lit_nlp.examples.models import penguin_model
+from lit_nlp.examples.penguin import data as penguin_data
+from lit_nlp.examples.penguin import model as penguin_model
MODEL_PATH = 'https://storage.googleapis.com/what-if-tool-resources/lit-models/penguin.h5' # pylint: disable=line-too-long
diff --git a/lit_nlp/examples/models/penguin_model.py b/lit_nlp/examples/penguin/model.py
similarity index 97%
rename from lit_nlp/examples/models/penguin_model.py
rename to lit_nlp/examples/penguin/model.py
index 783edc5f..6aec9c11 100644
--- a/lit_nlp/examples/models/penguin_model.py
+++ b/lit_nlp/examples/penguin/model.py
@@ -2,7 +2,7 @@
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
-from lit_nlp.examples.datasets import penguin_data
+from lit_nlp.examples.penguin import data as penguin_data
from lit_nlp.lib import file_cache
import numpy as np
import tensorflow as tf
diff --git a/lit_nlp/examples/models/penguin_model_int_test.py b/lit_nlp/examples/penguin/model_int_test.py
similarity index 93%
rename from lit_nlp/examples/models/penguin_model_int_test.py
rename to lit_nlp/examples/penguin/model_int_test.py
index 7ebe86d7..c9dca404 100644
--- a/lit_nlp/examples/models/penguin_model_int_test.py
+++ b/lit_nlp/examples/penguin/model_int_test.py
@@ -1,7 +1,7 @@
"""Integration tests for penguin_model."""
from absl.testing import absltest
-from lit_nlp.examples.models import penguin_model
+from lit_nlp.examples.penguin import model as penguin_model
class PenguinModelIntTest(absltest.TestCase):
diff --git a/website/sphinx_src/components.md b/website/sphinx_src/components.md
index f2366d7c..19c4dde0 100644
--- a/website/sphinx_src/components.md
+++ b/website/sphinx_src/components.md
@@ -233,7 +233,7 @@ these cases, LIT validates model compatibility by checking that:
(`SparseMultilabel`) field.
For a demo using a penguin stats dataset/binary classification task, see
-[examples/penguin_demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/penguin_demo.py).
+[examples/penguin/demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/penguin/demo.py).
### Images
diff --git a/website/sphinx_src/demos.md b/website/sphinx_src/demos.md
index 306d3139..dfc77b50 100644
--- a/website/sphinx_src/demos.md
+++ b/website/sphinx_src/demos.md
@@ -159,7 +159,7 @@ https://pair-code.github.io/lit/tutorials/coref
### Tabular Data: Penguin Classification
**Hosted instance:** https://pair-code.github.io/lit/demos/penguins.html \
-**Code:** [examples/penguin_demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/penguin_demo.py)
+**Code:** [examples/penguin/demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/penguin/demo.py)
* Binary classification on
[penguin dataset](https://www.tensorflow.org/datasets/catalog/penguins).
From 6aa2eb64eddb8ca154401bfd6a039762bc374d6d Mon Sep 17 00:00:00 2001
From: Fan Ye
Date: Tue, 4 Jun 2024 10:05:10 -0700
Subject: [PATCH 14/50] Remove the image demo, models, dataset, and other
related documentation from the LIT examples.
PiperOrigin-RevId: 640191591
---
lit_nlp/examples/blank_slate_demo.py | 15 -
lit_nlp/examples/datasets/imagenette.py | 34 -
lit_nlp/examples/datasets/open_images.py | 32 -
lit_nlp/examples/image_demo.py | 100 -
lit_nlp/examples/models/imagenet_labels.py | 2019 --------------------
lit_nlp/examples/models/mobilenet.py | 78 -
lit_nlp/examples/models/mobilenet_test.py | 100 -
website/sphinx_src/components.md | 11 +-
website/sphinx_src/demos.md | 10 -
website/sphinx_src/faq.md | 8 +-
10 files changed, 6 insertions(+), 2401 deletions(-)
delete mode 100644 lit_nlp/examples/datasets/imagenette.py
delete mode 100644 lit_nlp/examples/datasets/open_images.py
delete mode 100644 lit_nlp/examples/image_demo.py
delete mode 100644 lit_nlp/examples/models/imagenet_labels.py
delete mode 100644 lit_nlp/examples/models/mobilenet.py
delete mode 100644 lit_nlp/examples/models/mobilenet_test.py
diff --git a/lit_nlp/examples/blank_slate_demo.py b/lit_nlp/examples/blank_slate_demo.py
index 9d235dcb..6fbefd40 100644
--- a/lit_nlp/examples/blank_slate_demo.py
+++ b/lit_nlp/examples/blank_slate_demo.py
@@ -12,7 +12,6 @@
left-to-right language model, with the Stanford Sentiment Treebank dataset,
the IMDB reviews dataset, Billion Word Benchmark (lm1b) dataset and the option
to load sentences from a flat text file.
-- MobileNet model, with the Imagenette TFDS dataset.
To run:
python -m lit_nlp.examples.blank_slate_demo --port=5432
@@ -32,12 +31,10 @@
from lit_nlp import server_flags
from lit_nlp.examples.datasets import classification
from lit_nlp.examples.datasets import glue
-from lit_nlp.examples.datasets import imagenette
from lit_nlp.examples.datasets import lm
from lit_nlp.examples.datasets import mt
from lit_nlp.examples.datasets import summarization
from lit_nlp.examples.models import glue_models
-from lit_nlp.examples.models import mobilenet
from lit_nlp.examples.models import pretrained_lms
from lit_nlp.examples.models import t5
from lit_nlp.examples.penguin import data as penguin_data
@@ -111,12 +108,6 @@ def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
pretrained_lms.GPT2LanguageModel.init_spec(),
)
- # image model loaders.
- model_loaders["image"] = (
- mobilenet.MobileNet,
- mobilenet.MobileNet.init_spec(),
- )
-
datasets = {}
dataset_loaders: lit_app.DatasetLoadersMap = {}
@@ -156,12 +147,6 @@ def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
lm.BillionWordBenchmark.init_spec(),
)
- # image demo dataset loaders.
- dataset_loaders["image"] = (
- imagenette.ImagenetteDataset,
- imagenette.ImagenetteDataset.init_spec(),
- )
-
# Start the LIT server. See server_flags.py for server options.
lit_demo = dev_server.Server(
models,
diff --git a/lit_nlp/examples/datasets/imagenette.py b/lit_nlp/examples/datasets/imagenette.py
deleted file mode 100644
index 7630709f..00000000
--- a/lit_nlp/examples/datasets/imagenette.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""Imagenette dataset from tfds."""
-
-from lit_nlp.api import dataset
-from lit_nlp.api import types as lit_types
-from lit_nlp.lib import image_utils
-from PIL import Image as PILImage
-import tensorflow_datasets as tfds
-
-
-class ImagenetteDataset(dataset.Dataset):
- """Imagenette TFDS dataset.
-
- See https://www.tensorflow.org/datasets/catalog/imagenette for details.
- Images are at low quality and by default load 100 examples from the validation
- set though this can be changed through the `split` parameter, as per TFDS
- documentation.
- """
-
- def __init__(self, split: str = 'validation[:100]'):
- tfds_examples = tfds.as_numpy(
- tfds.load('imagenette/160px', split=split, download=True,
- try_gcs=True))
- def convert_input(inp):
- pil_image = PILImage.fromarray(inp['image'])
- image_str = image_utils.convert_pil_to_image_str(pil_image)
- return {'image': image_str}
- self._examples = [convert_input(inp) for inp in tfds_examples]
-
- def spec(self) -> lit_types.Spec:
- return {
- 'image': lit_types.ImageBytes(),
- }
-
-
diff --git a/lit_nlp/examples/datasets/open_images.py b/lit_nlp/examples/datasets/open_images.py
deleted file mode 100644
index 71e0779a..00000000
--- a/lit_nlp/examples/datasets/open_images.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""Open Images dataset from tfds."""
-
-from lit_nlp.api import dataset
-from lit_nlp.api import types as lit_types
-from lit_nlp.lib import image_utils
-from PIL import Image as PILImage
-import tensorflow_datasets as tfds
-
-
-class OpenImagesDataset(dataset.Dataset):
- """OpenImages TFDS dataset.
-
- See https://www.tensorflow.org/datasets/catalog/open_images_v4 for details.
- Images are at 72 JPEG quality and by default load 100 examples from the test
- set though this can be changed through the `split` parameter, as per TFDS
- documentation.
- """
-
- def __init__(self, split: str = 'test[:100]'):
- tfds_examples = tfds.as_numpy(
- tfds.load('open_images_v4/200k', split=split, download=True,
- try_gcs=True))
- def convert_input(inp):
- pil_image = PILImage.fromarray(inp['image'])
- image_str = image_utils.convert_pil_to_image_str(pil_image)
- return {'image': image_str}
- self._examples = [convert_input(inp) for inp in tfds_examples]
-
- def spec(self) -> lit_types.Spec:
- return {
- 'image': lit_types.ImageBytes(),
- }
diff --git a/lit_nlp/examples/image_demo.py b/lit_nlp/examples/image_demo.py
deleted file mode 100644
index 5d9d25a4..00000000
--- a/lit_nlp/examples/image_demo.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""LIT demo for image model.
-
-To run:
- python -m lit_nlp.examples.image_demo --port=5432
-
-Then navigate to localhost:5432 to access the demo UI.
-"""
-
-from collections.abc import Sequence
-import sys
-from typing import Optional
-
-from absl import app
-from absl import flags
-from absl import logging
-from lit_nlp import dev_server
-from lit_nlp import server_flags
-from lit_nlp.api import layout
-from lit_nlp.components import classification_results
-from lit_nlp.components import image_gradient_maps
-from lit_nlp.examples.datasets import imagenette
-from lit_nlp.examples.models import mobilenet
-
-
-FLAGS = flags.FLAGS
-
-FLAGS.set_default('development_demo', True)
-FLAGS.set_default('warm_start', 1)
-FLAGS.set_default('page_title', 'LIT Image Demo')
-
-_MAX_EXAMPLES = flags.DEFINE_integer(
- 'max_examples',
- None,
- (
- 'Maximum number of examples to load into LIT. '
- 'Set --max_examples=200 for a quick start.'
- ),
-)
-
-
-def get_wsgi_app():
- """Returns a LitApp instance for consumption by gunicorn."""
- FLAGS.set_default('server_type', 'external')
- FLAGS.set_default('demo_mode', True)
- # Parse flags without calling app.run(main), to avoid conflict with
- # gunicorn command line flags.
- unused = flags.FLAGS(sys.argv, known_only=True)
- if unused:
- logging.info('image_demo:get_wsgi_app() called with unused args: %s',
- unused)
- return main([])
-
-# Custom frontend layout; see api/layout.py
-modules = layout.LitModuleName
-DEMO_LAYOUT = layout.LitCanonicalLayout(
- upper={
- 'Main': [modules.DataTableModule, modules.DatapointEditorModule],
- },
- lower={
- 'Predictions': [modules.ClassificationModule, modules.ScalarModule],
- 'Explanations': [
- modules.ClassificationModule,
- modules.SalienceMapModule,
- ],
- },
- description='Basic layout for image demo',
-)
-
-CUSTOM_LAYOUTS = layout.DEFAULT_LAYOUTS | {'default': DEMO_LAYOUT}
-
-
-def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
- if len(argv) > 1:
- raise app.UsageError('Too many command-line arguments.')
-
- datasets = {'imagenette': imagenette.ImagenetteDataset()}
- # Truncate datasets if --max_examples is set.
- if _MAX_EXAMPLES.value is not None:
- for name in datasets:
- logging.info("Dataset: '%s' with %d examples", name, len(datasets[name]))
- datasets[name] = datasets[name].slice[: _MAX_EXAMPLES.value]
- logging.info(' truncated to %d examples', len(datasets[name]))
-
- models = {'mobilenet': mobilenet.MobileNet()}
- interpreters = {
- 'classification': classification_results.ClassificationInterpreter(),
- } | image_gradient_maps.all_interpreters()
-
- lit_demo = dev_server.Server(
- models,
- datasets,
- interpreters=interpreters,
- generators={},
- layouts={'default': DEMO_LAYOUT},
- **server_flags.get_flags())
- return lit_demo.serve()
-
-
-if __name__ == '__main__':
- app.run(main)
diff --git a/lit_nlp/examples/models/imagenet_labels.py b/lit_nlp/examples/models/imagenet_labels.py
deleted file mode 100644
index 14db6708..00000000
--- a/lit_nlp/examples/models/imagenet_labels.py
+++ /dev/null
@@ -1,2019 +0,0 @@
-"""Imagenet labels."""
-# pylint: disable=line-too-long
-IMAGENET_2012_LABELS = {
- 0:
- 'tench, Tinca tinca',
- 1:
- 'goldfish, Carassius auratus',
- 2:
- 'great white shark, white shark, man-eater, man-eating shark, '
- 'Carcharodon carcharias',
- 3:
- 'tiger shark, Galeocerdo cuvieri',
- 4:
- 'hammerhead, hammerhead shark',
- 5:
- 'electric ray, crampfish, numbfish, torpedo',
- 6:
- 'stingray',
- 7:
- 'cock',
- 8:
- 'hen',
- 9:
- 'ostrich, Struthio camelus',
- 10:
- 'brambling, Fringilla montifringilla',
- 11:
- 'goldfinch, Carduelis carduelis',
- 12:
- 'house finch, linnet, Carpodacus mexicanus',
- 13:
- 'junco, snowbird',
- 14:
- 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
- 15:
- 'robin, American robin, Turdus migratorius',
- 16:
- 'bulbul',
- 17:
- 'jay',
- 18:
- 'magpie',
- 19:
- 'chickadee',
- 20:
- 'water ouzel, dipper',
- 21:
- 'kite',
- 22:
- 'bald eagle, American eagle, Haliaeetus leucocephalus',
- 23:
- 'vulture',
- 24:
- 'great grey owl, great gray owl, Strix nebulosa',
- 25:
- 'European fire salamander, Salamandra salamandra',
- 26:
- 'common newt, Triturus vulgaris',
- 27:
- 'eft',
- 28:
- 'spotted salamander, Ambystoma maculatum',
- 29:
- 'axolotl, mud puppy, Ambystoma mexicanum',
- 30:
- 'bullfrog, Rana catesbeiana',
- 31:
- 'tree frog, tree-frog',
- 32:
- 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
- 33:
- 'loggerhead, loggerhead turtle, Caretta caretta',
- 34:
- 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
- 35:
- 'mud turtle',
- 36:
- 'terrapin',
- 37:
- 'box turtle, box tortoise',
- 38:
- 'banded gecko',
- 39:
- 'common iguana, iguana, Iguana iguana',
- 40:
- 'American chameleon, anole, Anolis carolinensis',
- 41:
- 'whiptail, whiptail lizard',
- 42:
- 'agama',
- 43:
- 'frilled lizard, Chlamydosaurus kingi',
- 44:
- 'alligator lizard',
- 45:
- 'Gila monster, Heloderma suspectum',
- 46:
- 'green lizard, Lacerta viridis',
- 47:
- 'African chameleon, Chamaeleo chamaeleon',
- 48:
- 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus '
- 'komodoensis',
- 49:
- 'African crocodile, Nile crocodile, Crocodylus niloticus',
- 50:
- 'American alligator, Alligator mississipiensis',
- 51:
- 'triceratops',
- 52:
- 'thunder snake, worm snake, Carphophis amoenus',
- 53:
- 'ringneck snake, ring-necked snake, ring snake',
- 54:
- 'hognose snake, puff adder, sand viper',
- 55:
- 'green snake, grass snake',
- 56:
- 'king snake, kingsnake',
- 57:
- 'garter snake, grass snake',
- 58:
- 'water snake',
- 59:
- 'vine snake',
- 60:
- 'night snake, Hypsiglena torquata',
- 61:
- 'boa constrictor, Constrictor constrictor',
- 62:
- 'rock python, rock snake, Python sebae',
- 63:
- 'Indian cobra, Naja naja',
- 64:
- 'green mamba',
- 65:
- 'sea snake',
- 66:
- 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
- 67:
- 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
- 68:
- 'sidewinder, horned rattlesnake, Crotalus cerastes',
- 69:
- 'trilobite',
- 70:
- 'harvestman, daddy longlegs, Phalangium opilio',
- 71:
- 'scorpion',
- 72:
- 'black and gold garden spider, Argiope aurantia',
- 73:
- 'barn spider, Araneus cavaticus',
- 74:
- 'garden spider, Aranea diademata',
- 75:
- 'black widow, Latrodectus mactans',
- 76:
- 'tarantula',
- 77:
- 'wolf spider, hunting spider',
- 78:
- 'tick',
- 79:
- 'centipede',
- 80:
- 'black grouse',
- 81:
- 'ptarmigan',
- 82:
- 'ruffed grouse, partridge, Bonasa umbellus',
- 83:
- 'prairie chicken, prairie grouse, prairie fowl',
- 84:
- 'peacock',
- 85:
- 'quail',
- 86:
- 'partridge',
- 87:
- 'African grey, African gray, Psittacus erithacus',
- 88:
- 'macaw',
- 89:
- 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
- 90:
- 'lorikeet',
- 91:
- 'coucal',
- 92:
- 'bee eater',
- 93:
- 'hornbill',
- 94:
- 'hummingbird',
- 95:
- 'jacamar',
- 96:
- 'toucan',
- 97:
- 'drake',
- 98:
- 'red-breasted merganser, Mergus serrator', # NOTYPO
- 99:
- 'goose',
- 100:
- 'black swan, Cygnus atratus',
- 101:
- 'tusker',
- 102:
- 'echidna, spiny anteater, anteater',
- 103:
- 'platypus, duckbill, duckbilled platypus, duck-billed platypus, '
- 'Ornithorhynchus anatinus',
- 104:
- 'wallaby, brush kangaroo',
- 105:
- 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
- 106:
- 'wombat',
- 107:
- 'jellyfish',
- 108:
- 'sea anemone, anemone',
- 109:
- 'brain coral',
- 110:
- 'flatworm, platyhelminth',
- 111:
- 'nematode, nematode worm, roundworm',
- 112:
- 'conch',
- 113:
- 'snail',
- 114:
- 'slug',
- 115:
- 'sea slug, nudibranch',
- 116:
- 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
- 117:
- 'chambered nautilus, pearly nautilus, nautilus',
- 118:
- 'Dungeness crab, Cancer magister',
- 119:
- 'rock crab, Cancer irroratus',
- 120:
- 'fiddler crab',
- 121:
- 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, '
- 'Paralithodes camtschatica',
- 122:
- 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
- 123:
- 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
- 124:
- 'crayfish, crawfish, crawdad, crawdaddy',
- 125:
- 'hermit crab',
- 126:
- 'isopod',
- 127:
- 'white stork, Ciconia ciconia',
- 128:
- 'black stork, Ciconia nigra',
- 129:
- 'spoonbill',
- 130:
- 'flamingo',
- 131:
- 'little blue heron, Egretta caerulea',
- 132:
- 'American egret, great white heron, Egretta albus',
- 133:
- 'bittern',
- 134:
- 'crane',
- 135:
- 'limpkin, Aramus pictus',
- 136:
- 'European gallinule, Porphyrio porphyrio',
- 137:
- 'American coot, marsh hen, mud hen, water hen, Fulica americana',
- 138:
- 'bustard',
- 139:
- 'ruddy turnstone, Arenaria interpres',
- 140:
- 'red-backed sandpiper, dunlin, Erolia alpina',
- 141:
- 'redshank, Tringa totanus',
- 142:
- 'dowitcher',
- 143:
- 'oystercatcher, oyster catcher',
- 144:
- 'pelican',
- 145:
- 'king penguin, Aptenodytes patagonica',
- 146:
- 'albatross, mollymawk',
- 147:
- 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, '
- 'Eschrichtius robustus',
- 148:
- 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
- 149:
- 'dugong, Dugong dugon',
- 150:
- 'sea lion',
- 151:
- 'Chihuahua',
- 152:
- 'Japanese spaniel',
- 153:
- 'Maltese dog, Maltese terrier, Maltese',
- 154:
- 'Pekinese, Pekingese, Peke',
- 155:
- 'Shih-Tzu',
- 156:
- 'Blenheim spaniel',
- 157:
- 'papillon',
- 158:
- 'toy terrier',
- 159:
- 'Rhodesian ridgeback',
- 160:
- 'Afghan hound, Afghan',
- 161:
- 'basset, basset hound',
- 162:
- 'beagle',
- 163:
- 'bloodhound, sleuthhound',
- 164:
- 'bluetick',
- 165:
- 'black-and-tan coonhound',
- 166:
- 'Walker hound, Walker foxhound',
- 167:
- 'English foxhound',
- 168:
- 'redbone',
- 169:
- 'borzoi, Russian wolfhound',
- 170:
- 'Irish wolfhound',
- 171:
- 'Italian greyhound',
- 172:
- 'whippet',
- 173:
- 'Ibizan hound, Ibizan Podenco',
- 174:
- 'Norwegian elkhound, elkhound',
- 175:
- 'otterhound, otter hound',
- 176:
- 'Saluki, gazelle hound',
- 177:
- 'Scottish deerhound, deerhound',
- 178:
- 'Weimaraner',
- 179:
- 'Staffordshire bullterrier, Staffordshire bull terrier',
- 180:
- 'American Staffordshire terrier, Staffordshire terrier, American pit '
- 'bull terrier, pit bull terrier',
- 181:
- 'Bedlington terrier',
- 182:
- 'Border terrier',
- 183:
- 'Kerry blue terrier',
- 184:
- 'Irish terrier',
- 185:
- 'Norfolk terrier',
- 186:
- 'Norwich terrier',
- 187:
- 'Yorkshire terrier',
- 188:
- 'wire-haired fox terrier',
- 189:
- 'Lakeland terrier',
- 190:
- 'Sealyham terrier, Sealyham',
- 191:
- 'Airedale, Airedale terrier',
- 192:
- 'cairn, cairn terrier',
- 193:
- 'Australian terrier',
- 194:
- 'Dandie Dinmont, Dandie Dinmont terrier',
- 195:
- 'Boston bull, Boston terrier',
- 196:
- 'miniature schnauzer',
- 197:
- 'giant schnauzer',
- 198:
- 'standard schnauzer',
- 199:
- 'Scotch terrier, Scottish terrier, Scottie',
- 200:
- 'Tibetan terrier, chrysanthemum dog',
- 201:
- 'silky terrier, Sydney silky',
- 202:
- 'soft-coated wheaten terrier',
- 203:
- 'West Highland white terrier',
- 204:
- 'Lhasa, Lhasa apso',
- 205:
- 'flat-coated retriever',
- 206:
- 'curly-coated retriever',
- 207:
- 'golden retriever',
- 208:
- 'Labrador retriever',
- 209:
- 'Chesapeake Bay retriever',
- 210:
- 'German short-haired pointer',
- 211:
- 'vizsla, Hungarian pointer',
- 212:
- 'English setter',
- 213:
- 'Irish setter, red setter',
- 214:
- 'Gordon setter',
- 215:
- 'Brittany spaniel',
- 216:
- 'clumber, clumber spaniel',
- 217:
- 'English springer, English springer spaniel',
- 218:
- 'Welsh springer spaniel',
- 219:
- 'cocker spaniel, English cocker spaniel, cocker',
- 220:
- 'Sussex spaniel',
- 221:
- 'Irish water spaniel',
- 222:
- 'kuvasz',
- 223:
- 'schipperke',
- 224:
- 'groenendael',
- 225:
- 'malinois',
- 226:
- 'briard',
- 227:
- 'kelpie',
- 228:
- 'komondor',
- 229:
- 'Old English sheepdog, bobtail',
- 230:
- 'Shetland sheepdog, Shetland sheep dog, Shetland',
- 231:
- 'collie',
- 232:
- 'Border collie',
- 233:
- 'Bouvier des Flandres, Bouviers des Flandres',
- 234:
- 'Rottweiler',
- 235:
- 'German shepherd, German shepherd dog, German police dog, alsatian',
- 236:
- 'Doberman, Doberman pinscher',
- 237:
- 'miniature pinscher',
- 238:
- 'Greater Swiss Mountain dog',
- 239:
- 'Bernese mountain dog',
- 240:
- 'Appenzeller',
- 241:
- 'EntleBucher',
- 242:
- 'boxer',
- 243:
- 'bull mastiff',
- 244:
- 'Tibetan mastiff',
- 245:
- 'French bulldog',
- 246:
- 'Great Dane',
- 247:
- 'Saint Bernard, St Bernard',
- 248:
- 'Eskimo dog, husky',
- 249:
- 'malamute, malemute, Alaskan malamute',
- 250:
- 'Siberian husky',
- 251:
- 'dalmatian, coach dog, carriage dog',
- 252:
- 'affenpinscher, monkey pinscher, monkey dog',
- 253:
- 'basenji',
- 254:
- 'pug, pug-dog',
- 255:
- 'Leonberg',
- 256:
- 'Newfoundland, Newfoundland dog',
- 257:
- 'Great Pyrenees',
- 258:
- 'Samoyed, Samoyede',
- 259:
- 'Pomeranian',
- 260:
- 'chow, chow chow',
- 261:
- 'keeshond',
- 262:
- 'Brabancon griffon',
- 263:
- 'Pembroke, Pembroke Welsh corgi',
- 264:
- 'Cardigan, Cardigan Welsh corgi',
- 265:
- 'toy poodle',
- 266:
- 'miniature poodle',
- 267:
- 'standard poodle',
- 268:
- 'Mexican hairless',
- 269:
- 'timber wolf, grey wolf, gray wolf, Canis lupus',
- 270:
- 'white wolf, Arctic wolf, Canis lupus tundrarum',
- 271:
- 'red wolf, maned wolf, Canis rufus, Canis niger',
- 272:
- 'coyote, prairie wolf, brush wolf, Canis latrans',
- 273:
- 'dingo, warrigal, warragal, Canis dingo',
- 274:
- 'dhole, Cuon alpinus',
- 275:
- 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
- 276:
- 'hyena, hyaena',
- 277:
- 'red fox, Vulpes vulpes',
- 278:
- 'kit fox, Vulpes macrotis',
- 279:
- 'Arctic fox, white fox, Alopex lagopus',
- 280:
- 'grey fox, gray fox, Urocyon cinereoargenteus',
- 281:
- 'tabby, tabby cat',
- 282:
- 'tiger cat',
- 283:
- 'Persian cat',
- 284:
- 'Siamese cat, Siamese',
- 285:
- 'Egyptian cat',
- 286:
- 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
- 287:
- 'lynx, catamount',
- 288:
- 'leopard, Panthera pardus',
- 289:
- 'snow leopard, ounce, Panthera uncia',
- 290:
- 'jaguar, panther, Panthera onca, Felis onca',
- 291:
- 'lion, king of beasts, Panthera leo',
- 292:
- 'tiger, Panthera tigris',
- 293:
- 'cheetah, chetah, Acinonyx jubatus',
- 294:
- 'brown bear, bruin, Ursus arctos',
- 295:
- 'American black bear, black bear, Ursus americanus, Euarctos americanus',
- 296:
- 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
- 297:
- 'sloth bear, Melursus ursinus, Ursus ursinus',
- 298:
- 'mongoose',
- 299:
- 'meerkat, mierkat',
- 300:
- 'tiger beetle',
- 301:
- 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
- 302:
- 'ground beetle, carabid beetle',
- 303:
- 'long-horned beetle, longicorn, longicorn beetle',
- 304:
- 'leaf beetle, chrysomelid',
- 305:
- 'dung beetle',
- 306:
- 'rhinoceros beetle',
- 307:
- 'weevil',
- 308:
- 'fly',
- 309:
- 'bee',
- 310:
- 'ant, emmet, pismire',
- 311:
- 'grasshopper, hopper',
- 312:
- 'cricket',
- 313:
- 'walking stick, walkingstick, stick insect',
- 314:
- 'cockroach, roach',
- 315:
- 'mantis, mantid',
- 316:
- 'cicada, cicala',
- 317:
- 'leafhopper',
- 318:
- 'lacewing, lacewing fly',
- 319:
- "dragonfly, darning needle, devil's darning needle, sewing needle, "
- 'snake feeder, snake doctor, mosquito hawk, skeeter hawk',
- 320:
- 'damselfly',
- 321:
- 'admiral',
- 322:
- 'ringlet, ringlet butterfly',
- 323:
- 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
- 324:
- 'cabbage butterfly',
- 325:
- 'sulphur butterfly, sulfur butterfly',
- 326:
- 'lycaenid, lycaenid butterfly',
- 327:
- 'starfish, sea star',
- 328:
- 'sea urchin',
- 329:
- 'sea cucumber, holothurian',
- 330:
- 'wood rabbit, cottontail, cottontail rabbit',
- 331:
- 'hare',
- 332:
- 'Angora, Angora rabbit',
- 333:
- 'hamster',
- 334:
- 'porcupine, hedgehog',
- 335:
- 'fox squirrel, eastern fox squirrel, Sciurus niger',
- 336:
- 'marmot',
- 337:
- 'beaver',
- 338:
- 'guinea pig, Cavia cobaya',
- 339:
- 'sorrel',
- 340:
- 'zebra',
- 341:
- 'hog, pig, grunter, squealer, Sus scrofa',
- 342:
- 'wild boar, boar, Sus scrofa',
- 343:
- 'warthog',
- 344:
- 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
- 345:
- 'ox',
- 346:
- 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
- 347:
- 'bison',
- 348:
- 'ram, tup',
- 349:
- 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky '
- 'Mountain sheep, Ovis canadensis',
- 350:
- 'ibex, Capra ibex',
- 351:
- 'hartebeest',
- 352:
- 'impala, Aepyceros melampus',
- 353:
- 'gazelle',
- 354:
- 'Arabian camel, dromedary, Camelus dromedarius',
- 355:
- 'llama',
- 356:
- 'weasel',
- 357:
- 'mink',
- 358:
- 'polecat, fitch, foulmart, foumart, Mustela putorius',
- 359:
- 'black-footed ferret, ferret, Mustela nigripes',
- 360:
- 'otter',
- 361:
- 'skunk, polecat, wood pussy',
- 362:
- 'badger',
- 363:
- 'armadillo',
- 364:
- 'three-toed sloth, ai, Bradypus tridactylus',
- 365:
- 'orangutan, orang, orangutang, Pongo pygmaeus',
- 366:
- 'gorilla, Gorilla gorilla',
- 367:
- 'chimpanzee, chimp, Pan troglodytes',
- 368:
- 'gibbon, Hylobates lar',
- 369:
- 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
- 370:
- 'guenon, guenon monkey',
- 371:
- 'patas, hussar monkey, Erythrocebus patas',
- 372:
- 'baboon',
- 373:
- 'macaque',
- 374:
- 'langur',
- 375:
- 'colobus, colobus monkey',
- 376:
- 'proboscis monkey, Nasalis larvatus',
- 377:
- 'marmoset',
- 378:
- 'capuchin, ringtail, Cebus capucinus',
- 379:
- 'howler monkey, howler',
- 380:
- 'titi, titi monkey',
- 381:
- 'spider monkey, Ateles geoffroyi',
- 382:
- 'squirrel monkey, Saimiri sciureus',
- 383:
- 'Madagascar cat, ring-tailed lemur, Lemur catta',
- 384:
- 'indri, indris, Indri indri, Indri brevicaudatus',
- 385:
- 'Indian elephant, Elephas maximus',
- 386:
- 'African elephant, Loxodonta africana',
- 387:
- 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
- 388:
- 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
- 389:
- 'barracouta, snoek',
- 390:
- 'eel',
- 391:
- 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
- 392:
- 'rock beauty, Holocanthus tricolor',
- 393:
- 'anemone fish',
- 394:
- 'sturgeon',
- 395:
- 'gar, garfish, garpike, billfish, Lepisosteus osseus',
- 396:
- 'lionfish',
- 397:
- 'puffer, pufferfish, blowfish, globefish',
- 398:
- 'abacus',
- 399:
- 'abaya',
- 400:
- "academic gown, academic robe, judge's robe",
- 401:
- 'accordion, piano accordion, squeeze box',
- 402:
- 'acoustic guitar',
- 403:
- 'aircraft carrier, carrier, flattop, attack aircraft carrier',
- 404:
- 'airliner',
- 405:
- 'airship, dirigible',
- 406:
- 'altar',
- 407:
- 'ambulance',
- 408:
- 'amphibian, amphibious vehicle',
- 409:
- 'analog clock',
- 410:
- 'apiary, bee house',
- 411:
- 'apron',
- 412:
- 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, '
- 'dustbin, trash barrel, trash bin',
- 413:
- 'assault rifle, assault gun',
- 414:
- 'backpack, back pack, knapsack, packsack, rucksack, haversack',
- 415:
- 'bakery, bakeshop, bakehouse',
- 416:
- 'balance beam, beam',
- 417:
- 'balloon',
- 418:
- 'ballpoint, ballpoint pen, ballpen, Biro',
- 419:
- 'Band Aid',
- 420:
- 'banjo',
- 421:
- 'bannister, banister, balustrade, balusters, handrail',
- 422:
- 'barbell',
- 423:
- 'barber chair',
- 424:
- 'barbershop',
- 425:
- 'barn',
- 426:
- 'barometer',
- 427:
- 'barrel, cask',
- 428:
- 'barrow, garden cart, lawn cart, wheelbarrow',
- 429:
- 'baseball',
- 430:
- 'basketball',
- 431:
- 'bassinet',
- 432:
- 'bassoon',
- 433:
- 'bathing cap, swimming cap',
- 434:
- 'bath towel',
- 435:
- 'bathtub, bathing tub, bath, tub',
- 436:
- 'beach wagon, station wagon, wagon, estate car, beach waggon, station '
- 'waggon, waggon',
- 437:
- 'beacon, lighthouse, beacon light, pharos',
- 438:
- 'beaker',
- 439:
- 'bearskin, busby, shako',
- 440:
- 'beer bottle',
- 441:
- 'beer glass',
- 442:
- 'bell cote, bell cot',
- 443:
- 'bib',
- 444:
- 'bicycle-built-for-two, tandem bicycle, tandem',
- 445:
- 'bikini, two-piece',
- 446:
- 'binder, ring-binder',
- 447:
- 'binoculars, field glasses, opera glasses',
- 448:
- 'birdhouse',
- 449:
- 'boathouse',
- 450:
- 'bobsled, bobsleigh, bob',
- 451:
- 'bolo tie, bolo, bola tie, bola',
- 452:
- 'bonnet, poke bonnet',
- 453:
- 'bookcase',
- 454:
- 'bookshop, bookstore, bookstall',
- 455:
- 'bottlecap',
- 456:
- 'bow',
- 457:
- 'bow tie, bow-tie, bowtie',
- 458:
- 'brass, memorial tablet, plaque',
- 459:
- 'brassiere, bra, bandeau',
- 460:
- 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
- 461:
- 'breastplate, aegis, egis',
- 462:
- 'broom',
- 463:
- 'bucket, pail',
- 464:
- 'buckle',
- 465:
- 'bulletproof vest',
- 466:
- 'bullet train, bullet',
- 467:
- 'butcher shop, meat market',
- 468:
- 'cab, hack, taxi, taxicab',
- 469:
- 'caldron, cauldron',
- 470:
- 'candle, taper, wax light',
- 471:
- 'cannon',
- 472:
- 'canoe',
- 473:
- 'can opener, tin opener',
- 474:
- 'cardigan',
- 475:
- 'car mirror',
- 476:
- 'carousel, carrousel, merry-go-round, roundabout, whirligig',
- 477:
- "carpenter's kit, tool kit",
- 478:
- 'carton',
- 479:
- 'car wheel',
- 480:
- 'cash machine, cash dispenser, automated teller machine, automatic '
- 'teller machine, automated teller, automatic teller, ATM',
- 481:
- 'cassette',
- 482:
- 'cassette player',
- 483:
- 'castle',
- 484:
- 'catamaran',
- 485:
- 'CD player',
- 486:
- 'cello, violoncello',
- 487:
- 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
- 488:
- 'chain',
- 489:
- 'chainlink fence',
- 490:
- 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, '
- 'ring armour',
- 491:
- 'chain saw, chainsaw',
- 492:
- 'chest',
- 493:
- 'chiffonier, commode',
- 494:
- 'chime, bell, gong',
- 495:
- 'china cabinet, china closet',
- 496:
- 'Christmas stocking',
- 497:
- 'church, church building',
- 498:
- 'cinema, movie theater, movie theatre, movie house, picture palace',
- 499:
- 'cleaver, meat cleaver, chopper',
- 500:
- 'cliff dwelling',
- 501:
- 'cloak',
- 502:
- 'clog, geta, patten, sabot',
- 503:
- 'cocktail shaker',
- 504:
- 'coffee mug',
- 505:
- 'coffeepot',
- 506:
- 'coil, spiral, volute, whorl, helix',
- 507:
- 'combination lock',
- 508:
- 'computer keyboard, keypad',
- 509:
- 'confectionery, confectionary, candy store',
- 510:
- 'container ship, containership, container vessel',
- 511:
- 'convertible',
- 512:
- 'corkscrew, bottle screw',
- 513:
- 'cornet, horn, trumpet, trump',
- 514:
- 'cowboy boot',
- 515:
- 'cowboy hat, ten-gallon hat',
- 516:
- 'cradle',
- 517:
- 'crane',
- 518:
- 'crash helmet',
- 519:
- 'crate',
- 520:
- 'crib, cot',
- 521:
- 'Crock Pot',
- 522:
- 'croquet ball',
- 523:
- 'crutch',
- 524:
- 'cuirass',
- 525:
- 'dam, dike, dyke',
- 526:
- 'desk',
- 527:
- 'desktop computer',
- 528:
- 'dial telephone, dial phone',
- 529:
- 'diaper, nappy, napkin',
- 530:
- 'digital clock',
- 531:
- 'digital watch',
- 532:
- 'dining table, board',
- 533:
- 'dishrag, dishcloth',
- 534:
- 'dishwasher, dish washer, dishwashing machine',
- 535:
- 'disk brake, disc brake',
- 536:
- 'dock, dockage, docking facility',
- 537:
- 'dogsled, dog sled, dog sleigh',
- 538:
- 'dome',
- 539:
- 'doormat, welcome mat',
- 540:
- 'drilling platform, offshore rig',
- 541:
- 'drum, membranophone, tympan',
- 542:
- 'drumstick',
- 543:
- 'dumbbell',
- 544:
- 'Dutch oven',
- 545:
- 'electric fan, blower',
- 546:
- 'electric guitar',
- 547:
- 'electric locomotive',
- 548:
- 'entertainment center',
- 549:
- 'envelope',
- 550:
- 'espresso maker',
- 551:
- 'face powder',
- 552:
- 'feather boa, boa',
- 553:
- 'file, file cabinet, filing cabinet',
- 554:
- 'fireboat',
- 555:
- 'fire engine, fire truck',
- 556:
- 'fire screen, fireguard',
- 557:
- 'flagpole, flagstaff',
- 558:
- 'flute, transverse flute',
- 559:
- 'folding chair',
- 560:
- 'football helmet',
- 561:
- 'forklift',
- 562:
- 'fountain',
- 563:
- 'fountain pen',
- 564:
- 'four-poster',
- 565:
- 'freight car',
- 566:
- 'French horn, horn',
- 567:
- 'frying pan, frypan, skillet',
- 568:
- 'fur coat',
- 569:
- 'garbage truck, dustcart',
- 570:
- 'gasmask, respirator, gas helmet',
- 571:
- 'gas pump, gasoline pump, petrol pump, island dispenser',
- 572:
- 'goblet',
- 573:
- 'go-kart',
- 574:
- 'golf ball',
- 575:
- 'golfcart, golf cart',
- 576:
- 'gondola',
- 577:
- 'gong, tam-tam',
- 578:
- 'gown',
- 579:
- 'grand piano, grand',
- 580:
- 'greenhouse, nursery, glasshouse',
- 581:
- 'grille, radiator grille',
- 582:
- 'grocery store, grocery, food market, market',
- 583:
- 'guillotine',
- 584:
- 'hair slide',
- 585:
- 'hair spray',
- 586:
- 'half track',
- 587:
- 'hammer',
- 588:
- 'hamper',
- 589:
- 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
- 590:
- 'hand-held computer, hand-held microcomputer',
- 591:
- 'handkerchief, hankie, hanky, hankey',
- 592:
- 'hard disc, hard disk, fixed disk',
- 593:
- 'harmonica, mouth organ, harp, mouth harp',
- 594:
- 'harp',
- 595:
- 'harvester, reaper',
- 596:
- 'hatchet',
- 597:
- 'holster',
- 598:
- 'home theater, home theatre',
- 599:
- 'honeycomb',
- 600:
- 'hook, claw',
- 601:
- 'hoopskirt, crinoline',
- 602:
- 'horizontal bar, high bar',
- 603:
- 'horse cart, horse-cart',
- 604:
- 'hourglass',
- 605:
- 'iPod',
- 606:
- 'iron, smoothing iron',
- 607:
- "jack-o'-lantern",
- 608:
- 'jean, blue jean, denim',
- 609:
- 'jeep, landrover',
- 610:
- 'jersey, T-shirt, tee shirt',
- 611:
- 'jigsaw puzzle',
- 612:
- 'jinrikisha, ricksha, rickshaw',
- 613:
- 'joystick',
- 614:
- 'kimono',
- 615:
- 'knee pad',
- 616:
- 'knot',
- 617:
- 'lab coat, laboratory coat',
- 618:
- 'ladle',
- 619:
- 'lampshade, lamp shade',
- 620:
- 'laptop, laptop computer',
- 621:
- 'lawn mower, mower',
- 622:
- 'lens cap, lens cover',
- 623:
- 'letter opener, paper knife, paperknife',
- 624:
- 'library',
- 625:
- 'lifeboat',
- 626:
- 'lighter, light, igniter, ignitor',
- 627:
- 'limousine, limo',
- 628:
- 'liner, ocean liner',
- 629:
- 'lipstick, lip rouge',
- 630:
- 'Loafer',
- 631:
- 'lotion',
- 632:
- 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
- 633:
- "loupe, jeweler's loupe",
- 634:
- 'lumbermill, sawmill',
- 635:
- 'magnetic compass',
- 636:
- 'mailbag, postbag',
- 637:
- 'mailbox, letter box',
- 638:
- 'maillot',
- 639:
- 'maillot, tank suit',
- 640:
- 'manhole cover',
- 641:
- 'maraca',
- 642:
- 'marimba, xylophone',
- 643:
- 'mask',
- 644:
- 'matchstick',
- 645:
- 'maypole',
- 646:
- 'maze, labyrinth',
- 647:
- 'measuring cup',
- 648:
- 'medicine chest, medicine cabinet',
- 649:
- 'megalith, megalithic structure',
- 650:
- 'microphone, mike',
- 651:
- 'microwave, microwave oven',
- 652:
- 'military uniform',
- 653:
- 'milk can',
- 654:
- 'minibus',
- 655:
- 'miniskirt, mini',
- 656:
- 'minivan',
- 657:
- 'missile',
- 658:
- 'mitten',
- 659:
- 'mixing bowl',
- 660:
- 'mobile home, manufactured home',
- 661:
- 'Model T',
- 662:
- 'modem',
- 663:
- 'monastery',
- 664:
- 'monitor',
- 665:
- 'moped',
- 666:
- 'mortar',
- 667:
- 'mortarboard',
- 668:
- 'mosque',
- 669:
- 'mosquito net',
- 670:
- 'motor scooter, scooter',
- 671:
- 'mountain bike, all-terrain bike, off-roader',
- 672:
- 'mountain tent',
- 673:
- 'mouse, computer mouse',
- 674:
- 'mousetrap',
- 675:
- 'moving van',
- 676:
- 'muzzle',
- 677:
- 'nail',
- 678:
- 'neck brace',
- 679:
- 'necklace',
- 680:
- 'nipple',
- 681:
- 'notebook, notebook computer',
- 682:
- 'obelisk',
- 683:
- 'oboe, hautboy, hautbois',
- 684:
- 'ocarina, sweet potato',
- 685:
- 'odometer, hodometer, mileometer, milometer',
- 686:
- 'oil filter',
- 687:
- 'organ, pipe organ',
- 688:
- 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
- 689:
- 'overskirt',
- 690:
- 'oxcart',
- 691:
- 'oxygen mask',
- 692:
- 'packet',
- 693:
- 'paddle, boat paddle',
- 694:
- 'paddlewheel, paddle wheel',
- 695:
- 'padlock',
- 696:
- 'paintbrush',
- 697:
- "pajama, pyjama, pj's, jammies",
- 698:
- 'palace',
- 699:
- 'panpipe, pandean pipe, syrinx',
- 700:
- 'paper towel',
- 701:
- 'parachute, chute',
- 702:
- 'parallel bars, bars',
- 703:
- 'park bench',
- 704:
- 'parking meter',
- 705:
- 'passenger car, coach, carriage',
- 706:
- 'patio, terrace',
- 707:
- 'pay-phone, pay-station',
- 708:
- 'pedestal, plinth, footstall',
- 709:
- 'pencil box, pencil case',
- 710:
- 'pencil sharpener',
- 711:
- 'perfume, essence',
- 712:
- 'Petri dish',
- 713:
- 'photocopier',
- 714:
- 'pick, plectrum, plectron',
- 715:
- 'pickelhaube',
- 716:
- 'picket fence, paling',
- 717:
- 'pickup, pickup truck',
- 718:
- 'pier',
- 719:
- 'piggy bank, penny bank',
- 720:
- 'pill bottle',
- 721:
- 'pillow',
- 722:
- 'ping-pong ball',
- 723:
- 'pinwheel',
- 724:
- 'pirate, pirate ship',
- 725:
- 'pitcher, ewer',
- 726:
- "plane, carpenter's plane, woodworking plane",
- 727:
- 'planetarium',
- 728:
- 'plastic bag',
- 729:
- 'plate rack',
- 730:
- 'plow, plough',
- 731:
- "plunger, plumber's helper",
- 732:
- 'Polaroid camera, Polaroid Land camera',
- 733:
- 'pole',
- 734:
- 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
- 735:
- 'poncho',
- 736:
- 'pool table, billiard table, snooker table',
- 737:
- 'pop bottle, soda bottle',
- 738:
- 'pot, flowerpot',
- 739:
- "potter's wheel",
- 740:
- 'power drill',
- 741:
- 'prayer rug, prayer mat',
- 742:
- 'printer',
- 743:
- 'prison, prison house',
- 744:
- 'projectile, missile',
- 745:
- 'projector',
- 746:
- 'puck, hockey puck',
- 747:
- 'punching bag, punch bag, punching ball, punchball',
- 748:
- 'purse',
- 749:
- 'quill, quill pen',
- 750:
- 'quilt, comforter, comfort, puff',
- 751:
- 'racer, race car, racing car',
- 752:
- 'racket, racquet',
- 753:
- 'radiator',
- 754:
- 'radio, wireless',
- 755:
- 'radio telescope, radio reflector',
- 756:
- 'rain barrel',
- 757:
- 'recreational vehicle, RV, R.V.',
- 758:
- 'reel',
- 759:
- 'reflex camera',
- 760:
- 'refrigerator, icebox',
- 761:
- 'remote control, remote',
- 762:
- 'restaurant, eating house, eating place, eatery',
- 763:
- 'revolver, six-gun, six-shooter',
- 764:
- 'rifle',
- 765:
- 'rocking chair, rocker',
- 766:
- 'rotisserie',
- 767:
- 'rubber eraser, rubber, pencil eraser',
- 768:
- 'rugby ball',
- 769:
- 'rule, ruler',
- 770:
- 'running shoe',
- 771:
- 'safe',
- 772:
- 'safety pin',
- 773:
- 'saltshaker, salt shaker',
- 774:
- 'sandal',
- 775:
- 'sarong',
- 776:
- 'sax, saxophone',
- 777:
- 'scabbard',
- 778:
- 'scale, weighing machine',
- 779:
- 'school bus',
- 780:
- 'schooner',
- 781:
- 'scoreboard',
- 782:
- 'screen, CRT screen',
- 783:
- 'screw',
- 784:
- 'screwdriver',
- 785:
- 'seat belt, seatbelt',
- 786:
- 'sewing machine',
- 787:
- 'shield, buckler',
- 788:
- 'shoe shop, shoe-shop, shoe store',
- 789:
- 'shoji',
- 790:
- 'shopping basket',
- 791:
- 'shopping cart',
- 792:
- 'shovel',
- 793:
- 'shower cap',
- 794:
- 'shower curtain',
- 795:
- 'ski',
- 796:
- 'ski mask',
- 797:
- 'sleeping bag',
- 798:
- 'slide rule, slipstick',
- 799:
- 'sliding door',
- 800:
- 'slot, one-armed bandit',
- 801:
- 'snorkel',
- 802:
- 'snowmobile',
- 803:
- 'snowplow, snowplough',
- 804:
- 'soap dispenser',
- 805:
- 'soccer ball',
- 806:
- 'sock',
- 807:
- 'solar dish, solar collector, solar furnace',
- 808:
- 'sombrero',
- 809:
- 'soup bowl',
- 810:
- 'space bar',
- 811:
- 'space heater',
- 812:
- 'space shuttle',
- 813:
- 'spatula',
- 814:
- 'speedboat',
- 815:
- "spider web, spider's web",
- 816:
- 'spindle',
- 817:
- 'sports car, sport car',
- 818:
- 'spotlight, spot',
- 819:
- 'stage',
- 820:
- 'steam locomotive',
- 821:
- 'steel arch bridge',
- 822:
- 'steel drum',
- 823:
- 'stethoscope',
- 824:
- 'stole',
- 825:
- 'stone wall',
- 826:
- 'stopwatch, stop watch',
- 827:
- 'stove',
- 828:
- 'strainer',
- 829:
- 'streetcar, tram, tramcar, trolley, trolley car',
- 830:
- 'stretcher',
- 831:
- 'studio couch, day bed',
- 832:
- 'stupa, tope',
- 833:
- 'submarine, pigboat, sub, U-boat',
- 834:
- 'suit, suit of clothes',
- 835:
- 'sundial',
- 836:
- 'sunglass',
- 837:
- 'sunglasses, dark glasses, shades',
- 838:
- 'sunscreen, sunblock, sun blocker',
- 839:
- 'suspension bridge',
- 840:
- 'swab, swob, mop',
- 841:
- 'sweatshirt',
- 842:
- 'swimming trunks, bathing trunks',
- 843:
- 'swing',
- 844:
- 'switch, electric switch, electrical switch',
- 845:
- 'syringe',
- 846:
- 'table lamp',
- 847:
- 'tank, army tank, armored combat vehicle, armoured combat vehicle',
- 848:
- 'tape player',
- 849:
- 'teapot',
- 850:
- 'teddy, teddy bear',
- 851:
- 'television, television system',
- 852:
- 'tennis ball',
- 853:
- 'thatch, thatched roof',
- 854:
- 'theater curtain, theatre curtain',
- 855:
- 'thimble',
- 856:
- 'thresher, thrasher, threshing machine',
- 857:
- 'throne',
- 858:
- 'tile roof',
- 859:
- 'toaster',
- 860:
- 'tobacco shop, tobacconist shop, tobacconist',
- 861:
- 'toilet seat',
- 862:
- 'torch',
- 863:
- 'totem pole',
- 864:
- 'tow truck, tow car, wrecker',
- 865:
- 'toyshop',
- 866:
- 'tractor',
- 867:
- 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry,'
- ' semi',
- 868:
- 'tray',
- 869:
- 'trench coat',
- 870:
- 'tricycle, trike, velocipede',
- 871:
- 'trimaran',
- 872:
- 'tripod',
- 873:
- 'triumphal arch',
- 874:
- 'trolleybus, trolley coach, trackless trolley',
- 875:
- 'trombone',
- 876:
- 'tub, vat',
- 877:
- 'turnstile',
- 878:
- 'typewriter keyboard',
- 879:
- 'umbrella',
- 880:
- 'unicycle, monocycle',
- 881:
- 'upright, upright piano',
- 882:
- 'vacuum, vacuum cleaner',
- 883:
- 'vase',
- 884:
- 'vault',
- 885:
- 'velvet',
- 886:
- 'vending machine',
- 887:
- 'vestment',
- 888:
- 'viaduct',
- 889:
- 'violin, fiddle',
- 890:
- 'volleyball',
- 891:
- 'waffle iron',
- 892:
- 'wall clock',
- 893:
- 'wallet, billfold, notecase, pocketbook',
- 894:
- 'wardrobe, closet, press',
- 895:
- 'warplane, military plane',
- 896:
- 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
- 897:
- 'washer, automatic washer, washing machine',
- 898:
- 'water bottle',
- 899:
- 'water jug',
- 900:
- 'water tower',
- 901:
- 'whiskey jug',
- 902:
- 'whistle',
- 903:
- 'wig',
- 904:
- 'window screen',
- 905:
- 'window shade',
- 906:
- 'Windsor tie',
- 907:
- 'wine bottle',
- 908:
- 'wing',
- 909:
- 'wok',
- 910:
- 'wooden spoon',
- 911:
- 'wool, woolen, woollen',
- 912:
- 'worm fence, snake fence, snake-rail fence, Virginia fence',
- 913:
- 'wreck',
- 914:
- 'yawl',
- 915:
- 'yurt',
- 916:
- 'web site, website, internet site, site',
- 917:
- 'comic book',
- 918:
- 'crossword puzzle, crossword',
- 919:
- 'street sign',
- 920:
- 'traffic light, traffic signal, stoplight',
- 921:
- 'book jacket, dust cover, dust jacket, dust wrapper',
- 922:
- 'menu',
- 923:
- 'plate',
- 924:
- 'guacamole',
- 925:
- 'consomme',
- 926:
- 'hot pot, hotpot',
- 927:
- 'trifle',
- 928:
- 'ice cream, icecream',
- 929:
- 'ice lolly, lolly, lollipop, popsicle',
- 930:
- 'French loaf',
- 931:
- 'bagel, beigel',
- 932:
- 'pretzel',
- 933:
- 'cheeseburger',
- 934:
- 'hotdog, hot dog, red hot',
- 935:
- 'mashed potato',
- 936:
- 'head cabbage',
- 937:
- 'broccoli',
- 938:
- 'cauliflower',
- 939:
- 'zucchini, courgette',
- 940:
- 'spaghetti squash',
- 941:
- 'acorn squash',
- 942:
- 'butternut squash',
- 943:
- 'cucumber, cuke',
- 944:
- 'artichoke, globe artichoke',
- 945:
- 'bell pepper',
- 946:
- 'cardoon',
- 947:
- 'mushroom',
- 948:
- 'Granny Smith',
- 949:
- 'strawberry',
- 950:
- 'orange',
- 951:
- 'lemon',
- 952:
- 'fig',
- 953:
- 'pineapple, ananas',
- 954:
- 'banana',
- 955:
- 'jackfruit, jak, jack',
- 956:
- 'custard apple',
- 957:
- 'pomegranate',
- 958:
- 'hay',
- 959:
- 'carbonara',
- 960:
- 'chocolate sauce, chocolate syrup',
- 961:
- 'dough',
- 962:
- 'meat loaf, meatloaf',
- 963:
- 'pizza, pizza pie',
- 964:
- 'potpie',
- 965:
- 'burrito',
- 966:
- 'red wine',
- 967:
- 'espresso',
- 968:
- 'cup',
- 969:
- 'eggnog',
- 970:
- 'alp',
- 971:
- 'bubble',
- 972:
- 'cliff, drop, drop-off',
- 973:
- 'coral reef',
- 974:
- 'geyser',
- 975:
- 'lakeside, lakeshore',
- 976:
- 'promontory, headland, head, foreland',
- 977:
- 'sandbar, sand bar',
- 978:
- 'seashore, coast, seacoast, sea-coast',
- 979:
- 'valley, vale',
- 980:
- 'volcano',
- 981:
- 'ballplayer, baseball player',
- 982:
- 'groom, bridegroom',
- 983:
- 'scuba diver',
- 984:
- 'rapeseed',
- 985:
- 'daisy',
- 986:
- "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, "
- 'Cypripedium parviflorum',
- 987:
- 'corn',
- 988:
- 'acorn',
- 989:
- 'hip, rose hip, rosehip',
- 990:
- 'buckeye, horse chestnut, conker',
- 991:
- 'coral fungus',
- 992:
- 'agaric',
- 993:
- 'gyromitra',
- 994:
- 'stinkhorn, carrion fungus',
- 995:
- 'earthstar',
- 996:
- 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola '
- 'frondosa',
- 997:
- 'bolete',
- 998:
- 'ear, spike, capitulum',
- 999:
- 'toilet tissue, toilet paper, bathroom tissue'
-}
diff --git a/lit_nlp/examples/models/mobilenet.py b/lit_nlp/examples/models/mobilenet.py
deleted file mode 100644
index a9fc7105..00000000
--- a/lit_nlp/examples/models/mobilenet.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""MobileNet model trained on ImageNet dataset."""
-
-from lit_nlp.api import model
-from lit_nlp.api import types as lit_types
-from lit_nlp.examples.models import imagenet_labels
-from lit_nlp.lib import image_utils
-import numpy as np
-import tensorflow as tf
-
-# Internal shape of the model input (h, w, c).
-IMAGE_SHAPE = (224, 224, 3)
-
-
-class MobileNet(model.BatchedModel):
- """MobileNet model trained on ImageNet dataset."""
-
- def __init__(self, name='mobilenet_v2') -> None:
- # Initialize imagenet labels.
- self.labels = [''] * len(imagenet_labels.IMAGENET_2012_LABELS)
- self.label_to_idx = {}
- for i, l in imagenet_labels.IMAGENET_2012_LABELS.items():
- l = l.split(',', 1)[0]
- self.labels[i] = l
- self.label_to_idx[l] = i
-
- if name == 'mobilenet_v2':
- self.model = tf.keras.applications.mobilenet_v2.MobileNetV2()
- elif name == 'mobilenet':
- self.model = tf.keras.applications.mobilenet.MobileNet()
-
- def predict_minibatch(
- self, input_batch: list[lit_types.JsonDict]
- ) -> list[lit_types.JsonDict]:
- output = []
- for example in input_batch:
- # Convert input to the model acceptable format.
- img_data = example['image']
- if isinstance(img_data, str):
- img_data = image_utils.convert_image_str_to_array(img_data, IMAGE_SHAPE)
- # Get predictions.
- x = img_data[np.newaxis, ...]
- x = tf.convert_to_tensor(x)
- preds = self.model(x).numpy()[0]
- # Determine the gradient target.
- if (grad_target := example.get('label')) is None:
- grad_target_idx = np.argmax(preds)
- else:
- grad_target_idx = self.label_to_idx[grad_target]
- # Calculate gradients.
- with tf.GradientTape() as tape:
- tape.watch(x)
- y = self.model(x)[0, grad_target_idx]
- grads = tape.gradient(y, x).numpy()[0]
- # Add results to the output.
- output.append({
- 'preds': preds,
- 'grads': grads,
- })
-
- return output
-
- def input_spec(self):
- return {
- 'image': lit_types.ImageBytes(),
- # If `grad_target` is not specified then the label with the highest
- # predicted score is used as the gradient target.
- 'label': lit_types.CategoryLabel(vocab=self.labels, required=False),
- }
-
- def output_spec(self):
- return {
- 'preds': lit_types.MulticlassPreds(
- vocab=self.labels, autosort=True, parent='label'
- ),
- 'grads': lit_types.ImageGradients(
- align='image', grad_target_field_key='label'
- ),
- }
diff --git a/lit_nlp/examples/models/mobilenet_test.py b/lit_nlp/examples/models/mobilenet_test.py
deleted file mode 100644
index 6df157cc..00000000
--- a/lit_nlp/examples/models/mobilenet_test.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from absl.testing import absltest
-from absl.testing import parameterized
-from lit_nlp.api import dataset as lit_dataset
-from lit_nlp.api import types
-from lit_nlp.examples.models import mobilenet
-from lit_nlp.lib import image_utils
-import numpy as np
-from PIL import Image as PILImage
-
-
-class MobileNetTest(parameterized.TestCase):
-
- @parameterized.named_parameters(
- dict(
- testcase_name='compatible_spec_model_v2',
- model_name='mobilenet_v2',
- dataset_spec={'image': types.ImageBytes()},
- expected=True,
- ),
- dict(
- testcase_name='empty_spec_model_v2',
- model_name='mobilenet_v2',
- dataset_spec={},
- expected=False,
- ),
- dict(
- testcase_name='no_images_spec_model_v2',
- model_name='mobilenet_v2',
- dataset_spec={'text': types.TextSegment()},
- expected=False,
- ),
- dict(
- testcase_name='wrong_keys_spec_model_v2',
- model_name='mobilenet_v2',
- dataset_spec={'wrong_image_key': types.ImageBytes()},
- expected=False,
- ),
- dict(
- testcase_name='compatible_spec_model_v1',
- model_name='mobilenet',
- dataset_spec={'image': types.ImageBytes()},
- expected=True,
- ),
- dict(
- testcase_name='empty_spec_model_v1',
- model_name='mobilenet',
- dataset_spec={},
- expected=False,
- ),
- dict(
- testcase_name='no_images_spec_model_v1',
- model_name='mobilenet',
- dataset_spec={'text': types.TextSegment()},
- expected=False,
- ),
- dict(
- testcase_name='wrong_keys_spec_model_v1',
- model_name='mobilenet',
- dataset_spec={'wrong_image_key': types.ImageBytes()},
- expected=False,
- ),
- )
- def test_compatibility(
- self, model_name: str, dataset_spec: types.Spec, expected: bool
- ):
- dataset = lit_dataset.Dataset(spec=dataset_spec)
- model = mobilenet.MobileNet(model_name)
- self.assertEqual(model.is_compatible_with_dataset(dataset), expected)
-
- @parameterized.named_parameters(
- dict(
- testcase_name='model_v1',
- model_name='mobilenet',
- ),
- dict(
- testcase_name='model_v2',
- model_name='mobilenet_v2',
- ),
- )
- def test_model(self, model_name: str):
- # Create an input with base64 encoded image.
- input_1 = {
- 'image': np.zeros(shape=(mobilenet.IMAGE_SHAPE), dtype=np.float32)
- }
- # Create an input with image data in Numpy array.
- pil_image = PILImage.new(mode='RGB', size=(300, 200))
- input_2 = {'image': image_utils.convert_pil_to_image_str(pil_image)}
- model = mobilenet.MobileNet(model_name)
- model_out = model.predict([input_1, input_2])
- model_out = list(model_out)
- # Check first output.
- self.assertIn('preds', model_out[0])
- self.assertIn('grads', model_out[0])
- # Check second output.
- self.assertIn('preds', model_out[1])
- self.assertIn('grads', model_out[1])
-
-
-if __name__ == '__main__':
- absltest.main()
diff --git a/website/sphinx_src/components.md b/website/sphinx_src/components.md
index 19c4dde0..3af3a0eb 100644
--- a/website/sphinx_src/components.md
+++ b/website/sphinx_src/components.md
@@ -147,8 +147,6 @@ LIT supports multi-label tasks, when a model can label a single example with
more than one label. Multi-label classification is implemented with the
`SparseMultilabelPreds` and `SparseMultilabel` types.
-* For a basic example on an image labeling task, see
- [examples/image_demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/image_demo.py).
* Models should define a `SparseMultilabelPreds` field in their output spec
  with the `vocab=` attribute as the set of class labels, and for each example
should return a list of class score tuples. Each tuple contains two
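
To make the `SparseMultilabelPreds` convention described above concrete, here is a minimal sketch of an output spec and a per-example return value; the field name `preds`, the vocabulary, and the scores are purely illustrative and not part of this change:

```
from lit_nlp.api import types as lit_types

# Hypothetical multi-label output spec; label names are illustrative.
def output_spec():
  return {
      "preds": lit_types.SparseMultilabelPreds(
          vocab=["sports", "politics", "tech"]
      ),
  }

# For each example, the model returns a list of (label, score) tuples;
# labels that are omitted are treated as not predicted.
example_output = {"preds": [("sports", 0.92), ("tech", 0.45)]}
```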
@@ -246,11 +244,8 @@ NOTE: We may transition images away from encoded strings, moving to individual
pixel color values. We will ensure we don't break existing checked-in code with
such a change.
-* See [examples/datasets/open_images.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/datasets/open_images.py) for a
- dataset containing images, including converting images to base64 encoded
- strings.
-* For a demo of an image classifier, see
- [examples/image_demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/image_demo.py).
+* See [examples/dalle/demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/dalle/demo.py) for a demo of
+  image generation that shows how LIT handles image bytes.
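
As a rough sketch of how image data enters LIT, the snippet below builds an example whose field holds a base64-encoded image string. It reuses `image_utils.convert_pil_to_image_str`, the same helper exercised by the deleted `mobilenet_test.py` above; the field name `image` is an assumption matching an `ImageBytes` entry in the spec:

```
from lit_nlp.lib import image_utils
from PIL import Image as PILImage

# Build a LIT example whose 'image' field holds a base64-encoded image
# string, as expected by an ImageBytes field in the dataset/model spec.
pil_image = PILImage.new(mode="RGB", size=(224, 224))
example = {"image": image_utils.convert_pil_to_image_str(pil_image)}
```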
## Token-based Salience
@@ -488,8 +483,6 @@ using either KerasNLP or Transformers.
* Transformers model wrappers:
[`pretrained_lms.py`](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/models/pretrained_lms.py)
-
-
## Salience Clustering
LIT includes a basic implementation of the salience clustering method from
diff --git a/website/sphinx_src/demos.md b/website/sphinx_src/demos.md
index dfc77b50..c919790d 100644
--- a/website/sphinx_src/demos.md
+++ b/website/sphinx_src/demos.md
@@ -169,13 +169,3 @@ https://pair-code.github.io/lit/tutorials/coref
* Use binary classifier threshold setters to find best thresholds for slices
of examples to achieve specific fairness constraints, such as demographic
parity.
-
-### Image Classification with MobileNet
-
-**Hosted instance:** https://pair-code.github.io/lit/demos/images.html \
-**Code:** [examples/image_demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/image_demo.py)
-
-* Classification on ImageNet labels using a MobileNet model.
-* Shows usage of LIT on image data.
-* Explore results of multiple gradient-based image saliency techniques in the
- Salience Maps module.
diff --git a/website/sphinx_src/faq.md b/website/sphinx_src/faq.md
index de6a5b57..9ff5b3d5 100644
--- a/website/sphinx_src/faq.md
+++ b/website/sphinx_src/faq.md
@@ -17,8 +17,8 @@ works with any modern ML framework. For more information, see
In addition to text, LIT has good support for different modalities, including
images and tabular data. For examples, see:
-* [Image demo](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/image_demo.py) -
- image classification, using a Mobilenet model.
+* [DALL·E demo](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/dalle/) - image
+  generation, using the DALL·E Mini model.
* [Tabular demo](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/penguin_demo.py) -
multi-class classification on tabular (numeric and categorical string) data,
using the
@@ -99,8 +99,8 @@ manually entered in the web UI) are stored in server memory, and if `--data_dir`
is specified, may be cached to disk.
LIT has the ability to create or edit datapoints in the UI and then save them to
-disk. If you do not want the tool to be able to write edited datapoints to
-disk, then pass the `--demo_mode` runtime flag to the LIT server.
+disk. If you do not want the tool to be able to write edited datapoints to disk,
+then pass the `--demo_mode` runtime flag to the LIT server.
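
For reference, a minimal sketch of enabling this behavior programmatically, following the `FLAGS.set_default` pattern used by the demo scripts elsewhere in this change (models and datasets are omitted here, so this is illustrative rather than a runnable demo):

```
import sys
from absl import flags
from lit_nlp import dev_server
from lit_nlp import server_flags

# Sketch: default demo_mode to True so edited datapoints are not written to
# disk; equivalent to passing --demo_mode on the command line.
flags.FLAGS.set_default("demo_mode", True)
flags.FLAGS(sys.argv, known_only=True)  # Parse flags, as the demo scripts do.
lit_demo = dev_server.Server(models={}, datasets={}, **server_flags.get_flags())
lit_demo.serve()
```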
### I have proprietary data. Is LIT secure for my team to use?
From c2fb41b4945edb91fac973cf0ddbca48c6257511 Mon Sep 17 00:00:00 2001
From: Fan Ye
Date: Wed, 5 Jun 2024 07:42:32 -0700
Subject: [PATCH 15/50] Remove the toxicity demos from the LIT examples.
PiperOrigin-RevId: 640521996
---
lit_nlp/examples/datasets/classification.py | 32 ---------
lit_nlp/examples/models/glue_models.py | 22 ------
lit_nlp/examples/tools/glue_trainer.py | 5 --
lit_nlp/examples/toxicity_demo.py | 75 ---------------------
website/sphinx_src/components.md | 4 +-
5 files changed, 1 insertion(+), 137 deletions(-)
delete mode 100644 lit_nlp/examples/toxicity_demo.py
diff --git a/lit_nlp/examples/datasets/classification.py b/lit_nlp/examples/datasets/classification.py
index 92a7e916..0a022f1c 100644
--- a/lit_nlp/examples/datasets/classification.py
+++ b/lit_nlp/examples/datasets/classification.py
@@ -187,35 +187,3 @@ def spec(self) -> lit_types.Spec:
"label": lit_types.CategoryLabel(vocab=self.LABELS),
}
-
-class ToxicityData(lit_dataset.Dataset):
- """Jigsaw toxicity dataset; see https://www.tensorflow.org/datasets/catalog/wikipedia_toxicity_subtypes."""
-
- LABELS = ["non-toxic", "toxic"]
-
- def __init__(self, split="test", max_seq_len=500):
- """Dataset constructor, loads the data into memory."""
- raw_examples = load_tfds("wikipedia_toxicity_subtypes", split=split)
- self._examples = [] # populate this with data records
- for record in raw_examples:
- self._examples.append({
- "sentence": record["text"].decode("utf-8"),
- "label": self.LABELS[int(record["toxicity"])],
- "identity_attack": bool(int(record["identity_attack"])),
- "insult": bool(int(record["insult"])),
- "obscene": bool(int(record["obscene"])),
- "severe_toxicity": bool(int(record["severe_toxicity"])),
- "threat": bool(int(record["threat"]))
- })
-
- def spec(self) -> lit_types.Spec:
- """Dataset spec, which should match the model"s input_spec()."""
- return {
- "sentence": lit_types.TextSegment(),
- "label": lit_types.CategoryLabel(vocab=self.LABELS),
- "identity_attack": lit_types.Boolean(),
- "insult": lit_types.Boolean(),
- "obscene": lit_types.Boolean(),
- "severe_toxicity": lit_types.Boolean(),
- "threat": lit_types.Boolean()
- }
diff --git a/lit_nlp/examples/models/glue_models.py b/lit_nlp/examples/models/glue_models.py
index 466f3172..a70cb1f0 100644
--- a/lit_nlp/examples/models/glue_models.py
+++ b/lit_nlp/examples/models/glue_models.py
@@ -648,25 +648,3 @@ def input_spec(self):
ret = super().input_spec()
ret[self.config.label_name] = lit_types.Scalar(min_val=0, max_val=5)
return ret
-
-
-class ToxicityModel(GlueModel):
- """Classification model on Jigsaw Toxicity Dataset."""
-
- def __init__(self, *args, **kw):
- super().__init__(
- *args,
- text_a_name="sentence",
- text_b_name=None,
- labels=["non-toxic", "toxic"],
- null_label_idx=0,
- **kw)
-
- def output_spec(self) -> Spec:
- ret = super().output_spec()
- ret["probas"] = lit_types.MulticlassPreds(
- parent=self.config.label_name,
- vocab=self.config.labels,
- null_idx=self.config.null_label_idx,
- threshold=0.3)
- return ret
diff --git a/lit_nlp/examples/tools/glue_trainer.py b/lit_nlp/examples/tools/glue_trainer.py
index 034668e8..e217f8fb 100644
--- a/lit_nlp/examples/tools/glue_trainer.py
+++ b/lit_nlp/examples/tools/glue_trainer.py
@@ -25,7 +25,6 @@
from absl import flags
from absl import logging
-from lit_nlp.examples.datasets import classification
from lit_nlp.examples.datasets import glue
from lit_nlp.examples.models import glue_models
from lit_nlp.lib import serialize
@@ -130,10 +129,6 @@ def main(argv: Sequence[str]) -> None:
train_data = glue.STSBData("train")
val_data = glue.STSBData("validation")
model = glue_models.STSBModel(_ENCODER_NAME.value)
- elif _TASK.value == "toxicity":
- train_data = classification.ToxicityData("train")
- val_data = classification.ToxicityData("test")
- model = glue_models.ToxicityModel(_ENCODER_NAME.value)
else:
raise ValueError(f"Unrecognized task name: '{_TASK.value:s}'")
diff --git a/lit_nlp/examples/toxicity_demo.py b/lit_nlp/examples/toxicity_demo.py
deleted file mode 100644
index f14a4c4e..00000000
--- a/lit_nlp/examples/toxicity_demo.py
+++ /dev/null
@@ -1,75 +0,0 @@
-r"""LIT Demo for a Toxicity model.
-
-To run locally:
- python -m lit_nlp.examples.toxicity_demo --port=5432
-
-Once you see the ASCII-art LIT logo, navigate to localhost:5432 to access the
-demo UI.
-"""
-
-from collections.abc import Sequence
-import sys
-from typing import Optional
-
-from absl import app
-from absl import flags
-from absl import logging
-
-from lit_nlp import dev_server
-from lit_nlp import server_flags
-from lit_nlp.examples.datasets import classification
-from lit_nlp.examples.models import glue_models
-
-# NOTE: additional flags defined in server_flags.py
-
-FLAGS = flags.FLAGS
-
-FLAGS.set_default("development_demo", True)
-
-_MODEL_PATH = flags.DEFINE_string(
- "model_path",
- "https://storage.googleapis.com/what-if-tool-resources/lit-models/toxicity.tar.gz",
- "Path to saved model (from transformers library).",
-)
-_MAX_EXAMPLES = flags.DEFINE_integer(
- "max_examples", 1000, "Maximum number of examples to load into LIT. ")
-
-
-def get_wsgi_app() -> Optional[dev_server.LitServerType]:
- """Returns a LitApp instance for consumption by gunicorn."""
- FLAGS.set_default("server_type", "external")
- FLAGS.set_default("demo_mode", True)
- # Parse flags without calling app.run(main), to avoid conflict with
- # gunicorn command line flags.
- unused = flags.FLAGS(sys.argv, known_only=True)
- if unused:
- logging.info(
- "toxicity_demo:get_wsgi_app() called with unused args: %s", unused
- )
- return main([])
-
-
-def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
-
- model_path = _MODEL_PATH.value
- logging.info("Working directory: %s", model_path)
-
- # Load our trained model.
- models = {"toxicity": glue_models.ToxicityModel(model_path)}
- datasets = {"toxicity_test": classification.ToxicityData("test")}
-
- # Truncate datasets if --max_examples is set.
- for name in datasets:
- logging.info("Dataset: '%s' with %d examples", name, len(datasets[name]))
- datasets[name] = datasets[name].slice[:_MAX_EXAMPLES.value]
- logging.info(" truncated to %d examples", len(datasets[name]))
-
- # Start the LIT server. See server_flags.py for server options.
- lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
- return lit_demo.serve()
-
-
-if __name__ == "__main__":
- app.run(main)
diff --git a/website/sphinx_src/components.md b/website/sphinx_src/components.md
index 3af3a0eb..c9a959c0 100644
--- a/website/sphinx_src/components.md
+++ b/website/sphinx_src/components.md
@@ -116,9 +116,7 @@ implemented with the `MulticlassPreds` and `CategoryLabel` types.
* A negative class can be designated using the `null_idx` attribute of
`MulticlassPreds` (most commonly, `null_idx=0`), and metrics such as
precision, recall, F1 will be computed for the remaining classes. AUC and
- AUCPR will be computed for binary classification tasks. For an example, see
- the
- [comment toxicity model](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/models/glue_models.py?l=518&rcl=386779180).
+ AUCPR will be computed for binary classification tasks.
* If `null_idx` is set and there is only one other class, the other class
(often, class `1`) is treated as a positive class, and the LIT UI can be
used to change the classification threshold. If `null_idx` is set and there
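
For reference, a minimal sketch of an output spec that follows this `null_idx` convention, mirroring the pattern of the `GlueModel` subclasses removed in this change (the label names and the `probas` field key are illustrative):

```
from lit_nlp.api import types as lit_types

def output_spec():
  return {
      # Class 0 is the designated negative ("null") class, so precision,
      # recall, F1, AUC, and AUCPR are reported for the positive class.
      "probas": lit_types.MulticlassPreds(
          vocab=["negative", "positive"],
          null_idx=0,
          parent="label",
      ),
  }
```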
From dd196e941058a1d4246b3df3a3c37595f9791b18 Mon Sep 17 00:00:00 2001
From: Fan Ye
Date: Wed, 5 Jun 2024 11:49:27 -0700
Subject: [PATCH 16/50] Remove the xnli demos from the LIT examples.
PiperOrigin-RevId: 640597959
---
lit_nlp/examples/datasets/classification.py | 130 --------------------
lit_nlp/examples/xnli_demo.py | 103 ----------------
website/sphinx_src/demos.md | 13 --
website/sphinx_src/faq.md | 3 -
4 files changed, 249 deletions(-)
delete mode 100644 lit_nlp/examples/xnli_demo.py
diff --git a/lit_nlp/examples/datasets/classification.py b/lit_nlp/examples/datasets/classification.py
index 0a022f1c..0d1e1df3 100644
--- a/lit_nlp/examples/datasets/classification.py
+++ b/lit_nlp/examples/datasets/classification.py
@@ -1,10 +1,8 @@
"""Text classification datasets, including single- and two-sentence tasks."""
from typing import Optional
-from absl import logging
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import types as lit_types
-import pandas as pd
import tensorflow_datasets as tfds
@@ -17,134 +15,6 @@ def load_tfds(*args, **kw):
tfds.as_numpy(tfds.load(*args, download=True, try_gcs=True, **kw)))
-class MNLIDataFromTSV(lit_dataset.Dataset):
- """MultiNLI dataset, from TSV.
-
- Compared to the TFDS version, this includes:
- - label2 field for binary labels, with same schema as HANS
- - genre labels, for stratified analysis
-
- The downside is that you need to download the data from
- https://gluebenchmark.com/tasks, and provide a path to the .tsv file.
- """
-
- LABELS3 = ["entailment", "neutral", "contradiction"]
- LABELS2 = ["non-entailment", "entailment"]
-
- def binarize_label(self, label):
- return "entailment" if label == "entailment" else "non-entailment"
-
- def __init__(self, path: str):
- self._examples = self.load_datapoints(path)
-
- def load_datapoints(self, path: str):
- with open(path) as fd:
- df = pd.read_csv(fd, sep="\t")
- # pylint: disable=g-complex-comprehension
- return [{
- "premise": row["sentence1"],
- "hypothesis": row["sentence2"],
- "label": row["gold_label"],
- "label2": self.binarize_label(row["gold_label"]),
- "genre": row["genre"],
- } for _, row in df.iterrows()]
- # pylint: enable=g-complex-comprehension
-
- def load(self, path: str):
- datapoints = self.load_datapoints(path)
- return lit_dataset.Dataset(base=self, examples=datapoints)
-
- def save(self, examples: list[lit_types.IndexedInput], path: str):
- example_data = [ex["data"] for ex in examples]
- df = pd.DataFrame(example_data).rename(columns={
- "premise": "sentence1",
- "hypothesis": "sentence2",
- "label": "gold_label",
- })
- with open(path, "w") as fd:
- df.to_csv(fd, sep="\t")
-
- def spec(self) -> lit_types.Spec:
- """Should match MnliModel's input_spec()."""
- return {
- "premise": lit_types.TextSegment(),
- "hypothesis": lit_types.TextSegment(),
- # 'label' for 3-way NLI labels, 'label2' for binarized.
- "label": lit_types.CategoryLabel(vocab=self.LABELS3),
- "label2": lit_types.CategoryLabel(vocab=self.LABELS2),
- "genre": lit_types.CategoryLabel(),
- }
-
-
-class XNLIData(lit_dataset.Dataset):
- """Cross-lingual NLI; see https://cims.nyu.edu/~sbowman/xnli/."""
-
- LABELS = ["entailment", "neutral", "contradiction"]
-
- def _process_example(self, ex, languages: list[str]):
- # Hypothesis is stored as parallel arrays, so make a map.
- hyp_map = {
- lang.decode("utf-8"): hyp.decode("utf-8") for lang, hyp in zip(
- ex["hypothesis"]["language"], ex["hypothesis"]["translation"])
- }
- for lang in languages:
- if lang not in hyp_map:
- logging.warning("Missing hypothesis (lang=%s) for premise '%s'", lang,
- ex["premise"]["lang"].decode("utf-8"))
- continue
- yield {
- "premise": ex["premise"][lang].decode("utf-8"),
- "hypothesis": hyp_map[lang],
- "label": self.LABELS[ex["label"]],
- "language": lang,
- }
-
- def __init__(self, split: str, languages=("en", "es", "hi", "zh")):
- self._examples = []
- for ex in load_tfds("xnli", split=split):
- # Each TFDS example contains all the translations; we unpack to individual
- # (premise, hypothesis) pairs that are compatible with a standard NLI
- # model.
- self._examples.extend(self._process_example(ex, languages))
-
- def spec(self):
- return {
- "premise": lit_types.TextSegment(),
- "hypothesis": lit_types.TextSegment(),
- "label": lit_types.CategoryLabel(vocab=self.LABELS),
- "language": lit_types.CategoryLabel(),
- }
-
-
-class HansNLIData(lit_dataset.Dataset):
- """HANS NLI challenge set (https://arxiv.org/abs/1902.01007); 30k examples."""
-
- LABELS = ["non-entailment", "entailment"]
-
- def __init__(self, path: str):
- with open(path) as fd:
- df = pd.read_csv(fd, sep="\t", header=0)
- # pylint: disable=g-complex-comprehension
- self._examples = [{
- "premise": row["sentence1"],
- "hypothesis": row["sentence2"],
- "label2": row["gold_label"],
- "heuristic": row["heuristic"],
- "template": row["template"],
- } for _, row in df.iterrows()]
- # pylint: enable=g-complex-comprehension
-
- def spec(self) -> lit_types.Spec:
- return {
- "premise": lit_types.TextSegment(),
- "hypothesis": lit_types.TextSegment(),
- # 'label2' for 2-way NLI labels
- "label2": lit_types.CategoryLabel(vocab=self.LABELS),
- "heuristic": lit_types.CategoryLabel(),
- "template": lit_types.CategoryLabel(),
- }
-
-
class IMDBData(lit_dataset.Dataset):
"""IMDB reviews dataset; see http://ai.stanford.edu/~amaas/data/sentiment/."""
diff --git a/lit_nlp/examples/xnli_demo.py b/lit_nlp/examples/xnli_demo.py
deleted file mode 100644
index 4f209e39..00000000
--- a/lit_nlp/examples/xnli_demo.py
+++ /dev/null
@@ -1,103 +0,0 @@
-r"""Example demo for multilingual NLI on the XNLI eval set.
-
-To run locally with our trained model:
- python -m lit_nlp.examples.xnli_demo --port=5432
-
-Then navigate to localhost:5432 to access the demo UI.
-
-To train a model for this task, use tools/glue_trainer.py or your favorite
-trainer script to fine-tune a multilingual encoder, such as
-bert-base-multilingual-cased, on the mnli task.
-
-Note: the LIT UI can handle around 10k examples comfortably, depending on your
-hardware. The monolingual (english) eval sets for MNLI are about 9.8k each,
-while each language for XNLI is about 2.5k examples, so we recommend using the
---languages flag to load only the languages you're interested in.
-"""
-
-from collections.abc import Sequence
-import sys
-from typing import Optional
-
-from absl import app
-from absl import flags
-from absl import logging
-
-from lit_nlp import dev_server
-from lit_nlp import server_flags
-from lit_nlp.examples.datasets import classification
-from lit_nlp.examples.datasets import glue
-from lit_nlp.examples.models import glue_models
-from lit_nlp.lib import file_cache
-
-# NOTE: additional flags defined in server_flags.py
-
-FLAGS = flags.FLAGS
-
-FLAGS.set_default("development_demo", True)
-
-_LANGUAGES = flags.DEFINE_list(
- "languages", ["en", "es", "hi", "zh"],
- "Languages to load from XNLI. Available languages: "
- "ar,bg,de,el,en,es,fr,hi,ru,sw,th,tr,ur,zh,vi"
-)
-
-_MODEL_PATH = flags.DEFINE_string(
- "model_path",
- "https://storage.googleapis.com/what-if-tool-resources/lit-models/mbert_mnli.tar.gz",
- (
- "Path to fine-tuned model files. Expects model to be in standard "
- "transformers format, e.g. as saved by model.save_pretrained() and "
- "tokenizer.save_pretrained()."
- ),
-)
-
-_MAX_EXAMPLES = flags.DEFINE_integer(
- "max_examples", None, "Maximum number of examples to load into LIT. "
- "Note: MNLI eval set is 10k examples, so will take a while to run and may "
- "be slow on older machines. Set --max_examples=200 for a quick start.")
-
-
-def get_wsgi_app() -> Optional[dev_server.LitServerType]:
- """Returns a LitApp instance for consumption by gunicorn."""
- FLAGS.set_default("server_type", "external")
- FLAGS.set_default("demo_mode", True)
- # Parse flags without calling app.run(main), to avoid conflict with
- # gunicorn command line flags.
- unused = flags.FLAGS(sys.argv, known_only=True)
- if unused:
- logging.info("xnli_demo:get_wsgi_app() called with unused args: %s", unused)
- return main([])
-
-
-def main(argv: Sequence[str]) -> Optional[dev_server.LitServerType]:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
-
- # Normally path is a directory; if it's an archive file, download and
- # extract to the transformers cache.
- model_path = _MODEL_PATH.value
- if model_path.endswith(".tar.gz"):
- model_path = file_cache.cached_path(
- model_path, extract_compressed_file=True)
-
- models = {"nli": glue_models.MNLIModel(model_path, inference_batch_size=16)}
- datasets = {
- "xnli": classification.XNLIData("validation", _LANGUAGES.value),
- "mnli_dev": glue.MNLIData("validation_matched"),
- "mnli_dev_mm": glue.MNLIData("validation_mismatched"),
- }
-
- # Truncate datasets if --max_examples is set.
- for name in datasets:
- logging.info("Dataset: '%s' with %d examples", name, len(datasets[name]))
- datasets[name] = datasets[name].slice[:_MAX_EXAMPLES.value]
- logging.info(" truncated to %d examples", len(datasets[name]))
-
- # Start the LIT server. See server_flags.py for server options.
- lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
- return lit_demo.serve()
-
-
-if __name__ == "__main__":
- app.run(main)
diff --git a/website/sphinx_src/demos.md b/website/sphinx_src/demos.md
index c919790d..336d2b43 100644
--- a/website/sphinx_src/demos.md
+++ b/website/sphinx_src/demos.md
@@ -43,19 +43,6 @@ https://pair-code.github.io/lit/demos/.
Tip: check out a case study for this demo on the public LIT website:
https://pair-code.github.io/lit/tutorials/sentiment
-### Multilingual (XNLI)
-
-**Code:** [examples/xnli_demo.py](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/xnli_demo.py)
-
-* [XNLI](https://cims.nyu.edu/~sbowman/xnli/) dataset translates a subset of
- MultiNLI into 14 different languages.
-* Specify `--languages=en,jp,hi,...` flag to select which languages to load.
-* NLI as a three-way classification task with two-segment input (premise,
- hypothesis).
-* Fine-tuned multilingual BERT model.
-* Salience methods work with non-whitespace-delimited text, by using the
- model's wordpiece tokenization.
-
--------------------------------------------------------------------------------
## Regression / Scoring
diff --git a/website/sphinx_src/faq.md b/website/sphinx_src/faq.md
index 9ff5b3d5..fc70e29c 100644
--- a/website/sphinx_src/faq.md
+++ b/website/sphinx_src/faq.md
@@ -34,9 +34,6 @@ All strings in LIT are unicode and most components use model-provided
tokenization if available, so in most cases non-English languages and non-Latin
scripts should work without any modifications. For examples, see:
-* [XNLI demo](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/xnli_demo.py) -
- cross-lingual NLI, with up to 15 languages supported via a multilingual BERT
- model.
* [T5 demo](https://github.com/PAIR-code/lit/blob/main/lit_nlp/examples/t5_demo.py) -
includes WMT data for machine translation
From 72fd772fa02c7445f27fb517e667987ea8ab34d7 Mon Sep 17 00:00:00 2001
From: Fan Ye
Date: Thu, 6 Jun 2024 07:53:55 -0700
Subject: [PATCH 17/50] Remove the is_eval_demo from the LIT examples.
PiperOrigin-RevId: 640897240
---
lit_nlp/examples/is_eval/datasets.py | 44 ------
lit_nlp/examples/is_eval/is_eval_demo.py | 157 --------------------
lit_nlp/examples/is_eval/is_eval_trainer.py | 138 -----------------
lit_nlp/examples/is_eval/models.py | 48 ------
4 files changed, 387 deletions(-)
delete mode 100644 lit_nlp/examples/is_eval/datasets.py
delete mode 100644 lit_nlp/examples/is_eval/is_eval_demo.py
delete mode 100644 lit_nlp/examples/is_eval/is_eval_trainer.py
delete mode 100644 lit_nlp/examples/is_eval/models.py
diff --git a/lit_nlp/examples/is_eval/datasets.py b/lit_nlp/examples/is_eval/datasets.py
deleted file mode 100644
index 25c3d7ed..00000000
--- a/lit_nlp/examples/is_eval/datasets.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Text classification dataset for binary, single input data."""
-from lit_nlp.api import dataset as lit_dataset
-from lit_nlp.api import types as lit_types
-import pandas as pd
-
-
-class SingleInputClassificationFromTSV(lit_dataset.Dataset):
- """TSV data loader for files having a single input text and a label.
-
- Files must be in TSV format with 2 columns in this order:
- 1. Input text.
- 2. Numeric label.
-
- Exported examples have 2 output keys: "sentence" and "label".
- """
-
- LABELS = ["0", "1"]
-
- def __init__(self, path: str, name: str = ""):
- """Initializes a dataset for the Input Salience Eval demo.
-
- Args:
- path: The path from which examples will be loaded.
- name: Optionally, the name of the dataset. Used by ISEvalModel to
- determine if the model is intended to be compatible with this dataset.
- """
- self._examples = self.load_datapoints(path)
- self.name = name
-
- def load_datapoints(self, path: str):
- with open(path) as fd:
- df = pd.read_csv(fd, sep="\t", header=None, names=["sentence", "label"])
- # pylint: disable=g-complex-comprehension
- return [{
- "sentence": row["sentence"],
- "label": self.LABELS[row["label"]],
- } for _, row in df.iterrows()]
- # pylint: enable=g-complex-comprehension
-
- def spec(self) -> lit_types.Spec:
- return {
- "sentence": lit_types.TextSegment(),
- "label": lit_types.CategoryLabel(vocab=self.LABELS),
- }
diff --git a/lit_nlp/examples/is_eval/is_eval_demo.py b/lit_nlp/examples/is_eval/is_eval_demo.py
deleted file mode 100644
index 0621adef..00000000
--- a/lit_nlp/examples/is_eval/is_eval_demo.py
+++ /dev/null
@@ -1,157 +0,0 @@
-r"""Example demo loading a handful of IS eval models.
-
-To run:
- blaze run -c opt --config=cuda examples/is_eval:is_eval_demo -- \
- --port=5432
-"""
-import sys
-
-from absl import app
-from absl import flags
-from absl import logging
-
-from lit_nlp import dev_server
-from lit_nlp import server_flags
-from lit_nlp.api import layout
-from lit_nlp.examples.is_eval import datasets
-from lit_nlp.examples.is_eval import models as is_eval_models
-from lit_nlp.lib import file_cache
-
-# NOTE: additional flags defined in server_flags.py
-
-FLAGS = flags.FLAGS
-
-FLAGS.set_default("development_demo", True)
-FLAGS.set_default("page_title", "Input Salience Evaluation Demo")
-
-_DOC_STRING = (
- "# Input Salience Evaluation Demo\nThis demo accompanies our "
- "[paper](https://arxiv.org/abs/2211.05485) and "
- "[blogpost](https://ai.googleblog.com/2022/12/will-you-find-these-shortcuts.html)"
- " \"Will you find these shortcuts?\". We manually inserted one out of "
- "three artificial data artifacts (shortcuts) into two datasets (SST2, "
- "Toxicity). In the \"Explanations\" tab you can observe how different "
- "input salience methods put different weights on the nonsense tokens "
- "*zeroa*, *onea*, *synt*.")
-
-_MODELS = flags.DEFINE_list(
- "models",
- [
- "sst2_single_token:https://storage.googleapis.com/what-if-tool-resources/lit-models/sst2_single_token_bert.tar.gz",
- "sst2_token_in_context:https://storage.googleapis.com/what-if-tool-resources/lit-models/sst2_token_in_context_bert.tar.gz",
- "sst2_ordered_pair:https://storage.googleapis.com/what-if-tool-resources/lit-models/sst2_simple_order_bert.tar.gz",
- "toxicity_single_token:https://storage.googleapis.com/what-if-tool-resources/lit-models/toxicity_single_token_bert.tar.gz",
- "toxicity_token_in_context:https://storage.googleapis.com/what-if-tool-resources/lit-models/toxicity_token_in_context_bert.tar.gz",
- "toxicity_ordered_pair:https://storage.googleapis.com/what-if-tool-resources/lit-models/toxicity_simple_order_bert.tar.gz",
- ],
- "List of models to load, as :. "
- "Path should be the output of saving a transformer model, e.g. "
- "model.save_pretrained(path) and tokenizer.save_pretrained(path). Remote "
- ".tar.gz files will be downloaded and cached locally.",
-)
-
-_MAX_EXAMPLES = flags.DEFINE_integer(
- "max_examples", None, "Maximum number of examples to load into LIT. Set "
- "--max_examples=200 for a quick start.")
-
-DATASETS = {
- "sst2_single_token_dev_100_syn": "https://storage.googleapis.com/what-if-tool-resources/lit-data/sst2_single_token-dev.100syn.tsv",
- "sst2_token_in_context_dev_100_syn": "https://storage.googleapis.com/what-if-tool-resources/lit-data/sst2_token_in_context-dev.100syn.tsv",
- "sst2_ordered_pair_dev_100_syn": "https://storage.googleapis.com/what-if-tool-resources/lit-data/sst2_simple_order-dev.100syn.tsv",
- "toxicity_single_token_dev_100_syn": "https://storage.googleapis.com/what-if-tool-resources/lit-data/toxicity_single_token-dev.100syn.tsv",
- "toxicity_token_in_context_dev_100_syn": "https://storage.googleapis.com/what-if-tool-resources/lit-data/toxicity_token_in_context-dev.100syn.tsv",
- "toxicity_ordered_pair_dev_100_syn": "https://storage.googleapis.com/what-if-tool-resources/lit-data/toxicity_simple_order-dev.100syn.tsv",
-}
-
-modules = layout.LitModuleName
-IS_EVAL_LAYOUT = layout.LitCanonicalLayout(
- upper={
- "Main": [
- modules.DocumentationModule,
- modules.EmbeddingsModule,
- modules.DataTableModule,
- modules.DatapointEditorModule,
- ]
- },
- lower={
- "Predictions": [
- modules.ClassificationModule,
- modules.SalienceMapModule,
- modules.ScalarModule,
- ],
- "Salience Clustering": [modules.SalienceClusteringModule],
- "Metrics": [
- modules.MetricsModule,
- modules.ConfusionMatrixModule,
- modules.CurvesModule,
- modules.ThresholderModule,
- ],
- "Counterfactuals": [
- modules.GeneratorModule,
- ],
- },
- description="Custom layout for evaluating input salience methods.")
-CUSTOM_LAYOUTS = layout.DEFAULT_LAYOUTS | {"is_eval": IS_EVAL_LAYOUT}
-# You can change this back via URL param, e.g. localhost:5432/?layout=default
-FLAGS.set_default("default_layout", "is_eval")
-
-
-def get_wsgi_app():
- """Return WSGI app for container-hosted demos."""
- FLAGS.set_default("server_type", "external")
- FLAGS.set_default("demo_mode", True)
- FLAGS.set_default("warm_start", 1.0)
- FLAGS.set_default("max_examples", 1000)
- # Parse flags without calling app.run(main), to avoid conflict with
- # gunicorn command line flags.
- unused = flags.FLAGS(sys.argv, known_only=True)
- if unused:
- logging.info("is_eval_demo:get_wsgi_app() called with unused args: %s",
- unused)
- return main([])
-
-
-def main(_):
- models = {}
- loaded_datasets = {}
-
- for model_string in _MODELS.value:
- # Only split on the first two ':', because path may be a URL
- # containing 'https://'
- name, path = model_string.split(":", 1)
- logging.info("Loading model '%s' from '%s'", name, path)
- # Normally path is a directory; if it's an archive file, download and
- # extract to the transformers cache.
- if path.endswith(".tar.gz"):
- path = file_cache.cached_path(
- path, extract_compressed_file=True)
- # Load the model from disk.
- models[name] = is_eval_models.ISEvalModel(
- name, path, output_attention=False)
-
- logging.info("Loading data for SST-2 task.")
- for data_key, url in DATASETS.items():
- path = file_cache.cached_path(url)
- loaded_datasets[data_key] = datasets.SingleInputClassificationFromTSV(
- path, data_key)
-
- # Truncate datasets if --max_examples is set.
- for name in loaded_datasets:
- logging.info("Dataset: '%s' with %d examples", name,
- len(loaded_datasets[name]))
- loaded_datasets[name] = loaded_datasets[name].shuffle().slice[:_MAX_EXAMPLES
- .value]
- logging.info(" truncated to %d examples", len(loaded_datasets[name]))
-
- # Start the LIT server. See server_flags.py for server options.
- lit_demo = dev_server.Server(
- models,
- loaded_datasets,
- layouts=CUSTOM_LAYOUTS,
- onboard_end_doc=_DOC_STRING,
- **server_flags.get_flags())
- return lit_demo.serve()
-
-
-if __name__ == "__main__":
- app.run(main)
diff --git a/lit_nlp/examples/is_eval/is_eval_trainer.py b/lit_nlp/examples/is_eval/is_eval_trainer.py
deleted file mode 100644
index a7a53da5..00000000
--- a/lit_nlp/examples/is_eval/is_eval_trainer.py
+++ /dev/null
@@ -1,138 +0,0 @@
-r"""Lightweight trainer script to fine-tune a model for IS eval.
-
-Usage:
- python -m lit_nlp.examples.tools.is_eval_trainer \
- --encoder_name=bert-base-uncased \
- --train_path=/path/to/saved/model \
- --train_data_path=/path/to/train/data \
- --dev_data_path=/path/to/dev/data \
-
-This will finetune a BERT model to reproduce findings of the paper ""Will You
-Find These Shortcuts?" A Protocol for Evaluating the Faithfulness of Input
-Salience Methods for Text Classification" [https://arxiv.org/abs/2111.07367].
-
-Please ensure that the model's vocabulary file includes all special shortcut
-tokens. When using the provided datasets of the LIT demo these are:
-"ZEROA", "ZEROB", "ONEA", "ONEB", "onea", "oneb", "zeroa", "zerob", "synt".
-
-This will train a BERT-base model [https://arxiv.org/abs/1810.04805]
-which gives validation accuracy in the low 90s on SST-2.
-
-Note: you don't have to use this trainer to use LIT; the classifier
-implementation is just a wrapper around HuggingFace Transformers, using
-AutoTokenizer, AutoConfig, and TFAutoModelForSequenceClassification, and can
-load anything compatible with those classes.
-"""
-
-from collections.abc import Sequence
-import os
-
-from absl import app
-from absl import flags
-from absl import logging
-
-from lit_nlp.examples.is_eval import datasets
-from lit_nlp.examples.models import glue_models
-from lit_nlp.lib import serialize
-import tensorflow as tf
-
-_ENCODER_NAME = flags.DEFINE_string(
- "encoder_name", "bert-base-uncased",
- "Model name or path to pretrained (base) encoder.")
-_TRAIN_DATA_PATH = flags.DEFINE_string("train_data_path", None, "")
-_DEV_DATA_PATH = flags.DEFINE_string("dev_data_path", None, "")
-_TRAIN_PATH = flags.DEFINE_string("train_path", "/tmp/hf_demo",
- "Path to save fine-tuned model.")
-
-_NUM_EPOCHS = flags.DEFINE_integer(
- "num_epochs", 3, "Number of epochs to train for.", lower_bound=1)
-_SAVE_INTERMEDIATES = flags.DEFINE_bool(
- "save_intermediates", False,
- "If true, save intermediate weights after each epoch.")
-
-
-def history_to_dict(keras_history):
- return {
- "epochs": keras_history.epoch,
- "history": keras_history.history,
- "params": keras_history.params,
- "optimizer_params": keras_history.model.optimizer.get_config(),
- }
-
-
-class EpochSaverCallback(tf.keras.callbacks.Callback):
- """Save model at the beginning of training and after every epoch.
-
- Similar to tf.keras.callbacks.ModelCheckpoint, but this allows us to specify
- a custom save fn to call, such as the HuggingFace model.save() which writes
- .h5 files and config information.
- """
-
- def __init__(self, save_path_base: str, save_fn=None):
- super().__init__()
- self.save_path_base = save_path_base
- self.save_fn = save_fn or self.model.save
-
- def on_train_begin(self, logs=None):
- self.on_epoch_end(-1, logs=logs) # write epoch-0
-
- def on_epoch_end(self, epoch, logs=None):
- # Save path 1-indexed = # of completed epochs.
- save_path = os.path.join(self.save_path_base, f"epoch-{epoch+1}")
- self.save_fn(save_path)
-
-
-def train_and_save(model,
- train_data,
- val_data,
- train_path,
- save_intermediates=False,
- **train_kw):
- """Run training and save model."""
- # Set up logging for TensorBoard. To view, run:
- # tensorboard --log_dir=/tensorboard
- keras_callbacks = [
- tf.keras.callbacks.TensorBoard(
- log_dir=os.path.join(train_path, "tensorboard"))
- ]
- if save_intermediates:
- keras_callbacks.append(EpochSaverCallback(train_path, save_fn=model.save))
- history = model.train(
- train_data.examples,
- validation_inputs=val_data.examples,
- keras_callbacks=keras_callbacks,
- **train_kw)
-
- # Save training history too, since this is human-readable and more concise
- # than the TensorBoard log files.
- with open(os.path.join(train_path, "train.history.json"), "w") as fd:
- # Use LIT's custom JSON encoder to handle dicts containing NumPy data.
- fd.write(serialize.to_json(history_to_dict(history), simple=True, indent=2))
-
- model.save(train_path)
- logging.info("Saved model files: \n %s",
- "\n ".join(os.listdir(train_path)))
-
-
-def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
-
- model = glue_models.SST2Model(_ENCODER_NAME.value)
- train_data = datasets.SingleInputClassificationFromTSV(_TRAIN_DATA_PATH.value)
- dev_data = datasets.SingleInputClassificationFromTSV(_DEV_DATA_PATH.value)
-
- train_and_save(
- model,
- train_data,
- dev_data,
- _TRAIN_PATH.value,
- save_intermediates=_SAVE_INTERMEDIATES.value,
- num_epochs=_NUM_EPOCHS.value,
- learning_rate=1e-5,
- batch_size=16,
- )
-
-
-if __name__ == "__main__":
- app.run(main)
diff --git a/lit_nlp/examples/is_eval/models.py b/lit_nlp/examples/is_eval/models.py
deleted file mode 100644
index d6765df7..00000000
--- a/lit_nlp/examples/is_eval/models.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""Custom GLUE Model and ModelSpec for the Input Salience Evaluation demo."""
-from typing import cast
-from lit_nlp.api import dataset as lit_dataset
-from lit_nlp.examples.is_eval import datasets as is_eval_datasets
-from lit_nlp.examples.models import glue_models
-
-
-class ISEvalModel(glue_models.SST2Model):
- """Custom GLUE model for the Input Salience Evaluation demo."""
-
- def __init__(self, model_name: str, *args, **kw):
- """Initializes a custom SST-2 model for the Input Salience Eval demo.
-
- Args:
- model_name: The model's name. Used to determine dataset compatibility.
- *args: Additional positional args to pass to the SST2Model base class.
- **kw: Additional keyword args to pass to the SST2Model base class.
- """
- super().__init__(*args, **kw)
- self._model_name = model_name
-
- def is_compatible_with_dataset(self, dataset: lit_dataset.Dataset) -> bool:
- """Returns true if the model is compatible with the dataset.
-
- The Input Salience Eval demo is somewhat unique in that each model and
- dataset have compatible specs but the intention is to pair them for
- specific tasks.
-
- This class determines compatibility by:
-
- 1. Ensuring that the value of `model_name` is contained in the `default`
- value of the `dataset_name` field in the provided `dataset_spec`.
- 2. Calling super().is_compatible_with_dataset() to check compatibility
- using the base ModelSpec check.
-
- Args:
- dataset: The dataset for which compatibility will be determined.
- """
- if not isinstance(dataset,
- is_eval_datasets.SingleInputClassificationFromTSV):
- return False
-
- eval_dataset = cast(is_eval_datasets.SingleInputClassificationFromTSV,
- dataset)
- if self.model_name in eval_dataset.name:
- return super().is_compatible_with_dataset(dataset)
- else:
- return False
From 71d88fb86eb88ffb80d665cf7571b21d7ae06bd2 Mon Sep 17 00:00:00 2001
From: Fan Ye
Date: Thu, 6 Jun 2024 11:23:01 -0700
Subject: [PATCH 18/50] Remove the coref demo from the LIT examples.
PiperOrigin-RevId: 640962672
---
docs/assets/images/lit-coref-compare.png | Bin 80342 -> 0 bytes
docs/assets/images/lit-coref-data.png | Bin 65860 -> 0 bytes
docs/assets/images/lit-coref-metric-top.png | Bin 39022 -> 0 bytes
docs/assets/images/lit-coref-metrics.png | Bin 124661 -> 0 bytes
docs/assets/images/lit-coref-pred.png | Bin 44707 -> 0 bytes
docs/assets/images/lit-coref-select.png | Bin 103595 -> 0 bytes
lit_nlp/examples/coref/__init__.py | 13 -
lit_nlp/examples/coref/coref_demo.py | 226 ---------------
lit_nlp/examples/coref/datasets/__init__.py | 13 -
lit_nlp/examples/coref/datasets/ontonotes.py | 41 ---
lit_nlp/examples/coref/datasets/winogender.py | 200 -------------
lit_nlp/examples/coref/edge_predictor.py | 273 ------------------
lit_nlp/examples/coref/encoders.py | 96 ------
lit_nlp/examples/coref/model.py | 138 ---------
lit_nlp/examples/coref/retokenize.py | 40 ---
website/sphinx_src/api.md | 2 +-
website/sphinx_src/demos.md | 18 --
website/sphinx_src/docker.md | 1 -
website/sphinx_src/faq.md | 5 +-
.../src/assets/images/lit-coref-compare.png | Bin 80342 -> 0 bytes
website/src/assets/images/lit-coref-data.png | Bin 65860 -> 0 bytes
.../assets/images/lit-coref-metric-top.png | Bin 39022 -> 0 bytes
.../src/assets/images/lit-coref-metrics.png | Bin 124661 -> 0 bytes
website/src/assets/images/lit-coref-pred.png | Bin 44707 -> 0 bytes
.../src/assets/images/lit-coref-select.png | Bin 103595 -> 0 bytes
website/src/tutorials.md | 3 -
website/src/tutorials/coref.md | 64 ----
website/src/tutorials/sequence-salience.md | 4 +-
28 files changed, 3 insertions(+), 1134 deletions(-)
delete mode 100644 docs/assets/images/lit-coref-compare.png
delete mode 100644 docs/assets/images/lit-coref-data.png
delete mode 100644 docs/assets/images/lit-coref-metric-top.png
delete mode 100644 docs/assets/images/lit-coref-metrics.png
delete mode 100644 docs/assets/images/lit-coref-pred.png
delete mode 100644 docs/assets/images/lit-coref-select.png
delete mode 100644 lit_nlp/examples/coref/__init__.py
delete mode 100644 lit_nlp/examples/coref/coref_demo.py
delete mode 100644 lit_nlp/examples/coref/datasets/__init__.py
delete mode 100644 lit_nlp/examples/coref/datasets/ontonotes.py
delete mode 100644 lit_nlp/examples/coref/datasets/winogender.py
delete mode 100644 lit_nlp/examples/coref/edge_predictor.py
delete mode 100644 lit_nlp/examples/coref/encoders.py
delete mode 100644 lit_nlp/examples/coref/model.py
delete mode 100644 lit_nlp/examples/coref/retokenize.py
delete mode 100644 website/src/assets/images/lit-coref-compare.png
delete mode 100644 website/src/assets/images/lit-coref-data.png
delete mode 100644 website/src/assets/images/lit-coref-metric-top.png
delete mode 100644 website/src/assets/images/lit-coref-metrics.png
delete mode 100644 website/src/assets/images/lit-coref-pred.png
delete mode 100644 website/src/assets/images/lit-coref-select.png
delete mode 100644 website/src/tutorials/coref.md
diff --git a/docs/assets/images/lit-coref-compare.png b/docs/assets/images/lit-coref-compare.png
deleted file mode 100644
index eaa0c7e743b1e4fcc13b17d5cf56d0da7171b4f3..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded literal data for the deleted 80342-byte docs/assets/images/lit-coref-compare.png omitted]