|
187 | 187 | " My sister's name is Rachel.\n",
|
188 | 188 | " My brother's name is Joe. My dog's name is Spot\n",
|
189 | 189 | "\"\"\"\n",
|
190 |
| - "output = chain.predict_and_parse(text=(text))[\"data\"]\n", |
| 190 | + "output = chain.run(text=(text))[\"data\"]\n", |
191 | 191 | "\n",
|
192 | 192 | "printOutput(output)\n",
|
193 | 193 | "# Notice how there isn't \"spot\" in the results list because it's the name of a dog, not a person."
|
|
218 | 218 | }
|
219 | 219 | ],
|
220 | 220 | "source": [
|
221 |
| - "output = chain.predict_and_parse(text=(\"The dog went to the park\"))[\"data\"]\n", |
| 221 | + "output = chain.run(text=(\"The dog went to the park\"))[\"data\"]\n", |
222 | 222 | "printOutput(output)"
|
223 | 223 | ]
|
224 | 224 | },
|
|
300 | 300 | "text=\"Palm trees are brown with a 6 rating. Sequoia trees are green\"\n",
|
301 | 301 | "\n",
|
302 | 302 | "chain = create_extraction_chain(llm, plant_schema)\n",
|
303 |
| - "output = chain.predict_and_parse(text=text)['data']\n", |
| 303 | + "output = chain.run(text=text)['data']\n", |
304 | 304 | "\n",
|
305 | 305 | "printOutput(output)"
|
306 | 306 | ]
|
|
402 | 402 | "\n",
|
403 | 403 | "# Changed the encoder to json\n",
|
404 | 404 | "chain = create_extraction_chain(llm, cars_schema, encoder_or_encoder_class=\"json\")\n",
|
405 |
| - "output = chain.predict_and_parse(text=text)['data']\n", |
| 405 | + "output = chain.run(text=text)['data']\n", |
406 | 406 | "\n",
|
407 | 407 | "printOutput(output)"
|
408 | 408 | ]
|
|
529 | 529 | ],
|
530 | 530 | "source": [
|
531 | 531 | "chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')\n",
|
532 |
| - "output = chain.predict_and_parse(text=\"please add 15 more units sold to 2023\")['data']\n", |
| 532 | + "output = chain.run(text=\"please add 15 more units sold to 2023\")['data']\n", |
533 | 533 | "\n",
|
534 | 534 | "printOutput(output)"
|
535 | 535 | ]
|
|
891 | 891 | }
|
892 | 892 | ],
|
893 | 893 | "source": [
|
894 |
| - "output = chain.predict_and_parse(text=text)[\"data\"]\n", |
| 894 | + "output = chain.run(text=text)[\"data\"]\n", |
895 | 895 | "\n",
|
896 | 896 | "printOutput(output)"
|
897 | 897 | ]
|
|
1027 | 1027 | ],
|
1028 | 1028 | "source": [
|
1029 | 1029 | "chain = create_extraction_chain(llm, salary_range)\n",
|
1030 |
| - "output = chain.predict_and_parse(text=text)[\"data\"]\n", |
| 1030 | + "output = chain.run(text=text)[\"data\"]\n", |
1031 | 1031 | "\n",
|
1032 | 1032 | "printOutput(output)"
|
1033 | 1033 | ]
|
|
1070 | 1070 | ],
|
1071 | 1071 | "source": [
|
1072 | 1072 | "with get_openai_callback() as cb:\n",
|
1073 |
| - " result = chain.predict_and_parse(text=text)\n", |
| 1073 | + " result = chain.run(text=text)\n", |
1074 | 1074 | " print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
1075 | 1075 | " print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
1076 | 1076 | " print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
|
0 commit comments