diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml deleted file mode 100644 index 0ac03351..00000000 --- a/.github/workflows/codeql.yml +++ /dev/null @@ -1,83 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CodeQL" - -on: - push: - branches: [ "main" ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ "main" ] - schedule: - - cron: '24 0 * * 2' - -jobs: - analyze: - name: Analyze - # Runner size impacts CodeQL analysis time. To learn more, please see: - # - https://gh.io/recommended-hardware-resources-for-running-codeql - # - https://gh.io/supported-runners-and-hardware-resources - # - https://gh.io/using-larger-runners - # Consider using larger runners for possible analysis time improvements. - runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} - timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'python' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ] - # Use only 'java' to analyze code written in Java, Kotlin or both - # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both - # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - - # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # queries: security-extended,security-and-quality - - - # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - # â„šī¸ Command-line programs to run using the OS shell. - # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - - # If the Autobuild fails above, remove it and uncomment the following three lines. - # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
- - # - run: | - # echo "Run, Build Application using script" - # ./location_of_script_within_repo/buildscript.sh - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 - with: - category: "/language:${{matrix.language}}" - diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml new file mode 100644 index 00000000..594f161c --- /dev/null +++ b/.github/workflows/pypi.yml @@ -0,0 +1,71 @@ +name: Publish Python 🐍 distribution đŸ“Ļ to PyPI and TestPyPI + +on: push + +jobs: + build: + name: Build distribution đŸ“Ļ + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.x" + - name: Install pypa/build + run: >- + python3 -m + pip install + build + --user + - name: Build a binary wheel and a source tarball + run: python3 -m build + - name: Store the distribution packages + uses: actions/upload-artifact@v4 + with: + name: python-package-distributions + path: dist/ + + publish-to-pypi: + name: >- + Publish Python 🐍 distribution đŸ“Ļ to PyPI + if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes + needs: + - build + runs-on: ubuntu-latest + environment: + name: pypi + url: https://pypi.org/p/llm-workflow-engine + permissions: + id-token: write # IMPORTANT: mandatory for trusted publishing + steps: + - name: Download all the dists + uses: actions/download-artifact@v4 + with: + name: python-package-distributions + path: dist/ + - name: Publish distribution đŸ“Ļ to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + + publish-to-testpypi: + name: Publish Python 🐍 distribution đŸ“Ļ to TestPyPI + needs: + - build + runs-on: ubuntu-latest + environment: + name: testpypi + url: https://test.pypi.org/p/llm-workflow-engine + permissions: + id-token: write # IMPORTANT: mandatory for trusted publishing + steps: + - name: Download all the dists + uses: actions/download-artifact@v4 + with: + name: python-package-distributions + path: dist/ + - name: Publish distribution đŸ“Ļ to TestPyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index 2171af55..205a304d 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v3 diff --git a/COMMIT_LOG.md b/COMMIT_LOG.md index 7da6096c..dbf202ad 100644 --- a/COMMIT_LOG.md +++ b/COMMIT_LOG.md @@ -1,3 +1,31 @@ +### v0.22.13 - 07/08/2025 + +* **Tue Jul 08 2025:** fix outdated documentation + +### v0.22.12 - 06/10/2025 + +* **Tue Jun 10 2025:** add support for o1-pro/o3-pro +* **Thu May 08 2025:** update AI21 plugin doc +* **Sat May 03 2025:** add doc for xAI provider plugin + +### v0.22.11 - 04/16/2025 + +* **Wed Apr 16 2025:** o3/o4-mini +* **Mon Apr 14 2025:** gpt 4.1 mini/nano + +### v0.22.10 - 04/14/2025 + +* **Mon Apr 14 2025:** gpt-4.1 + +### v0.22.9 - 04/08/2025 + +* **Mon Apr 07 2025:** migrate from textract to kreuzberg *(NOTE: with kreuzberg, pandoc is a required dependency for using workflows with non-PDF documents)* +* **Mon Apr 07 2025:** switch to newer build system + +### v0.22.8 - 04/07/2025 + +* **Mon Apr 07 2025:** workflow for auto-publish to PyPi + ### v0.22.7 - 03/19/2025 * **Wed Mar 19 2025:** support 
AIMessage objects in streaming diff --git a/README.md b/README.md index 61860c4b..bc0b93b5 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ [![Test status](https://github.com/llm-workflow-engine/llm-workflow-engine/actions/workflows/python-app.yml/badge.svg)](https://github.com/llm-workflow-engine/llm-workflow-engine/actions/workflows/python-app.yml) -[![CodeQL status](https://github.com/llm-workflow-engine/llm-workflow-engine/actions/workflows/codeql.yml/badge.svg)](https://github.com/llm-workflow-engine/llm-workflow-engine/actions/workflows/codeql.yml) +[![CodeQL](https://github.com/llm-workflow-engine/llm-workflow-engine/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/llm-workflow-engine/llm-workflow-engine/actions/workflows/github-code-scanning/codeql)

[image: lwe-logo-small]

@@ -57,7 +57,7 @@ ChatGPT Wrapper was an amazing tool for its time, thank you to its original crea
 
 🔌 [**Simple plugin architecture**](https://llm-workflow-engine.readthedocs.io/en/latest/plugins.html#core-plugins). Extend LWE with custom functionality
 
-đŸ—Ŗ **Supports multiple LLM providers**. Provider plugins allow interacting with other LLMs (GPT-3, Cohere, Hugginface, etc.)
+đŸ—Ŗ **Supports multiple LLM providers**. Provider plugins allow interacting with other LLMs (GPT-3, Cohere, Huggingface, etc.)
 
 🔄[**Build workflows**](https://llm-workflow-engine.readthedocs.io/en/latest/workflows.html). Easily integrate calls to an LLM into larger workflows via Ansible Playbooks
 
diff --git a/docs/how_it_works.rst b/docs/how_it_works.rst
index ad594c7c..e7c8ae78 100644
--- a/docs/how_it_works.rst
+++ b/docs/how_it_works.rst
@@ -67,7 +67,7 @@ List available presets:
 
 Show configuration of the current preset:
 
-.. image:: https://user-images.githubusercontent.com/43772/249006348-88746adc-cf42-47aa-91a8-dfe000ea35e3.png
+.. image:: https://github.com/user-attachments/assets/8f4c89b0-d0ef-4fbb-a4be-d80306462ac8
    :alt: Show configuration of the current preset
 
@@ -82,7 +82,7 @@ List available templates:
 
 Run a template and provide input:
 
-.. image:: https://user-images.githubusercontent.com/43772/249006386-9877218d-5112-46cd-9d89-d3a892515110.png
+.. image:: https://github.com/user-attachments/assets/592b8fcd-4afa-4205-a0e6-8c9362e3f577
    :alt: Run a template and provide input
 
@@ -97,12 +97,12 @@ List available workflows:
 
 Show a workflow:
 
-.. image:: https://user-images.githubusercontent.com/43772/249006985-bd92f8c2-a19b-4f2e-9508-9d6875a2672a.png
+.. image:: https://github.com/user-attachments/assets/af67b8c2-544d-4627-a084-4a22ed1137ce
    :alt: Show a workflow
 
 Run a workflow:
 
-.. image:: https://user-images.githubusercontent.com/43772/249007169-3917cc85-5bbe-4e87-9f95-f3ab65a1e915.png
+.. image:: https://github.com/user-attachments/assets/07a9c680-335c-4350-a10d-4c34ce39b818
    :alt: Run a workflow
 
 See :ref:`python_module_doc` for details on using LWE as an API from Python.
diff --git a/docs/installation.rst b/docs/installation.rst
index 673ac593..218de6e1 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -16,15 +16,22 @@ Requirements
 
 To use this project, you need:
 
-* Python 3.9 or later
-* ``setuptools`` installed. You can install it using ``pip install setuptools``. Make sure that you have the last version of pip: ``pip install --upgrade pip``
+* Python 3.9 or later with pip installed.
 * A database backend (`SQLite `_ by default, any configurable in `SQLAlchemy `_ allowed).
+* Optional:
+  * `Pandoc `_ for document extraction *other than* PDFs.
 
 -----------------------------------------------
 From packages
 -----------------------------------------------
 
-Install the latest version of this software directly from github with pip:
+Install the latest release of this software via pip:
+
+.. code-block:: bash
+
+   pip install llm-workflow-engine
+
+Install the 'bleeding edge' of this software directly from github with pip:
 
 .. code-block:: bash
 
diff --git a/docs/plugins.rst b/docs/plugins.rst
index 83ef91ce..02188fe2 100644
--- a/docs/plugins.rst
+++ b/docs/plugins.rst
@@ -122,8 +122,8 @@ Supported providers
 
 **NOTE:** While these provider integrations are working, none have been well-tested yet.
-* **provider_ai21:** Access to `AI21 `_ models - https://github.com/llm-workflow-engine/lwe-plugin-provider-ai21 +* **provider_ai21:** Access to `Chat AI21 `_ models + https://github.com/llm-workflow-engine/lwe-plugin-provider-chat-ai21 * **provider_azure_openai_chat:** Access to `Azure OpenAI `_ chat models https://github.com/llm-workflow-engine/lwe-plugin-provider-azure-openai-chat * **provider_chat_anthropic:** Access to `Anthropic `_ chat models @@ -146,12 +146,16 @@ Supported providers https://github.com/llm-workflow-engine/lwe-plugin-provider-chat-together * **provider_chat_vertexai:** Access to `Google Vertex AI `_ chat models. https://github.com/llm-workflow-engine/lwe-plugin-provider-chat-vertexai +* **provider_chat_xai:** Access to `xAI `_ chat models. + https://github.com/llm-workflow-engine/lwe-plugin-provider-chat-xai * **provider_huggingface_hub:** Access to `Hugging Face Hub `_ models https://github.com/llm-workflow-engine/lwe-plugin-provider-huggingface-hub * **provider_openai:** Access to non-chat `OpenAI `_ models (GPT-3, etc.) https://github.com/llm-workflow-engine/lwe-plugin-provider-openai * **provider_openrouter:** Access to `OpenRouter `_ models https://github.com/llm-workflow-engine/lwe-plugin-provider-openrouter +* **provider_requesty:** Access to `Requesty `_ models + https://github.com/llm-workflow-engine/lwe-plugin-provider-requesty * **provider_vertexai:** Access to `Google Vertex AI `_ text/code models. https://github.com/llm-workflow-engine/lwe-plugin-provider-vertexai diff --git a/docs/presets.rst b/docs/presets.rst index 000a5a82..16a3a309 100644 --- a/docs/presets.rst +++ b/docs/presets.rst @@ -24,12 +24,12 @@ To save an existing configuration as a preset: .. code-block:: console - /preset-save mypresetname + /preset save mypresetname Later, to load that configuration for use: .. code-block:: console - /preset-load mypresetname + /preset load mypresetname See ``/help`` for the various other preset commands. 
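A note on the `/preset save` / `/preset load` rename above: saved presets can also be selected when driving LWE from Python. The sketch below is illustrative only — it assumes the `ApiBackend` entry point and the `preset` key of `request_overrides` described in the project docs, and reuses the example name `mypresetname` from the docs:

```python
# Illustrative sketch (assumptions: ApiBackend is the documented Python
# entry point, and request_overrides={"preset": ...} selects a saved preset).
from lwe import ApiBackend

backend = ApiBackend()
success, response, error = backend.ask(
    "Say hello!",
    request_overrides={"preset": "mypresetname"},  # saved via /preset save
)
print(response if success else error)
```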
diff --git a/docs/requirements.txt b/docs/requirements.txt index e221e961..7ba6cf97 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,31 +2,31 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile requirements.in +# pip-compile --output-file=- # alabaster==0.7.16 # via sphinx -babel==2.12.1 +babel==2.17.0 # via sphinx -certifi==2023.5.7 +certifi==2025.1.31 # via requests -charset-normalizer==3.2.0 +charset-normalizer==3.4.1 # via requests -docutils==0.20.1 +docutils==0.21.2 # via sphinx -idna==3.4 +idna==3.10 # via requests imagesize==1.4.1 # via sphinx -jinja2==3.1.2 +jinja2==3.1.6 # via sphinx -markupsafe==2.1.3 +markupsafe==3.0.2 # via jinja2 -packaging==23.1 +packaging==24.2 # via sphinx -pygments==2.15.1 +pygments==2.19.1 # via sphinx -requests==2.31.0 +requests==2.32.3 # via sphinx snowballstemmer==2.2.0 # via sphinx @@ -36,17 +36,17 @@ sphinx==7.3.7 # sphinx-copybutton sphinx-copybutton==0.5.2 # via -r requirements.in -sphinxcontrib-applehelp==1.0.4 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.1 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -urllib3==2.0.3 +urllib3==2.4.0 # via requests diff --git a/docs/templates.rst b/docs/templates.rst index 928026c1..23bd7265 100644 --- a/docs/templates.rst +++ b/docs/templates.rst @@ -44,6 +44,6 @@ These front matter attributes have special functionality: * ``model_customizations```: A dictionary of model customization overrides This dictionary is recursively merged with the struture of the preset configuration. To see the exact structure of a preset, - use the ``/preset-edit`` command. + use the ``/preset edit`` command. All other attributes will be passed to the template as variable substitutions. diff --git a/docs/workflows.rst b/docs/workflows.rst index 1557c5d6..0b77f430 100644 --- a/docs/workflows.rst +++ b/docs/workflows.rst @@ -6,13 +6,13 @@ Workflows LWE supports more complex linear workflows via built-in integration for `Ansible playbooks `_. -If you chose to install the example workflows, you can run ``/workflows`` to list them, and ``/workflow-show workflowname`` to view the playbook configuration for a particular workflow. +If you chose to install the example workflows, you can run ``/workflows`` to list them, and ``/workflow show workflowname`` to view the playbook configuration for a particular workflow. To execute a workflow from within the program: .. code-block:: console - /workflow-run workflowname + /workflow run workflowname To run a workflow from the command line: @@ -66,7 +66,7 @@ Example: retries: 10 delay: 3 -``text_extractor``: Provides an easy way to extract text content from many different file types. For supported arguments and return values, see the `text_extractor module documentation `_. +``text_extractor``: Provides an easy way to extract text content from many different file types. For supported arguments and return values, see the `text_extractor module documentation `_. *NOTE: PDF document extraction is supported natively, to extract other document types (.docx, .xlsx, etc.) you need to install* `pandoc `_ *and make sure it's available in your PATH.* Example: @@ -115,7 +115,7 @@ Here's how to use the templates to generate workflows: .. 
code-block:: console - /template-edit-run workflow-spec-generator.md + /template edit-run workflow-spec-generator.md This opens the template in your editor. Under the ``GOAL`` section, describe the goal you're trying to accomplish. Try to be fairly specific -- the more detail you can give the LLM, the better it will be at producing the spec. @@ -135,7 +135,7 @@ Here's how to use the templates to generate workflows: .. code-block:: console - /template-edit-run workflow-generator.md + /template edit-run workflow-generator.md Paste your original goal and the spec written by the LLM into the appropriate sections in the template, save and close the editor to run the template. @@ -147,7 +147,7 @@ Here's how to use the templates to generate workflows: .. code-block:: console - /workflow-edit workflowname + /workflow edit workflowname Then paste in the generated workflow. @@ -157,7 +157,7 @@ Here's how to use the templates to generate workflows: .. code-block:: console - /workflow-run workflowname + /workflow run workflowname At this point the workflow should either just work, or you may need to do a little troubleshooting to work out the last kinks. diff --git a/lwe/backends/api/request.py b/lwe/backends/api/request.py index 7d3ea5d0..32dea270 100644 --- a/lwe/backends/api/request.py +++ b/lwe/backends/api/request.py @@ -309,7 +309,14 @@ def strip_out_messages_over_max_tokens(self, messages, max_tokens): def is_openai_o_series(self): if self.provider.name == "provider_chat_openai": model_name = getattr(self.llm, self.provider.model_property_name) - if model_name.startswith("o1-") or model_name.startswith("o3-"): + if model_name.startswith("o1") or model_name.startswith("o3") or model_name.startswith("o4"): + return True + return False + + def is_openai_responses_api_series(self): + if self.provider.name == "provider_chat_openai": + model_name = getattr(self.llm, self.provider.model_property_name) + if model_name.startswith("o1-pro") or model_name.startswith("o3-pro"): return True return False @@ -333,6 +340,8 @@ def call_llm(self, messages): self.log.debug(f"Calling LLM with message count: {len(messages)}") # TODO: Remove this when o1 models support system messages. 
if self.is_openai_o_series(): + if self.is_openai_responses_api_series(): + self.llm.use_responses_api = True if self.is_openai_legacy_reasoning_model(): messages = [{**m, "role": "user"} if m["role"] == "system" else m for m in messages] self.llm.temperature = 1 diff --git a/lwe/backends/api/workflow/library/text_extractor.py b/lwe/backends/api/workflow/library/text_extractor.py index e3208e10..647127e4 100644 --- a/lwe/backends/api/workflow/library/text_extractor.py +++ b/lwe/backends/api/workflow/library/text_extractor.py @@ -6,7 +6,7 @@ import tempfile from urllib.parse import urlparse -import textract +import kreuzberg import pymupdf4llm import fitz fitz.TOOLS.mupdf_display_errors(False) @@ -20,7 +20,7 @@ config.set("debug.log.enabled", True) log = Logger("text_extractor", config) -TEXTRACT_SUPPORTED_FILE_EXTENSIONS = [ +KREUZBERG_SUPPORTED_FILE_EXTENSIONS = [ # Microsoft Office formats ".docx", ".pptx", @@ -121,13 +121,13 @@ def extract_text_pymupdf(path): return pymupdf4llm.to_markdown(path) -def extract_text_textract(path): - return textract.process(path).decode("utf-8") +def extract_text_kreuzberg(path): + return kreuzberg.extract_file_sync(path) def get_file_extension(path, default_extension): file_extension = default_extension - for ext in set(TEXTRACT_SUPPORTED_FILE_EXTENSIONS + PYMUPDF_SUPPORTED_EXTENSIONS): + for ext in set(KREUZBERG_SUPPORTED_FILE_EXTENSIONS + PYMUPDF_SUPPORTED_EXTENSIONS): if path.lower().endswith(ext): file_extension = ext break @@ -184,12 +184,13 @@ def main(): message = f"Error extracting {path} content with pymupdf4llm: {str(e)}" log.error(message) module.fail_json(msg=message) - elif file_extension in TEXTRACT_SUPPORTED_FILE_EXTENSIONS: - log.debug(f"Extracting content from {path} with textract") + elif file_extension in KREUZBERG_SUPPORTED_FILE_EXTENSIONS: + log.debug(f"Extracting content from {path} with kreuzberg") try: - content = extract_text_textract(path) + extraction = extract_text_kreuzberg(path) + content = extraction.content except Exception as e: - message = f"Error extracting {file_extension} content with textract: {str(e)}" + message = f"Error extracting {file_extension} content with kreuzberg: {str(e)}" log.error(message) module.fail_json(msg=message) else: diff --git a/lwe/core/constants.py b/lwe/core/constants.py index 10fca5f3..4692ce5f 100644 --- a/lwe/core/constants.py +++ b/lwe/core/constants.py @@ -9,7 +9,7 @@ "tool_choice", ] -# Backend speciifc constants +# Backend specific constants API_BACKEND_DEFAULT_MODEL = "gpt-4o-mini" SYSTEM_MESSAGE_DEFAULT = "You are a helpful assistant." 
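Context for the textract → kreuzberg migration above: `kreuzberg.extract_file_sync()` returns an extraction-result object rather than raw bytes, which is why `text_extractor.py` now reads `.content` off the return value (and why pandoc becomes a required dependency for non-PDF documents, per the commit log note). A minimal standalone sketch — the input filename is a hypothetical example:

```python
# Sketch of the extraction path used by text_extractor.py above:
# extract_file_sync() returns a result object; the extracted text is on
# its .content attribute (textract's process() returned raw bytes instead).
import kreuzberg

result = kreuzberg.extract_file_sync("example.docx")  # hypothetical input file
print(result.content)
```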
diff --git a/lwe/plugins/provider_chat_openai.py b/lwe/plugins/provider_chat_openai.py index f3ae1ac1..02215705 100644 --- a/lwe/plugins/provider_chat_openai.py +++ b/lwe/plugins/provider_chat_openai.py @@ -1,4 +1,5 @@ from langchain_openai import ChatOpenAI +from langchain_core.messages import AIMessage, AIMessageChunk from lwe.core.provider import Provider, PresetValue from lwe.core import constants @@ -106,6 +107,24 @@ def static_models(self): "gpt-4o-mini-2024-07-18": { "max_tokens": 131072, }, + "gpt-4.1": { + "max_tokens": 1047576, + }, + "gpt-4.1-2025-04-14": { + "max_tokens": 1047576, + }, + "gpt-4.1-mini": { + "max_tokens": 1047576, + }, + "gpt-4.1-mini-2025-04-14": { + "max_tokens": 1047576, + }, + "gpt-4.1-nano": { + "max_tokens": 1047576, + }, + "gpt-4.1-nano-2025-04-14": { + "max_tokens": 1047576, + }, "gpt-4.5-preview": { "max_tokens": 131072, }, @@ -130,12 +149,36 @@ def static_models(self): "o1-2024-12-17": { "max_tokens": 204800, }, + "o1-pro": { + "max_tokens": 204800, + }, + "o1-pro-2025-03-19": { + "max_tokens": 204800, + }, "o3-mini": { "max_tokens": 204800, }, "o3-mini-2025-01-31": { "max_tokens": 204800, }, + "o3": { + "max_tokens": 204800, + }, + "o3-2025-04-16": { + "max_tokens": 204800, + }, + "o3-pro": { + "max_tokens": 204800, + }, + "o3-pro-2025-06-10": { + "max_tokens": 204800, + }, + "o4-mini": { + "max_tokens": 204800, + }, + "o4-mini-2025-04-16": { + "max_tokens": 204800, + }, } def prepare_messages_method(self): @@ -144,6 +187,21 @@ def prepare_messages_method(self): def llm_factory(self): return CustomChatOpenAI + def format_responses_content(self, content_list): + return "".join([content['text'] for content in content_list if 'text' in content]) + + def handle_non_streaming_response(self, response: AIMessage): + if isinstance(response.content, list): + response.content = self.format_responses_content(response.content) + return response + + def handle_streaming_chunk(self, chunk: AIMessageChunk | str, _previous_chunks: list[AIMessageChunk | str]) -> str: + if isinstance(chunk, str): + return chunk + elif isinstance(chunk.content, str): + return chunk.content + return self.format_responses_content(chunk.content) + def customization_config(self): return { "verbose": PresetValue(bool), diff --git a/lwe/version.py b/lwe/version.py index 12f825ca..6a214e0f 100644 --- a/lwe/version.py +++ b/lwe/version.py @@ -1 +1 @@ -__version__ = "0.22.7" +__version__ = "0.22.13" diff --git a/misc/slide-presentation.md b/misc/slide-presentation.md new file mode 100644 index 00000000..34502bae --- /dev/null +++ b/misc/slide-presentation.md @@ -0,0 +1,145 @@ +--- +type: slide +slideOptions: + theme: black + transition: 'fade' +--- + +# LLM Workflow   Engine + +## What is LWE? + +Power CLI and Workflow manager for LLMs + + + https://github.com/llm-workflow-engine/llm-workflow-engine + + +--- + +

+## #1: Conversation management
+
+* Chat-style conversations
+* Start new conversations
+* Review/extend/remove existing conversations
+
+https://github.com/llm-workflow-engine/llm-workflow-engine
+
+---
+
+## #2: Model configuration
+
+ * Configure LLMs across different providers
+ * Customize model attributes (temperature, etc.)
+ * Save/reload existing configurations
+
+https://github.com/llm-workflow-engine/llm-workflow-engine
+
+---
+
+## #3: Prompt templates
+
+ * Design/save new prompts
+ * Include/pass variables to prompts during execution
+ * Connect a specific prompt to a specific model configuration
+
+https://github.com/llm-workflow-engine/llm-workflow-engine
+
+---
+
+## #4: Workflows
+
+ * Design multi-step workflows using YAML
+ * Integrate prompt templates
+ * Integrate model configurations
+ * Save LLM interactions to conversations
+ + + https://github.com/llm-workflow-engine/llm-workflow-engine + \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index aa4949aa..b601337e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,2 +1,90 @@ +[build-system] +requires = ["setuptools>=42", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "llm-workflow-engine" +dynamic = ["version"] +description = "CLI tool and workflow manager for common LLMs" +readme = "README.md" +authors = [ + {name = "Mahmoud Mabrouk", email = "mahmoudmabrouk.mail@gmail.com"}, + {name = "Chad Phillips"} +] +requires-python = ">=3.9" +license = "MIT" +classifiers = [ + "Programming Language :: Python :: 3", + "Operating System :: OS Independent", +] +dependencies = [ + "ansible>=8.0", + "ansible-core>=2.15", + "alembic", + "beautifulsoup4", + "docutils>=0.20.1", + "email-validator", + "Jinja2", + "kreuzberg", + "langchain>=0.3.19,<0.4", + "langchain-core>=0.3.39,<0.4", + "langchain-community>=0.3.16,<0.4", + "langchain_openai>=0.3.3", + "names", + "numexpr>=2.8.4", + "openpyxl", + "pdfminer.six", + "prompt-toolkit", + "pymupdf4llm", + "pyperclip", + "python-frontmatter", + "PyYAML", + "rich", + "setuptools", + "sqlalchemy>=1.4.48", + "tiktoken", +] + +[project.optional-dependencies] +dev = [ + "pytest", + "pytest-datadir", + "pip-tools", + "flake8", + "black", +] + +[project.urls] +Homepage = "https://github.com/llm-workflow-engine/llm-workflow-engine" +Repository = "https://github.com/llm-workflow-engine/llm-workflow-engine" + +[project.scripts] +lwe = "lwe.main:main" + +[project.entry-points."lwe_plugins"] + +[tool.setuptools] +packages = {find = {}} + +[tool.setuptools.package-data] +"lwe" = [ + "backends/api/schema/alembic.ini", + "backends/api/schema/alembic/*", + "backends/api/schema/alembic/**/*", + "examples/*", + "examples/**/*", + "tools/*", + "tools/**/*", + "presets/*", + "presets/**/*", + "templates/*", + "templates/**/*", + "workflows/*", + "workflows/**/*", +] + +[tool.setuptools.dynamic] +version = {attr = "lwe.version.__version__"} + [tool.black] line-length = 100 diff --git a/scripts/release-pypi.sh b/scripts/release-pypi.sh index 5bb04c09..c3950bf1 100755 --- a/scripts/release-pypi.sh +++ b/scripts/release-pypi.sh @@ -3,39 +3,54 @@ # Convenience script to handle preparing for PyPi release, and printing out the # commands to execute it. +clean() { + echo "Cleaning build environment" + rm -rfv dist/ build/ + find . -depth -name __pycache__ -type d -exec rm -rfv {} \; +} + execute() { - local update_build_release_packages="pip install --upgrade wheel build twine" - local clean="rm -rfv dist/ build/" + local update_build_release_packages="pip install --upgrade build twine validate-pyproject" + local validate="validate-pyproject pyproject.toml" local build="python -m build" + local check_dist="twine check dist/*" local test_pypi_upload="python -m twine upload --repository testpypi dist/*" local pypi_upload="python -m twine upload --skip-existing dist/*" + # Uncomment the following line if you want to sign releases + # local pypi_upload="python -m twine upload --skip-existing --sign dist/*" echo "Updating build and release packages with command:" echo " ${update_build_release_packages}" ${update_build_release_packages} if [ $? -eq 0 ]; then - echo "Cleaning build environment with command:" - echo " ${clean}" - ${clean} + echo "Validating pyproject.toml with command:" + echo " ${validate}" + ${validate} if [ $? -eq 0 ]; then echo "Building release with command:" echo " ${build}" ${build} if [ $? 
-eq 0 ]; then - echo "Build successful" - echo - echo "Test release with command:" - echo " ${test_pypi_upload}" - echo - echo "Release with command:" - echo " ${pypi_upload}" + echo "Checking built distribution with command:" + echo " ${check_dist}" + ${check_dist} + if [ $? -eq 0 ]; then + echo "Build successful and verified" + echo + echo "Test release with command:" + echo " ${test_pypi_upload}" + echo + echo "Release with command:" + echo " ${pypi_upload}" + fi fi fi fi } -if [ -d vit ] && [ -r setup.py ]; then +if [ -r pyproject.toml ]; then + clean execute else echo "ERROR: must run script from repository root" diff --git a/setup.py b/setup.py deleted file mode 100644 index 50e11356..00000000 --- a/setup.py +++ /dev/null @@ -1,57 +0,0 @@ -from setuptools import find_packages, setup -import re -from os import path - -FILE_DIR = path.dirname(path.abspath(path.realpath(__file__))) - -with open("README.md", "r", encoding="utf-8") as fh: - long_description = fh.read() - -with open("requirements.txt") as f: - install_requirement = f.readlines() - -with open(path.join(FILE_DIR, "lwe", "version.py")) as f: - version = re.match(r'^__version__ = "([\w\.]+)"$', f.read().strip())[1] - -setup( - name="llm-workflow-engine", - version=version, - author="Mahmoud Mabrouk, Chad Phillips", - author_email="mahmoudmabrouk.mail@gmail.com", - description="CLI tool and workflow manager for common LLMs, with a focus on OpenAI's models", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/llm-workflow-engine/llm-workflow-engine", - packages=find_packages(), - package_data={ - "lwe": [ - "backends/api/schema/alembic.ini", - "backends/api/schema/alembic/*", - "backends/api/schema/alembic/**/*", - "examples/*", - "examples/**/*", - "tools/*", - "tools/**/*", - "presets/*", - "presets/**/*", - "templates/*", - "templates/**/*", - "workflows/*", - "workflows/**/*", - ], - }, - install_requires=install_requirement, - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - python_requires=">=3.9", - entry_points={ - "console_scripts": [ - "lwe = lwe.main:main", - ], - "lwe_plugins": [], - }, - scripts=[], -)
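One consequence of moving from `setup.py` to the `[tool.setuptools.dynamic]` table above: the published version is derived from `lwe.version.__version__`, so the module and the installed distribution metadata must agree. A quick sanity-check sketch:

```python
# Sketch: confirm the version that setuptools reads dynamically from
# lwe/version.py matches the installed distribution metadata.
from importlib.metadata import version

from lwe.version import __version__

assert version("llm-workflow-engine") == __version__  # "0.22.13" in this release
print(__version__)
```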