diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 3e316d34..28babfb7 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -31,11 +31,11 @@ jobs: fail-fast: false max-parallel: 1 matrix: - os: [ubuntu-24.04, macos-13, windows-2022] + os: [ubuntu-24.04, macos-15, windows-2022] include: - os: ubuntu-24.04 name: Linux - - os: macos-13 + - os: macos-15 name: macOS - os: windows-2022 name: Windows @@ -82,12 +82,13 @@ jobs: python -m finecode run build_artifact shell: bash - # - name: Run unit tests - # if: ${{ !cancelled() }} - # run: | - # source .venvs/dev_workspace/bin/activate - # python -m finecode run test - # shell: bash + - name: Run unit tests + if: ${{ !cancelled() }} + run: | + source .venvs/dev_workspace/bin/activate + # TODO: test with all supported python versions + python -m finecode run run_tests + shell: bash - name: Publish to TestPyPI and verify if: runner.os == 'Linux' && github.event_name == 'workflow_dispatch' && inputs.publish_testpypi diff --git a/.mcp.json b/.mcp.json new file mode 100644 index 00000000..70cfbe23 --- /dev/null +++ b/.mcp.json @@ -0,0 +1,9 @@ +{ + "mcpServers": { + "finecode": { + "type": "stdio", + "command": ".venvs/dev_workspace/bin/python", + "args": ["-m", "finecode", "start-mcp"] + } + } +} diff --git a/docs/adr/0001-use-adr.md b/docs/adr/0001-use-adr.md new file mode 100644 index 00000000..4c7361b0 --- /dev/null +++ b/docs/adr/0001-use-adr.md @@ -0,0 +1,47 @@ +# ADR-0001: Use ADRs for architecture decisions + +- **Status:** accepted +- **Date:** 2026-03-19 +- **Deciders:** @Aksem +- **Tags:** meta + +## Context + +FineCode has several important architectural decisions that +are currently documented implicitly across code, comments, and CLAUDE.md. When +new contributors or AI agents work on the codebase, they lack visibility into +*why* decisions were made, what alternatives were considered, and what +constraints must be preserved. 
+ +As the project grows and automated testing is introduced, we need a lightweight +way to record decisions so they can be referenced, reviewed, and superseded +over time. + +## Related ADRs Considered + +None — this is the first ADR. + +## Decision + +We will use Architecture Decision Records stored in `docs/adr/` following a +simplified [MADR](https://adr.github.io/madr/) (Markdown Any Decision Records) +template. The required sections are Context, Related ADRs Considered, Decision, +and Consequences. Each ADR is a sequentially numbered Markdown file. + +The template also documents optional sections (Alternatives Considered, Risks, +Related Decisions, References, Implementation Notes, Review Date) that can be +added when they provide value, but are not required. + +ADRs are immutable once accepted. Changed decisions produce a new ADR that +supersedes the previous one. + +## Consequences + +- Every architecturally significant decision gets a permanent, discoverable + record with its rationale. +- New contributors and AI agents can understand *why* the codebase is shaped + the way it is. +- Slightly more process overhead per decision — mitigated by keeping the + template minimal. +- Existing implicit decisions can be backfilled as ADRs when they become + relevant. diff --git a/docs/adr/0002-port-file-discovery-for-wm-server.md b/docs/adr/0002-port-file-discovery-for-wm-server.md new file mode 100644 index 00000000..46d67496 --- /dev/null +++ b/docs/adr/0002-port-file-discovery-for-wm-server.md @@ -0,0 +1,67 @@ +# ADR-0002: Port-file discovery for the WM server + +- **Status:** accepted +- **Date:** 2026-03-19 +- **Deciders:** @Aksem +- **Tags:** ipc, wm-server + +## Context + +The WM (Workspace Manager) server binds to a random available TCP port on +startup to avoid conflicts between multiple instances (e.g. different workspaces, +test runs). 
Clients such as the LSP server, MCP server, and CLI commands are +started independently and need a way to find the WM server's port without +prior coordination or a hard-coded value. + +Two modes of use must be supported: + +- **Shared mode**: a single long-lived WM server shared by multiple clients in the + same workspace (the typical IDE session). +- **Dedicated mode**: a private WM server started by one client (e.g. MCP, + CLI `run`) that must not interfere with the shared instance. + +## Related ADRs Considered + +None — port/discovery mechanism has no overlap with other ADRs at the time of writing. + +## Decision + +The WM server writes its listening port as a plain text number to a +*discovery file* immediately after binding: + +- **Shared discovery file** (default): `{venv}/cache/finecode/wm_port`, where + `{venv}` is the virtual environment in which the finecode WM server is installed. +- **Dedicated discovery file**: a caller-specified path passed via + `--port-file`. Dedicated instances write to this path instead, leaving the + shared file untouched. + +Clients discover the server by reading the file and probing the TCP connection. The probe distinguishes a live server +from a stale file left by a crashed process. The file is deleted on any clean or signal-driven shutdown, and the server directory is created +recursively (including parent directories) on first startup. + +## Consequences + +- **No port conflicts**: random binding means multiple WM instances (different + workspace, concurrent test runs) coexist without configuration. +- **Stale-file resilience**: client verifies the TCP connection, not + just file existence, so a crashed server does not block future starts. +- **Test isolation**: each e2e test can pass its own file path as + the dedicated port file, running a private WM instance without touching the + developer's live shared server or conflicting with other tests.
+- **Cross-process discovery**: any process that can read a file can find the + WM, regardless of parent–child relationship (IDE extensions, CLI tools, MCP + hosts). +- **Crash cleanup gap**: if the server process is killed with SIGKILL or + crashes before port file is removed, the discovery file is not removed. Clients + handle this via the TCP probe, but the stale file persists on disk until the + next server start overwrites it. + +### Alternatives Considered + +- **Fixed/configured port**: eliminates the discovery file but requires port + coordination across concurrent instances and breaks test isolation. +- **Unix domain socket file**: the socket path serves as both identity and + transport endpoint, avoiding the TCP-probe step. Rejected because Unix + sockets are not available on Windows. +- **Environment variable**: works only for direct child processes; IDE + extensions and independently launched CLI commands cannot inherit it. diff --git a/docs/adr/0003-process-isolation-per-extension-environment.md b/docs/adr/0003-process-isolation-per-extension-environment.md new file mode 100644 index 00000000..4f23b2bf --- /dev/null +++ b/docs/adr/0003-process-isolation-per-extension-environment.md @@ -0,0 +1,110 @@ +# ADR-0003: One Extension Runner process per project execution environment + +- **Status:** accepted +- **Date:** 2026-03-19 +- **Deciders:** @Aksem +- **Tags:** architecture, extension-runner + +## Context + +FineCode executes action handlers contributed by extensions. Each handler +declares the **execution environment** (`env`) it runs in and its own set of +dependencies. An execution environment is a named, isolated context serving a +specific purpose (e.g. `runtime` for the project's own runtime code, +`dev_workspace` for workspace tooling, `dev_no_runtime` for dev tools without +runtime deps). In Python, each execution environment is materialized as a +project-local virtual environment. 
+ +The **Extension Runner (ER)** is an inter-language concept — a process that +executes handler code inside a specific execution environment. The current +implementation, `finecode_extension_runner`, is Python-specific. Future +implementations for other languages (e.g. JavaScript, Rust) would follow the +same one-process-per-execution-environment model. + +The primary requirement is to separate dependencies needed by the project's +own runtime from dependencies needed only by tooling. FineCode must be able to +run project code in one execution environment and run development tooling in +other execution environments without forcing them into a single shared +dependency set. + +Once execution environments are isolated, they can also be made more +fine-grained by purpose. This allows tooling dependencies to be grouped +according to their role and makes it possible to move tools with incompatible +dependency requirements into separate execution environments when needed. + +The Workspace Manager (WM) is a long-running server that must stay stable +across the full user session. A handler bug, crash, or blocking call in one +execution environment must not take down the WM or interfere with other +execution environments. + +## Related ADRs Considered + +None — process isolation model has no overlap with other ADRs at the time of writing. + +## Decision + +Each execution environment in a project runs as an independent +**Extension Runner (ER)** subprocess. In the Python implementation, the ER is +launched using the interpreter from the corresponding project-local virtual +environment, so each ER has a fully isolated dependency set. + +Key properties of this design: + +- **One ER per (project, execution environment) pair.** ERs are keyed by + `(project_dir_path, env_name)` in the WM's workspace context. 
+- **Lazy startup with bootstrap exception.** An ER is started only when the + first action request requiring its execution environment arrives, then cached + and reused for subsequent requests. The `dev_workspace` execution + environment is the exception because it must be started first to resolve + presets for other execution environments. +- **JSON-RPC over TCP.** Each ER binds to a random loopback port on startup + and advertises it to the WM. The WM connects via TCP and communicates using + JSON-RPC with Content-Length framing (the same wire format as LSP). +- **Independent lifecycle.** An ER can crash and be restarted without + affecting the WM or ERs for other execution environments. Shutdown is + cooperative: the WM sends `shutdown` + `exit` JSON-RPC calls; the ER exits + cleanly. +- **`dev_workspace` bootstrap execution environment.** The `dev_workspace` + execution environment is always started first; it resolves presets for all + other execution environments before they are configured or started. + +## Consequences + +- **Dependency isolation**: project runtime dependencies and tooling + dependencies are kept separate, and tooling can be split further into + purpose-specific execution environments when conflicts or different + dependency sets require it. +- **Fault isolation**: a crash or hang in one ER does not affect the WM or + other ERs. The WM can restart a failed ER independently. +- **Startup cost**: launching a Python subprocess and importing handler modules + takes time. Mitigated by lazy startup and long-lived reuse. +- **Higher memory usage**: running multiple ER processes per project uses more + RAM than a single shared process. The overhead is expected to be acceptable + relative to the benefits of dependency isolation, fault isolation, and + long-lived per-environment state. 
+- **One virtual environment per execution environment per project**: + `prepare-envs` must create and populate the project-local virtual + environment for each declared execution environment before the ER can start. + Missing virtual environments result in `RunnerStatus.NO_VENV` rather than a + crash. +- **`dev_workspace` is a prerequisite**: preset resolution depends on the + `dev_workspace` ER being available. Actions in other execution environments + cannot be configured until `dev_workspace` is initialized. + +### Alternatives Considered + +- **Single shared process for all handlers**: eliminates subprocess overhead + but forces runtime code and tooling into one shared dependency set, makes + fine-grained environment separation impractical, and means one handler crash + can corrupt or kill the entire tool. +- **Thread per handler invocation**: handlers run in the same process and + virtual environment. No dependency isolation; a blocking or crashing handler + affects all others. +- **In-process plugin loading**: simplest architecture but handlers can import + conflicting packages and accidentally mutate shared WM state. +- **New subprocess per handler invocation**: full isolation per call, but + Python startup cost makes interactive use (e.g. format-on-save) too slow. + It also prevents effective in-process caching between calls because each + invocation starts with cold process state. The long-lived ER model amortizes + startup cost across many invocations and allows caches to be retained in + process when appropriate. 
diff --git a/docs/adr/0004-auto-shutdown-on-disconnect-timeout.md b/docs/adr/0004-auto-shutdown-on-disconnect-timeout.md new file mode 100644 index 00000000..9f1ee94d --- /dev/null +++ b/docs/adr/0004-auto-shutdown-on-disconnect-timeout.md @@ -0,0 +1,95 @@ +# ADR-0004: Auto-shutdown on disconnect timeout + +- **Status:** accepted +- **Date:** 2026-03-19 +- **Deciders:** @Aksem +- **Tags:** lifecycle, wm-server + +## Context + +The WM server is a long-running process started on demand by clients (LSP +server, MCP server, CLI). Clients may terminate without sending an explicit +shutdown request — for example, when the IDE is force-closed, crashes, or the +extension is reloaded. Without a self-termination mechanism, the WM would run +indefinitely as a ghost process, holding the discovery file and consuming +resources. + +Clients may also intentionally stop or restart the WM through an explicit +shutdown request. This ADR addresses the complementary case where no such +request is sent and the WM must determine on its own when to exit. + +Two distinct scenarios require handling: + +1. **No client ever connects** — the WM started successfully but the client + failed to connect (e.g. misconfiguration, client crash during startup). +2. **Last client disconnects** — a normal session end or unexpected client + termination. + +## Related ADRs Considered + +Reviewed [ADR-0002](0002-port-file-discovery-for-wm-server.md) — related topic: +the WM's shutdown flow performs the discovery-file cleanup defined there. + +## Decision + +The WM server uses two independent timeout-based shutdown mechanisms: + +- **No-client timeout** (default 30 s): started immediately after the server + begins listening. If no client connects within this window, the WM performs + its normal shutdown and exits. +- **Disconnect timeout** (default 30 s): started when the last client + disconnects. If no client reconnects within this window, the WM performs its + normal shutdown and exits. 
+ +These timeouts complement, rather than replace, explicit shutdown requests used +by clients that intentionally stop or restart the WM. + +Both timeout paths use the WM's normal shutdown flow, including discovery-file +cleanup (see [ADR-0002](0002-port-file-discovery-for-wm-server.md)). + +The disconnect timeout is configurable so that tests and dedicated instances +can use a shorter grace period when needed. + +Using the same 30-second default for both timeouts keeps lifecycle behavior +simple and provides a reasonable reconnection window for IDE extension reloads +and brief transient disconnects without leaving orphaned processes running for +long. + +## Consequences + +- **Ghost process prevention**: the WM exits automatically after a client + disconnects, without requiring clients to explicitly decide when the shared + WM should stop. This is the primary defense against orphaned processes after + IDE close or crash. +- **Reconnection window**: the grace period allows clients to reconnect within + the timeout — for example, after an IDE extension reload or a brief + disconnection. The WM does not need to be restarted for each reconnection. +- **Warm reuse across brief idle gaps**: the grace period allows a shared WM + to survive short pauses between independent clients, such as sequential CLI + commands, preserving in-process state and caches between commands and + reducing restart overhead. +- **Connection-driven lifecycle**: shutdown depends on client liveness rather + than completion of previously requested work. Once no clients remain past the + grace period, the WM exits through its normal shutdown path. +- **Discovery file cleanup**: normal shutdown removes the discovery file, so a + stale file is never left behind after a timeout-driven shutdown (unlike a + SIGKILL). 
+ +### Alternatives Considered + +- **Immediate shutdown on last disconnect**: safe but breaks IDE extension + reload scenarios and brief idle gaps between independent clients, such as + sequential CLI commands using a shared WM. +- **Never auto-shutdown (persistent daemon)**: WM runs until explicitly + stopped. Requires external process management and makes + it harder to reason about lifecycle in tests and CI. +- **Client heartbeat / keepalive**: client sends periodic pings; WM shuts down + if pings stop. More precise than a fixed timeout for detecting dead connected + clients, but it still does not answer how long the WM should remain alive + when no clients are connected at all. Shared-WM use cases with brief idle + gaps between clients, such as sequential CLI commands, would still require a + grace-period timeout or a different persistent-daemon policy. It also + requires all clients to implement the heartbeat protocol. +- **Parent PID tracking**: WM monitors its parent process and exits when the + parent dies. Does not work when the WM is started independently of its client + (e.g. shared WM). diff --git a/docs/adr/0005-zero-based-lines-and-resourceuri-fields-in-action-payloads-and-results.md b/docs/adr/0005-zero-based-lines-and-resourceuri-fields-in-action-payloads-and-results.md new file mode 100644 index 00000000..27756593 --- /dev/null +++ b/docs/adr/0005-zero-based-lines-and-resourceuri-fields-in-action-payloads-and-results.md @@ -0,0 +1,63 @@ +# ADR-0005: Zero-based line numbers and ResourceUri fields in action payloads and results + +- **Status:** accepted +- **Date:** 2026-03-20 +- **Deciders:** @Aksem +- **Tags:** actions, conventions + +## Context + +Action payload and result types that carry source-code locations must agree on +two conventions: how line numbers are represented and how resource locations +are represented. + +**Line numbers.** LSP uses 0-based `Position.line` throughout. 
Test runner CLIs +and linters typically emit 1-based line numbers. A mismatch between the two +means that whoever builds the result (the handler) and whoever consumes it (e.g. the +LSP server) must each know which convention is in use. + +**Resource locations.** Action payloads and results cross process boundaries +and should remain stable across languages and transports. Using runtime-specific +objects such as `pathlib.Path` in boundary DTOs leaks implementation details +into the contract. A semantic `ResourceUri` type, serialized as a URI string, +avoids this and leaves room for future non-local resources. + +This decision applies to all actions that expose source-code locations in their +payload or result schema. + +## Related ADRs Considered + +None — no existing ADR covers payload/result field conventions for +source-code locations. + +## Decision + +- **Line numbers in payload and result fields are 0-based**, consistent with + `Position.line` in the Language Server Protocol. Handlers that read 1-based + line numbers from CLI output must subtract 1 before populating a field. + Display code that shows lines to users must add 1. + +- **Resource locations in payload and result fields use `ResourceUri` + values**, not `pathlib.Path` objects. `ResourceUri` is serialized as a URI + string. Local files use `file://` URIs; future non-local resources may use + other URI schemes. This keeps the field type simple across transports, + languages, and runtimes. + +## Consequences + +- **Handlers bear the conversion cost.** Handlers that consume 1-based CLI + output subtract 1; they do not pass the raw value through. This is a one-line + transformation and the correct place to isolate runner-specific quirks. +- **Display code adds 1.** Any code that renders a line number to a user + (terminal output, hover text) must add 1 to recover the 1-based number a + developer expects to see. 
+- **No Path objects in payload or result fields.** Producers populate location + fields with `ResourceUri` values serialized as URI strings. Consumers that + need a local `Path` may derive one only when the URI scheme is `file`. +- **Future-proof resource model.** The contract is not limited to local + filesystem paths, so future handlers can report locations for non-file + resources without redefining the result schema. +- **Consistency across actions.** `LintMessage.range`, `TestCaseResult.line`, + and `TestItem.line` all follow the same source-location convention, and the + same rule applies to any future action payload or result type that carries + source-code locations rather than requiring per-action documentation. diff --git a/docs/adr/README.md b/docs/adr/README.md new file mode 100644 index 00000000..4f689ab9 --- /dev/null +++ b/docs/adr/README.md @@ -0,0 +1,35 @@ +# Architecture Decision Records + +This directory captures architecturally significant decisions for FineCode +using the [MADR](https://adr.github.io/madr/) (Markdown Any Decision Records) +format — a lightweight, structured template that scales from simple to complex +decisions. + +## What is an ADR? + +An Architecture Decision Record (ADR) is a short document that captures a +single decision along with its context and consequences. ADRs are numbered +sequentially, and once accepted they are **immutable** — if a decision changes, +a new ADR supersedes the old one rather than editing it. + +## How to create a new ADR + +1. **Search first.** Look through the index below (filter by tags) and search + the `docs/adr/` directory for related decisions. Fill in the "Related ADRs + Considered" section with what you found — even if the answer is "None". +2. Copy [template.md](template.md) to `NNNN-short-title.md` (next sequential number). + Use a title that states the decision, not just the topic — + "Auto-shutdown on disconnect timeout" rather than "WM server lifecycle". +3. 
Fill in the required sections (Context, Related ADRs Considered, Decision, Consequences). +4. Set status to `proposed` and open a PR for review. +5. Once merged, update status to `accepted` and add a row to the index table below. + +## Index + +| # | Title | Status | Date | Tags | +|------|--------------------------------------------------------------------------------------------------------------------------------|----------|------------|--------------------------------| +| 0001 | [Use ADRs for architecture decisions](0001-use-adr.md) | accepted | 2026-03-19 | meta | +| 0002 | [Port-file discovery for the WM server](0002-port-file-discovery-for-wm-server.md) | accepted | 2026-03-19 | ipc, wm-server | +| 0003 | [One Extension Runner process per execution environment](0003-process-isolation-per-extension-environment.md) | accepted | 2026-03-19 | architecture, extension-runner | +| 0004 | [Auto-shutdown on disconnect timeout](0004-auto-shutdown-on-disconnect-timeout.md) | accepted | 2026-03-19 | lifecycle, wm-server | +| 0005 | [Zero-based line numbers and ResourceUri fields in action payloads and results](0005-zero-based-lines-and-resourceuri-fields-in-action-payloads-and-results.md) | accepted | 2026-03-20 | actions, conventions | diff --git a/docs/adr/template.md b/docs/adr/template.md new file mode 100644 index 00000000..303245c5 --- /dev/null +++ b/docs/adr/template.md @@ -0,0 +1,58 @@ +# ADR-NNNN: Title + +- **Status:** proposed | accepted | deprecated | superseded by [ADR-XXXX](XXXX-title.md) +- **Date:** YYYY-MM-DD +- **Deciders:** @github-username +- **Tags:** tag1, tag2 + +## Context + +What is the issue or force that is motivating this decision? What constraints +exist? What is the current situation? + +## Related ADRs Considered + +List the ADRs you searched for and reviewed before writing this one to avoid +duplicates. Write "None" if no existing ADR overlaps with this topic. + +Example: "Reviewed [ADR-0003](0003-...) 
— different topic (process isolation, +not port discovery)." + +## Decision + +What is the change that we're proposing or have agreed to implement? + +## Consequences + +What becomes easier or harder as a result of this decision? What are the +trade-offs? + + diff --git a/docs/cli.md b/docs/cli.md index 19aec257..9fc8a323 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -9,6 +9,36 @@ python -m finecode [options] --- +## Usage modes + +The `run` command supports two usage modes. + +### Standalone (one-shot) — default + +Each `run` invocation is fully independent. FineCode starts a dedicated WM Server subprocess for the duration of the command, then shuts it down on exit. This is the default behavior. + +```bash +python -m finecode run lint +``` + +Use this in CI/CD pipelines or any context where you don't want persistent background processes. Results from one action can be saved to the file cache and referenced by a later action via `--map-payload-fields` (see the `run` reference below). + +### Persistent server + +A long-lived WM Server holds warm state — loaded configuration, started runners — across multiple `run` calls. Use `--shared-server` to connect to a running shared instance instead of starting a dedicated one. + +```bash +# Connect to the shared server (start it first if needed): +python -m finecode run --shared-server lint +python -m finecode run --shared-server format +``` + +This mode is used automatically by the LSP and MCP integrations. It gives faster repeated runs because configuration loading and runner startup are amortized across calls. + +The server waits 30 seconds after the last client disconnects before shutting down (configurable via `--disconnect-timeout` on `start-wm-server`). + +--- + ## `run` Run one or more actions across projects. @@ -22,11 +52,13 @@ python -m finecode run [options] [ ...] [payload] [--config.` | Use `` as the workspace root instead of `cwd` | -| `--project=` | Run only in this project. Repeatable for multiple projects. 
| +| `--project=` | Run only in this project (matched by `[project].name` from `pyproject.toml`). Repeatable for multiple projects. | | `--concurrently` | Run actions concurrently within each project | -| `--trace` | Enable verbose (trace-level) logging | +| `--shared-server` | Connect to the shared persistent WM Server instead of starting a dedicated one | +| `--log-level=` | Set log level: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR` (default: `INFO`) | | `--no-env-config` | Ignore `FINECODE_CONFIG_*` environment variables | | `--no-save-results` | Do not write action results to the cache directory | +| `--dev-env=` | Override the detected dev environment. One of: `ai`, `ci`, `cli`, `ide`, `precommit` (default: auto-detected — see [Dev environment detection](#dev-environment-detection)) | ### Payload @@ -83,16 +115,25 @@ python -m finecode run lint --config.ruff.line_length=120 Create and populate virtual environments for all handler dependencies. ``` -python -m finecode prepare-envs [--recreate] [--trace] [--debug] +python -m finecode prepare-envs [--recreate] [--env-names=]... + [--project=]... [--log-level=] [--debug] ``` Must be run from the workspace or project root. Creates venvs under `.venvs//` and installs each handler's declared dependencies. +See [Preparing Environments](guides/preparing-environments.md) for a full explanation of the three-step sequence and filtering options. + | Option | Description | |---|---| | `--recreate` | Delete and recreate all venvs from scratch | -| `--trace` | Enable verbose logging | +| `--env-names=` | Restrict handler dependency installation to the named env(s). Repeatable. See note below. | +| `--project=` | Restrict preparation to the named project(s) (matched by `[project].name` from `pyproject.toml`). Repeatable. 
| +| `--log-level=` | Set log level: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR` (default: `INFO`) | | `--debug` | Wait for a debugpy client on port 5680 before starting | +| `--dev-env=` | Override the detected dev environment. One of: `ai`, `ci`, `cli`, `ide`, `precommit` (default: auto-detected) | + + +!!! note `--env-names` restricts only the `install_envs` step. The `create_envs` step still runs for **all** envs regardless of this flag — virtualenvs must exist for every env even when you only need to update dependencies in one of them. --- @@ -101,25 +142,53 @@ Must be run from the workspace or project root. Creates venvs under `.venvs/ [--trace] [--debug] +python -m finecode dump-config --project= [--log-level=] [--debug] ``` Output is written to `/finecode_config_dump/`. | Option | Description | |---|---| -| `--project=` | **(Required)** Project to dump config for | -| `--trace` | Enable verbose logging | +| `--project=` | **(Required)** Project to dump config for (matched by `[project].name` from `pyproject.toml`) | +| `--log-level=` | Set log level: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR` (default: `INFO`) | | `--debug` | Wait for a debugpy client on port 5680 | +| `--dev-env=` | Override the detected dev environment. One of: `ai`, `ci`, `cli`, `ide`, `precommit` (default: auto-detected) | + +--- + +## Dev environment detection + +FineCode tracks which environment triggered an action run (e.g. IDE, CLI, CI/CD). This value is passed to handlers via `RunActionMeta.dev_env` and can be used to adjust behavior — for example, to emit machine-readable output in CI. + +The `run`, `prepare-envs`, and `dump-config` commands detect the environment automatically: + +| Condition | Detected value | +|---|---| +| `CI` environment variable is set (any non-empty value) | `ci` | +| Default | `cli` | + +The `CI` variable is set automatically by GitHub Actions, GitLab CI, CircleCI, Travis CI, Bitbucket Pipelines, and most other CI systems. 
+ +Use `--dev-env=` on any command to override the detected value explicitly: + +```bash +# Force CI/CD mode locally +python -m finecode run --dev-env=ci lint + +# Mark as a pre-commit run +python -m finecode run --dev-env=precommit lint +``` + +Valid values: `ai`, `ci`, `cli`, `ide`, `precommit`. --- -## `start-api` +## `start-lsp` Start the FineCode LSP server. Used by the IDE extension — you typically don't call this directly. ``` -python -m finecode start-api --stdio | --socket | --ws [--host ] [--port ] +python -m finecode start-lsp --stdio | --socket | --ws [--host ] [--port ] ``` | Option | Description | @@ -129,7 +198,41 @@ python -m finecode start-api --stdio | --socket | --ws [--host ] [- | `--ws` | Start a WebSocket server | | `--host ` | Host for TCP/WS server (default: 127.0.0.1 for TCP) | | `--port ` | Port for TCP/WS server | -| `--mcp` | Also start an MCP server | -| `--mcp-port ` | Port for the MCP server | -| `--trace` | Enable verbose logging | +| `--log-level=` | Set log level: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR` (default: `INFO`) | | `--debug` | Wait for a debugpy client on port 5680 | + +The LSP server connects to the **FineCode WM Server** on startup (starting one if needed). See [LSP and MCP Architecture](reference/lsp-mcp-architecture.md) for details. + +--- + +## `start-mcp` + +Start the FineCode MCP server on stdio. Connects to a running FineCode WM Server (or starts one) and exposes FineCode tools via the Model Context Protocol. + +```text +.venvs/dev_workspace/bin/python -m finecode start-mcp [--workdir=] [--log-level=] +``` + +| Option | Description | +| --- | --- | +| `--workdir=` | Workspace root directory (default: current directory). | +| `--log-level=` | Set log level: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR` (default: `INFO`) | + +Typically started automatically by MCP-compatible clients (for example, Claude Code) — see [IDE and MCP Setup](getting-started-ide-mcp.md#mcp-setup-for-ai-clients). 
+ +--- + +## `start-wm-server` + +Start the FineCode Workspace Manager Server standalone (TCP JSON-RPC), listen for client connections. Shuts down after the last client disconnects and the disconnect timeout expires. + +```text +python -m finecode start-wm-server [--log-level=] [--disconnect-timeout=] +``` + +| Option | Description | +| --- | --- | +| `--log-level=` | Set log level: `TRACE`, `DEBUG`, `INFO`, `WARNING`, `ERROR` (default: `INFO`) | +| `--disconnect-timeout=` | Seconds to wait after the last client disconnects before shutting down (default: 30) | + +Usually started automatically by `start-lsp` or `start-mcp`. Can also be started manually for debugging. diff --git a/docs/concepts.md b/docs/concepts.md index 812a03cf..9a4aba86 100644 --- a/docs/concepts.md +++ b/docs/concepts.md @@ -15,15 +15,9 @@ class LintAction(code_action.Action[LintRunPayload, LintRunContext, LintRunResul Actions are identified by their **import path** (e.g. `finecode_extension_api.actions.lint.LintAction`), not by the name used in config. The config name is just a human-readable alias. -Actions can be called from: +## Action Handler -- the CLI (`python -m finecode run lint`) -- the IDE via the LSP server (diagnostics, code actions, formatting) -- other handlers - -## ActionHandler - -An **ActionHandler** is a concrete implementation of an action. Multiple handlers can be registered for a single action. For example, the `lint` action might have handlers for ruff, flake8, and mypy — each independently checking the code. +An **Action Handler** is a concrete implementation of an action. Multiple handlers can be registered for a single action. For example, the `lint` action might have handlers for ruff, flake8, and mypy — each independently checking the code. Each handler: @@ -61,6 +55,31 @@ flowchart LR **Concurrent mode** (`run_handlers_concurrently: true`): all handlers run in parallel and results are merged afterward. 
Accessing `context.current_result` in concurrent mode raises `RuntimeError`. Useful for independent linters. +## Service + +A **Service** is a long-lived dependency that handlers (and other services) can request via dependency injection. The Extension Runner resolves services by type annotation and injects them into handler constructors. + +Service bindings are declared by interface and implementation: + +- `interface`: import path of the service protocol (e.g. `finecode_extension_api.interfaces.ihttpclient.IHttpClient`) +- `source`: import path of the implementation class +- `env`: virtualenv name to install the service dependencies into +- `dependencies`: packages to install for that service + +Services are singletons per Extension Runner. `init()` runs on first use, and `DisposableService` instances are disposed when the last handler using them shuts down. + +Service declarations merge by `interface`, so a project can rebind a preset's service by declaring the same `interface` in `pyproject.toml`. + +```toml +[[tool.finecode.service]] +interface = "finecode_extension_api.interfaces.ihttpclient.IHttpClient" +source = "finecode_httpclient.HttpClient" +env = "dev_no_runtime" +dependencies = ["finecode_httpclient~=0.1.0a1"] +``` + +See the [Services reference](reference/services.md) for the list of built-in services and which presets or extensions provide them. + ## Preset A **Preset** is a Python package that bundles action and handler declarations into a reusable, distributable configuration. Users install a preset as a `dev_workspace` dependency and reference it in `pyproject.toml`: @@ -76,15 +95,23 @@ A preset contains a `preset.toml` file that declares which handlers to activate When configuring an action in `pyproject.toml`, you can control how your configuration relates to preset handlers: -- **Default (additive):** your handlers are added to the preset's handlers. +- **`handlers_mode = "merge"`:** (default) your handlers are added to the preset's handlers. 
- **`handlers_mode = "replace"`:** your handler list completely replaces the preset's handlers for that action. -- **`disabled = true` on a handler entry:** disables that specific inherited handler. +- **`enabled = false` on a handler entry:** disables that specific inherited handler. + +## Source Artifact + +A **Source Artifact** is a unit of source code that build/publish-style actions operate on. It is identified by a **source artifact definition file** (for example `pyproject.toml` or `package.json`). This is what many tools call a “project”, but FineCode uses **source artifact** to be more concrete. + +When a source artifact includes FineCode configuration — a `pyproject.toml` with a `[tool.finecode]` section — the Workspace Manager discovers it automatically under the workspace roots provided by the client. Some CLI flags and protocol fields still use the word “project” for compatibility. + +### Source Artifact identification -## Project +The canonical external identifier for a source artifact is its **absolute directory path** (e.g. `/home/user/myrepo/my_package`). This is always unique, language-agnostic, and is what `list_projects` returns in the `path` field. All WM API consumers (LSP, MCP, JSON-RPC) use paths. -A **Project** is any directory containing a `pyproject.toml` with a `[tool.finecode]` section. FineCode discovers all projects under the workspace root automatically. +The human-readable **project name** is taken from the `[project].name` field in `pyproject.toml`. Names are unique within a workspace in practice (two packages with the same name would break dependency resolution), but paths are used in the API to eliminate any ambiguity. The CLI is the only interface that accepts names — it resolves them to paths before making API calls. -A project may belong to a **workspace** — a directory containing multiple projects. 
FineCode handles multi-project workspaces transparently: running `python -m finecode run lint` from the workspace root runs lint in all projects that define it. +A source artifact may belong to a **workspace** — a set of related source artifacts, often a single directory root but sometimes multiple directories. FineCode handles multi-artifact workspaces transparently: running `python -m finecode run lint` from the workspace root runs lint in all source artifacts that define it. ## Workspace Manager and Extension Runner @@ -103,7 +130,7 @@ The `finecode` package. It: The `finecode_extension_runner` package. It: -- runs inside an isolated virtual environment (e.g. `.venvs/dev_no_runtime`) +- runs inside a purpose-specific virtual environment (e.g. `.venvs/dev_no_runtime`) - imports and executes handler code - communicates results back to the WM via LSP/JSON-RPC diff --git a/docs/configuration.md b/docs/configuration.md index f26bc29e..54a0b75d 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -55,7 +55,7 @@ handlers = [ ```toml [tool.finecode.action.lint] handlers = [ - { name = "flake8", disabled = true }, + { name = "flake8", enabled = false }, ] ``` @@ -75,6 +75,28 @@ config.max_line_length = 88 config.extend_ignore = ["E203", "E501"] ``` +### Declaring services + +Services are shared, long-lived dependencies used by handlers. Declare service bindings with `[[tool.finecode.service]]` entries: + +```toml +[[tool.finecode.service]] +interface = "finecode_extension_api.interfaces.ilspclient.ILspClient" +source = "finecode_extension_runner.impls.lsp_client.LspClientImpl" +env = "dev_no_runtime" +dependencies = [] +``` + +Service declarations are merged by `interface`. 
If a preset declares a service, you can rebind it in your project by declaring the same `interface` with a different `source`: + +```toml +[[tool.finecode.service]] +interface = "finecode_extension_api.interfaces.ihttpclient.IHttpClient" +source = "my_company_http.MyHttpClient" +env = "dev_no_runtime" +dependencies = ["my_company_http~=1.2.0"] +``` + ### Environment-specific dependencies You can pin or override dependencies installed into each env: diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 00000000..014f6cfa --- /dev/null +++ b/docs/development.md @@ -0,0 +1,194 @@ +# Development of FineCode + +## Documentation Structure + +The FineCode documentation is organized into the following sections: + +### User-Facing Documentation +- **Home** (`index.md`): Landing page highlighting main benefits and quick start +- **Getting Started** (`getting-started.md`): Installation and basic usage +- **IDE and MCP Setup** (`getting-started-ide-mcp.md`): VSCode and MCP-client integration setup +- **Concepts** (`concepts.md`): Core concepts and architecture overview +- **Configuration** (`configuration.md`): Detailed configuration options +- **CLI Reference** (`cli.md`): Command-line interface documentation + +### Extension Development +- **Guides**: + - Creating an Extension (`guides/creating-extension.md`) + - Creating a Preset (`guides/creating-preset.md`) + - Multi-Project Workspace (`guides/workspace.md`) + +### Reference +- **Built-in Actions** (`reference/actions.md`) +- **Extensions** (`reference/extensions.md`) +- **LSP and MCP Architecture** (`reference/lsp-mcp-architecture.md`): Protocol and server lifecycle internals +- **LSP Client Protocol** (`reference/lsp-protocol.md`): LSP client/server protocol details and custom commands + +### Developer Documentation +- **Overview** (`development.md`): Contributing to FineCode core development +- **WM Protocol** (`wm-protocol.md`): Technical protocol and endpoint reference +- **WM-ER 
Protocol** (`wm-er-protocol.md`): WM and Extension Runner protocol details +- **Developing FineCode** (`guides/developing-finecode.md`): Monorepo workflows and conventions + +### Potential Additions +- **F.A.Q.**: Common questions and troubleshooting +- **Changelog/Release Notes**: Version history and migration guides + +## Development Environment Setup + +FineCode is a monorepo containing multiple Python packages. To set up the development environment: + +### Prerequisites +- Python 3.11 - 3.14 +- Git + +### Clone and Setup +```bash +git clone https://github.com/finecode-dev/finecode.git +cd finecode + +# Create development virtual environment +python -m venv .venvs/dev_workspace +source .venvs/dev_workspace/bin/activate + +# Install development dependencies +pip install --group=dev_workspace +``` + +### Prepare Development Environments +```bash +# Prepare virtual environments for all packages +python -m finecode prepare-envs +``` + +## Project Architecture + +FineCode follows a modular architecture with clear separation of concerns: + +### Core Components + +#### Workspace Manager (`finecode/`) +The main package that: +- Discovers projects in the workspace +- Resolves configuration from multiple sources +- Manages virtual environments per tool +- Provides CLI interface +- Exposes LSP API for IDE integration +- Delegates tool execution to Extension Runners + +#### Extension Runner (`finecode_extension_runner/`) +Executes tool handlers in purpose-specific virtual environments: +- Runs inside a purpose-specific venv (e.g. `dev_no_runtime`) +- Imports and executes handler code +- Communicates with Workspace Manager via JSON-RPC/LSP + +#### Extension API (`finecode_extension_api/`) +Public API for extension authors: +- Defines action interfaces and base classes +- Provides built-in action definitions (lint, format, build, etc.) 
+- Protocol definitions for handlers and services + +### Architecture Constraints +The Workspace Manager follows strict layered architecture enforced by import-linter: + +``` +finecode.lsp_server.lsp_server ← top layer (IDE-facing) + ↓ +finecode.lsp_server.services ← service layer + ↓ +finecode.domain ← domain models (no upward imports) +``` + +LSP protocol types may only be used in `finecode.runner.runner_client` and `finecode.lsp_server.lsp_server`. + +## Building and Testing + +### Running Tests +```bash +# Run all tests +pytest tests/ + +# Run tests for specific package +pytest finecode_extension_api/tests/ +``` + +### Building Packages +```bash +# Build all packages +python -m build + +# Or use the finecode build action +python -m finecode run build_artifact +``` + +### Development Workflow +```bash +# Run linting +python -m finecode run lint + +# Check formatting +python -m finecode run check_formatting + +# Format code +python -m finecode run format +``` + +### Running in Development Mode +```bash +# Start LSP server for IDE integration testing +python -m finecode start-lsp --stdio +``` + +## Contributing Guidelines + +### Code Style +- Follow PEP 8 style guidelines +- Use type hints for all function parameters and return values +- Write docstrings in Google format +- Keep line length under 88 characters (Black default) + +### Pull Request Process +1. Fork the repository +2. Create a feature branch from `main` +3. Make your changes +4. Run tests and linting: `python -m finecode run lint check_formatting` +5. 
Submit a pull request with a clear description + +### Commit Messages +Use conventional commit format: +- `feat:` for new features +- `fix:` for bug fixes +- `docs:` for documentation changes +- `refactor:` for code refactoring +- `test:` for test additions/modifications + +### Testing Requirements +- All new code must include unit tests +- Maintain or improve code coverage +- Test both success and error paths +- Use descriptive test names + +## Release Process + +### Version Management +FineCode uses setuptools-scm for automatic versioning from git tags. + +### Release Steps +1. Update version in git tag: `git tag v0.4.0` +2. Push tag: `git push origin v0.4.0` +3. CI/CD will automatically build and publish packages + +### Package Dependencies +The monorepo contains interdependent packages that must be released in order: +1. `finecode_jsonrpc` +2. `finecode_httpclient` +3. `finecode_extension_api` +4. `finecode_extension_runner` +5. `finecode_builtin_handlers` +6. `finecode` (main package) + +### Pre-release Versions +Use alpha/beta/rc suffixes for pre-releases: +- `v0.4.0a1` (alpha 1) +- `v0.4.0b2` (beta 2) +- `v0.4.0rc1` (release candidate 1) diff --git a/docs/getting-started-ide-mcp.md b/docs/getting-started-ide-mcp.md new file mode 100644 index 00000000..b5b6c252 --- /dev/null +++ b/docs/getting-started-ide-mcp.md @@ -0,0 +1,67 @@ +# IDE and MCP Setup + +After completing the base setup in [Getting Started](getting-started.md), connect FineCode to your IDE and AI tooling. + +## VSCode setup + +Install the [FineCode VSCode extension](https://marketplace.visualstudio.com/items?itemName=VladyslavHnatiuk.finecode-vscode). 
+ +The extension: + +- Starts the FineCode LSP server when you open a workspace +- Shows diagnostics inline +- Provides code actions and quick fixes +- Supports formatting on save +- Exposes FineCode actions in the sidebar +- Integrates with the native VS Code Testing panel (discover and run tests) + +### Requirements + +- FineCode installed in `.venvs/dev_workspace` (see [Setup](getting-started.md)) +- `python -m finecode prepare-envs` run at least once + +### Configuration + +The extension auto-discovers `.venvs/dev_workspace/`. No extra extension-side project configuration is required. + +### Testing integration + +The Testing panel (beaker icon) is populated automatically when the workspace loads. It is driven by two actions from `finecode_extension_api`: + +- `ListTestsAction` — discovers tests and builds the file → class → function tree +- `RunTestsAction` — executes tests and reports pass/fail/skip/error per test case + +To use the Testing panel, you need handlers registered for both actions. The `fine_python_test` preset provides pytest-based handlers for both. If you are already using `fine_python_recommended`, testing support is included — no extra preset needed, since `fine_python_recommended` already pulls in `fine_python_test`. + +## MCP setup for AI clients + +FineCode exposes an MCP server so any MCP-compatible client can invoke FineCode actions directly. + +At a minimum, your client should launch: + +```bash +.venvs/dev_workspace/bin/python -m finecode start-mcp +``` + +Client configuration format depends on the MCP client. + +### Example: Claude Code + +Create `.mcp.json` in the workspace root: + +```json +{ + "mcpServers": { + "finecode": { + "type": "stdio", + "command": ".venvs/dev_workspace/bin/python", + "args": ["-m", "finecode", "start-mcp", "--workdir=."] + } + } +} +``` + +Claude Code discovers this file and prompts for approval on first use. + +Manual server startup is mainly for debugging and custom integration development. 
+See [LSP and MCP Architecture](reference/lsp-mcp-architecture.md#manual-server-startup-for-debugging). diff --git a/docs/getting-started.md b/docs/getting-started.md index f0a7a5ef..a6d74698 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -70,7 +70,7 @@ FineCode runs each tool handler in its own virtual environment. Set them up with python -m finecode prepare-envs ``` -This installs the handler dependencies (ruff, flake8, etc.) into isolated venvs under `.venvs/`. +This creates purpose-specific venvs under `.venvs/` and installs handler dependencies (e.g. ruff, flake8, etc.) into them. ## 5. Run actions @@ -90,7 +90,7 @@ python -m finecode run --concurrently lint check_formatting ## Next steps -- [IDE Integration](ide-integration.md) — set up the VSCode extension for real-time feedback +- [IDE and MCP Setup](getting-started-ide-mcp.md) — connect FineCode to VSCode and MCP-compatible AI clients - [Configuration](configuration.md) — customize tool settings and override handler config - [Concepts](concepts.md) — understand how Actions, Handlers, and Presets fit together - [Creating an Extension](guides/creating-extension.md) — write your own tool integration diff --git a/docs/glossary.md b/docs/glossary.md new file mode 100644 index 00000000..a73f3f76 --- /dev/null +++ b/docs/glossary.md @@ -0,0 +1,45 @@ +# Glossary + +## Action + +A named operation (for example `lint`, `format`, `build_artifact`). + +## Action Handler + +A concrete implementation of an action. Multiple handlers can be registered for a single action, and they run sequentially or concurrently. + +## Execution Environment + +A named, isolated context in which handlers and project code execute (e.g. `runtime`, `dev_workspace`, `dev_no_runtime`). Each execution environment has its own dependency set, serving a specific purpose — for example, the project's runtime, dev tooling, or test execution. 
The concept is inter-language; in Python each execution environment is materialized as a virtual environment. Configuration uses the shorthand `env`. + +## Extension Runner (ER) + +A process that runs inside a specific execution environment and executes action handler code. The Workspace Manager spawns one ER per (project, execution environment) pair, on demand. ERs communicate with the WM over JSON-RPC. The concept is inter-language — `finecode_extension_runner` is the Python implementation. + +## Preset + +A reusable, distributable bundle of action and handler declarations. Users reference a preset in their project configuration; its declarations merge with the project's own configuration, giving full control to override or disable individual handlers. The concept is inter-language — in Python, presets are distributed as packages installed into the `dev_workspace` execution environment. + +## Service + +A long-lived dependency injected into handlers by interface. + +## Source Artifact + +A unit of source code that build/publish-style actions operate on. It is identified by a **source artifact definition file** (for example `pyproject.toml` or `package.json`). This is what many tools call a “project”, but FineCode uses **source artifact** to be more concrete. + +## Source Artifact Definition + +The definition file for a source artifact (for example content of `pyproject.toml`). + +## Virtual Environment + +The Python-specific materialization of an execution environment. FineCode creates one virtual environment per environment name per project at `.venvs/{env_name}/` and installs the declared handler dependencies into it. Created by `prepare-envs`. + +## Workspace + +A set of related source artifacts a developer is working on. Often this is a single directory root, but it can also be multiple directories (workspace roots). FineCode can run actions across all source artifacts that include FineCode configuration. 
(Some CLI flags and protocol fields still use the word “project” for compatibility.) + +## Workspace Manager (WM) + +A long-running server that discovers source artifacts, resolves merged configuration, manages execution environments, exposes an LSP and MCP API to clients, and delegates action execution to Extension Runners. Typically one shared WM instance runs per virtual environment. diff --git a/docs/guides/developing-finecode.md b/docs/guides/developing-finecode.md new file mode 100644 index 00000000..e21fdbc5 --- /dev/null +++ b/docs/guides/developing-finecode.md @@ -0,0 +1,128 @@ +# Developing FineCode + +This guide is for developers contributing to FineCode itself — the monorepo structure, conventions, and workflows used internally. + +## Repository structure + +The repo is a monorepo. Each package has its own `pyproject.toml`. The root directory is the workspace. + +```text +finecode/ # Main package (Workspace Manager) +finecode_extension_api/ # Public API for extension authors +finecode_extension_runner/ # Extension execution engine +finecode_jsonrpc/ # JSON-RPC client/transport layer +finecode_httpclient/ # HTTP client for extensions +finecode_builtin_handlers/ # Built-in action handlers +extensions/ # Extension packages (ruff, flake8, mypy, ...) +presets/ # Preset packages (recommended, lint, format) +finecode_dev_common_preset/ # Preset used for developing FineCode itself +tests/ # Test suite +``` + +## Setting up the development environment + +```bash +# From the repo root, inside the dev_workspace venv: +python -m finecode prepare-envs +``` + +## Running checks + +```bash +python -m finecode run lint +python -m finecode run check_formatting +pytest tests/ +``` + +## Dependency lock files + +FineCode uses [pylock.toml](https://packaging.python.org/en/latest/specifications/pylock-toml/) lock files for reproducible dependency installation. 
+ +### Why lock files + +Without lock files, `prepare-envs` resolves dependency versions from the ranges declared in `pyproject.toml` at install time. This means two developers (or CI runs) can end up with different versions depending on when they ran the command. Lock files pin exact versions for reproducible environments. + +### Lock files are environment-specific + +Each FineCode environment (`dev_workspace`, `dev_no_runtime`, `runtime`, etc.) has its own set of dependencies, so each needs its own lock file: + +```text +pylock..toml +``` + +For example: + +```text +myproject/ + pyproject.toml + pylock.dev_workspace.toml + pylock.dev_no_runtime.toml + pylock.runtime.toml +``` + +### Lock files are platform- and Python version-specific + +A lock file records the exact dependency resolution for one platform and one Python version. The same `pyproject.toml` can resolve differently on Linux vs macOS, or Python 3.12 vs 3.13. + +If the project targets a single platform and Python version, one lock file per env is enough. For multiple targets, encode platform and version into the file name (the `` segment in `pylock..toml` must not contain dots): + +```text +myproject/ + pyproject.toml + locks/ + pylock.dev_workspace-linux-py312.toml + pylock.dev_workspace-linux-py313.toml + pylock.dev_workspace-macos-py312.toml + pylock.dev_no_runtime-linux-py312.toml + ... +``` + +### Generating lock files + +Use the `lock_dependencies` action: + +```bash +python -m finecode run lock_dependencies \ + --src_artifact_def_path=pyproject.toml \ + --output_path=pylock.dev_workspace.toml +``` + +For the Python ecosystem, the `PipLockDependenciesHandler` runs `pip lock` under the hood. + +### Installing from lock files + +The `PrepareEnvsInstallDepsFromLockHandler` is an alternative to `PrepareEnvsInstallDepsHandler`. Instead of reading dependency versions from `pyproject.toml`, it parses the lock file and passes the pinned versions to `install_deps_in_env`. 
+ +By default it looks for `pylock..toml` next to the project's `pyproject.toml`. If a lock file is not found for an env, it is skipped with a warning. + +### Lock files in CI + +Lock files should be committed to the repository. CI should install from them, not regenerate them: + +```bash +# CI installs from existing lock files — reproducible +python -m finecode prepare-envs +``` + +To update lock files, run `lock_dependencies` locally or in a scheduled CI job and commit the result. For multi-platform projects, use a CI matrix to generate lock files on each target platform. + + +## Code Style + +### Typing + +- type the code +-- use complete types, no holes in generics like `list` instead of `list[int]` + +### Imports + +- keep imports at the top of the module +- keep imports at the root level of the module +-- there are 2 exceptions: + - you need to avoid a circular dependency (usually it means there is a problem in code structure) + - you want to avoid loading the module on startup (e.g. don't import all CLI command handlers if only one is needed for the current CLI call) + +### Exports + +- explicitly export public module members using `__all__` +-- it may not contain dynamic elements, only literal strings diff --git a/docs/guides/preparing-environments.md b/docs/guides/preparing-environments.md new file mode 100644 index 00000000..d9c2c417 --- /dev/null +++ b/docs/guides/preparing-environments.md @@ -0,0 +1,122 @@ +# Preparing Environments + +FineCode runs handlers in purpose-specific virtual environments. Handlers that share the same `env` name (e.g. `dev_no_runtime`) run in the same virtualenv. Before handlers can execute, their environments must exist and contain the right dependencies. This guide explains how that process works and how to control it. 
+ +## The two-step sequence + +Environment preparation is split into two distinct actions that must run in order: + +``` +create_envs → install_envs +``` + +### Step 1 — `create_envs` + +Creates the virtual environments (`.venvs//`) discovered from the project's `dependency-groups`. No packages are installed yet. + +Each env name found in `[dependency-groups]` becomes a virtualenv: + +```toml +[dependency-groups] +dev_workspace = ["finecode==0.3.*", ...] +dev_no_runtime = ["fine_python_ruff~=0.2.0", ...] +runtime = ["fastapi>=0.100", ...] +``` + +→ Creates `.venvs/dev_workspace/`, `.venvs/dev_no_runtime/`, `.venvs/runtime/`. + +### Step 2 — `install_envs` + +Installs the full dependency set into each virtualenv. This reads the `dependency-groups` entries and calls `install_deps_in_env` for each env, including `finecode_extension_runner` and all handler tool dependencies (e.g. ruff, mypy). + +After this step every handler has all its dependencies available and can execute. + +--- + +## The `dev_workspace` bootstrap + +The `dev_workspace` env is special: it contains FineCode itself and the preset packages. The handlers that implement `create_envs` and `install_envs` live inside `dev_workspace` — which creates a bootstrapping constraint. + +### Workspace root bootstrap (manual, one-time) + +The workspace root's `dev_workspace` is the **seed** for everything. `prepare-envs` cannot run unless FineCode is already installed somewhere, so the workspace root's `dev_workspace` must be created manually on a fresh checkout: + +```bash +python -m venv .venvs/dev_workspace +source .venvs/dev_workspace/bin/activate # Windows: .venvs\dev_workspace\Scripts\activate +python -m pip install --group="dev_workspace" +``` + +This is the only step that cannot be automated by FineCode itself. See [Getting Started](../getting-started.md) for the full first-time setup sequence. 
+ +### Subproject bootstrap (automated by `prepare-envs`) + +For subprojects in the workspace, `prepare-envs` creates their `dev_workspace` envs automatically — **before** starting any runners — using the workspace root's handler configuration: + +1. `create_envs` (subproject `dev_workspace` envs) — create the venvs +2. `install_envs` (subproject `dev_workspace` envs) — install FineCode + presets + +**Requirement:** the workspace root's `create_envs` and `install_envs` configuration must produce a valid `dev_workspace` for every subproject. In practice this is rarely a constraint: `dev_workspace` envs exist only to run FineCode and preset packages, so their setup is uniform across projects. If a subproject genuinely requires different handler configuration for either action, its `dev_workspace` must be bootstrapped manually the same way as the workspace root's. + +Only after all `dev_workspace` envs exist are runners started, and only then can the remaining steps run across all envs. + +--- + +## CLI command + +The `prepare-envs` command runs the full sequence automatically: + +```bash +python -m finecode prepare-envs +``` + +This is the only command most users need. It: + +1. Discovers all projects in the workspace +2. Bootstraps `dev_workspace` for each subproject (`create_envs` + `install_envs`, using workspace root config) +3. Starts Extension Runners +4. Runs `create_envs` across all projects +5. Runs `install_envs` across all projects + +See [CLI reference — prepare-envs](../cli.md#prepare-envs) for available options. + +### Re-creating environments + +```bash +python -m finecode prepare-envs --recreate +``` + +Deletes all existing virtualenvs and rebuilds them from scratch. Use this when a venv becomes corrupted or when you want a clean slate after dependency changes. + +### Filtering by project + +```bash +python -m finecode prepare-envs --project=package_a --project=package_b +``` + +Only prepares environments for the listed projects. 
Useful in large workspaces with multiple projects when you've only changed dependencies for a subset of packages. + +### Filtering by environment name + +```bash +python -m finecode prepare-envs --env-names=dev_no_runtime +``` + +Restricts the `install_envs` step (step 2) to the named environments. The `create_envs` step still runs for **all** envs regardless of this flag. + +**Why?** Virtualenvs must exist for every env — they are cheap to create and skip if already valid. Filtering at that step would leave envs in a broken state if they don't exist yet. + +Useful when you've added a new handler in one env and want to update only that env without reinstalling everything. + +--- + +## Calling actions directly + +The two actions (`create_envs`, `install_envs`) are standard FineCode actions and can be invoked individually via the WM API or `python -m finecode run`. This is useful when writing custom orchestration. + +| Action | Source | +|---|---| +| `create_envs` | `finecode_extension_api.actions.create_envs.CreateEnvsAction` | +| `install_envs` | `finecode_extension_api.actions.install_envs.InstallEnvsAction` | + +See [Built-in Actions reference](../reference/actions.md) for payload fields and result types. diff --git a/docs/guides/workspace.md b/docs/guides/workspace.md index 1ffdf637..7ef5faee 100644 --- a/docs/guides/workspace.md +++ b/docs/guides/workspace.md @@ -1,19 +1,22 @@ # Multi-Project Workspace -FineCode natively supports workspaces containing multiple projects. This is common in monorepos where each package is a separate Python project. +FineCode natively supports workspaces containing multiple source artifacts. This is common in monorepos where each package is a separate source artifact. ## Structure -A workspace is a directory containing one or more projects. Each project has its own `pyproject.toml` with `[tool.finecode]`: +A workspace is a set of related source artifacts. 
Often this is a single directory root, but IDEs and clients can provide multiple workspace roots. Each source artifact has its own `pyproject.toml` with `[tool.finecode]`: + +!!! note + The CLI uses a single workspace root (`cwd` or `--workdir`). IDEs/LSP clients can provide multiple roots, and the Workspace Manager treats them as one workspace. ``` my_workspace/ pyproject.toml ← workspace-level (optional) package_a/ - pyproject.toml ← project A + pyproject.toml ← source artifact A src/package_a/ package_b/ - pyproject.toml ← project B + pyproject.toml ← source artifact B src/package_b/ common_preset/ ← shared preset package pyproject.toml @@ -23,13 +26,13 @@ my_workspace/ ## Running actions across all projects -Run from the workspace root to target all projects: +Run from the workspace root to target all source artifacts: ```bash python -m finecode run lint ``` -FineCode discovers all `pyproject.toml` files under the workspace root, finds those with `[tool.finecode]`, and runs the action in each. +FineCode discovers all `pyproject.toml` files under the workspace root, finds those with `[tool.finecode]`, and runs the action in each source artifact. To run concurrently across projects: @@ -37,25 +40,25 @@ To run concurrently across projects: python -m finecode run --concurrently lint check_formatting ``` -## Filtering to specific projects +## Filtering to specific source artifacts ```bash -# Single project +# Single source artifact python -m finecode run --project=package_a lint -# Multiple projects +# Multiple source artifacts python -m finecode run --project=package_a --project=package_b lint ``` -When `--project` is specified, the action must exist in all listed projects. +When `--project` is specified, the action must exist in all listed source artifacts. -## Sharing configuration across projects +## Sharing configuration across source artifacts -The recommended approach for sharing config is a **local preset package** in the workspace. 
Each subproject installs it as a dependency and references it in `pyproject.toml`. +The recommended approach for sharing config is a **local preset package** in the workspace. Each source artifact installs it as a dependency and references it in `pyproject.toml`. **Why a package, not hierarchical config?** -- Subprojects don't depend on workspace directory structure — they can be moved or extracted without changing tool config +- Source artifacts don't depend on workspace directory structure — they can be moved or extracted without changing tool config - Configuration is fully explicit: the complete config is visible inside each subproject - No implicit workspace-root lookup needed @@ -90,7 +93,7 @@ presets = [{ source = "my_lint_config" }] ## Saving and reading action results -Results of actions are saved to `/cache/finecode/results/.json`, keyed by project path. This makes it easy to collect results from all projects in CI: +Results of actions are saved to `/cache/finecode/results/.json`, keyed by source artifact path. This makes it easy to collect results from all source artifacts in CI: ```bash python -m finecode run --concurrently lint check_formatting @@ -106,7 +109,7 @@ python -m finecode run --no-save-results lint ## CI usage ```bash -# Run lint and formatting check in all projects, fail if any fails +# Run lint and formatting check in all source artifacts, fail if any fails python -m finecode run --concurrently lint check_formatting # Save results for later processing diff --git a/docs/ide-integration.md b/docs/ide-integration.md deleted file mode 100644 index 2947e194..00000000 --- a/docs/ide-integration.md +++ /dev/null @@ -1,70 +0,0 @@ -# IDE Integration - -FineCode exposes a standard **Language Server Protocol (LSP)** server that IDE extensions connect to. This gives you real-time diagnostics, code actions, formatting, and more — powered by the same tool configurations you use in the CLI. 
- -## VSCode - -Install the [FineCode VSCode extension](https://github.com/finecode-dev/finecode-vscode). - -The extension: - -- Automatically starts the FineCode LSP server when you open a workspace -- Shows linting diagnostics inline as you type -- Provides quick-fix code actions -- Formats files on save (when configured) -- Exposes the FineCode action tree in the sidebar - -### Requirements - -- FineCode installed in your `dev_workspace` venv (see [Getting Started](getting-started.md)) -- `prepare-envs` run at least once so handler venvs are set up - -### Configuration - -The extension discovers the `dev_workspace` venv automatically from `.venvs/dev_workspace/`. No per-project extension configuration is required — everything comes from `pyproject.toml`. - -## How the LSP server works - -```mermaid -sequenceDiagram - participant IDE as IDE Extension - participant WM as Workspace Manager (LSP) - participant ER as Extension Runner - - IDE->>WM: textDocument/didOpen - WM->>ER: run lint_files action - ER-->>WM: LintFilesRunResult (diagnostics) - WM-->>IDE: textDocument/publishDiagnostics - - IDE->>WM: textDocument/formatting - WM->>ER: run format_files action - ER-->>WM: FormatFilesRunResult (edits) - WM-->>IDE: TextEdit[] -``` - -The WM translates LSP requests into FineCode actions and delegates execution to the appropriate Extension Runner. Results are translated back into LSP responses. - -## Starting the server manually - -If you need to connect a custom client or debug the server: - -```bash -# stdio (most common for LSP clients) -python -m finecode start-api --stdio - -# TCP (useful for debugging) -python -m finecode start-api --socket 2087 - -# WebSocket -python -m finecode start-api --ws --port 2087 -``` - -## MCP server - -FineCode also supports the **Model Context Protocol (MCP)**, which allows AI agents to invoke FineCode actions directly. 
- -```bash -python -m finecode start-api --stdio --mcp --mcp-port 3000 -``` - -This starts both the LSP server (for IDE) and an MCP server simultaneously. diff --git a/docs/index.md b/docs/index.md index 65bcefef..305e79f8 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,15 +1,16 @@ # FineCode -**Stop configuring tools. Start using them.** +**FineCode gives you one workflow for code quality and developer tooling across CLI, IDE, CI, and AI assistants.** -Every Python project needs linting, formatting, type checking. And in every project you end up doing the same thing: installing the same tools, writing the same configuration, wiring them up to your IDE — again. +FineCode organizes your tooling so tasks like linting, formatting, type checking, build, and publish follow one consistent workflow and can be reused across projects. -FineCode solves this once. +Start in one repository in minutes. Then package the same setup and reuse it across your other projects. -## One line to get linting and formatting +## Start in minutes + +Add FineCode and a preset to your `pyproject.toml`: ```toml -# pyproject.toml [dependency-groups] dev_workspace = ["finecode==0.3.*", "fine_python_recommended==0.3.*"] @@ -17,121 +18,156 @@ dev_workspace = ["finecode==0.3.*", "fine_python_recommended==0.3.*"] presets = [{ source = "fine_python_recommended" }] ``` +Run FineCode: + ```bash +# initial one-time setup +python -m venv .venvs/dev_workspace +source .venvs/dev_workspace/bin/activate # Windows: .venvs\dev_workspace\Scripts\activate +python -m pip install --group="dev_workspace" python -m finecode prepare-envs -python -m finecode run lint check_formatting -``` -That's it. Ruff, Flake8, and isort — installed, configured, and running. No per-tool setup, no config files to write. +# now you are ready to use finecode +# e.g. lint the whole workspace: +python -m finecode run lint +``` -## Your IDE just works +NOTE: `pip install --group` requires pip 25.1+. 
-Install the [VSCode extension](ide-integration.md) and get inline diagnostics, quick fixes, and format-on-save — powered by the same configuration as your CLI. No separate language server setup, no per-project extension configuration. +This gives you a working Python baseline with Ruff, Flake8, and isort through one shared config entry point. -## Share configuration across projects +## From one project to reusable standard -Package your tool configuration and share it across your team's projects as a regular Python package: +Once this works in one project, turn it into a shared preset and use it across repositories: ```toml -# Any project that wants your standard setup: +[dependency-groups] +dev_workspace = ["finecode==0.3.*", "my_team_standards==0.1.*"] + [tool.finecode] presets = [{ source = "my_team_standards" }] ``` -Update the preset package — all projects pick it up on next install. No drift, no copy-paste. +```bash +python -m pip install --group="dev_workspace" +python -m finecode prepare-envs +``` -## Tools stay out of your project +Projects can then adopt updates through normal dependency updates. +See [Creating a Preset](guides/creating-preset.md) for the packaging flow. -Dev tools, runtime dependencies, and your project stay in separate virtual environments. Ruff's dependencies don't mix with your project's dependencies. Mypy doesn't break because something else updated a package. Everything is contained. +You can also combine presets in one project (for example, a language preset and a team preset): + +```toml +[tool.finecode] +presets = [ + { source = "fine_python_recommended" }, + { source = "my_team_standards" }, +] +``` -## Your rules, not ours +Typical rollout: -Presets give you a working setup instantly, but nothing is locked in. Every default can be overridden: +1. Start with a preset in one repository. +2. Tune handlers and action config to fit your workflow. +3. Publish that setup as a preset package for your team. +4. 
Reuse it across repositories. -```toml -# Adjust a single handler's config -[[tool.finecode.action_handler]] -source = "fine_python_ruff.RuffLintFilesHandler" -config.line_length = 120 +## Why developers use FineCode -# Swap out individual tools while keeping the rest of the preset -[tool.finecode.action.lint] -handlers = [ - { name = "flake8", disabled = true }, - { name = "my_linter", source = "my_team.MyLinterHandler", ... }, -] +### Core benefits for every project -# Or replace everything and build from scratch -[tool.finecode.action.lint] -handlers_mode = "replace" -handlers = [...] -``` +- Keep tooling config in one place instead of per-tool config sprawl +- Use the same actions in terminal, IDE, and AI-assisted workflows +- Spend less time wiring tools together and more time shipping code +- Keep local runs and CI behavior aligned around the same actions -You can adopt FineCode incrementally — start with a preset, customise as needed, replace entirely if you want. There's no framework lock-in. 
+#### One command surface for local, CI, IDE, and AI -## Virtual environment management included +Use the same actions and config everywhere: -FineCode manages virtual environments for you, with a clear separation by purpose: +- IDE: [VSCode extension setup](getting-started-ide-mcp.md#vscode-setup) +- AI assistants: [MCP setup for AI clients](getting-started-ide-mcp.md#mcp-setup-for-ai-clients) +- Local CLI: `python -m finecode run lint check_formatting` +- CI: `python -m finecode run lint check_formatting` +- Git hooks: run FineCode actions before commit without requiring `pre-commit` + +#### Isolated environments by purpose + +FineCode keeps developer tooling separate from runtime dependencies: ```text .venvs/ - dev_workspace/ ← FineCode itself, presets, dev tools - dev_no_runtime/ ← linters, formatters, type checkers - runtime/ ← your project's runtime dependencies - docs/ ← documentation tools + dev_workspace/ <- FineCode and presets + dev_no_runtime/ <- lint/format/type-check handlers + dev/ <- tooling that imports project code during execution + runtime/ <- project runtime dependencies ``` -Each tool runs in the right environment. Runtime dependencies never get polluted by dev tools, and dev tools never break because a runtime package updated. +This reduces dependency cross-talk and makes tool execution more predictable. -In a monorepo with many packages, this becomes especially valuable — FineCode handles environment setup across all of them automatically. No manual venv juggling, no shared environment where everything mixes together. +These environment roles are examples, not fixed requirements. You can shape the layout to match your workflow. -```bash -# One command sets up all environments across all packages -python -m finecode prepare-envs -``` +#### Workspace-aware by design -## Simpler CI, any platform +FineCode understands your workspace as a whole, including how individual subprojects fit together. 
-Without FineCode, CI pipelines tend to grow — separate steps for each tool, platform-specific workarounds, duplicated logic between local and CI runs. +Actions can target a single project or the entire workspace, so tasks like linting every subproject run from one command. -With FineCode, your CI is just: +#### Polyglot workflow, one action surface -```yaml -- run: python -m finecode run lint check_formatting -``` +FineCode actions are not tied to a single language. A single action can include handlers for different file types (for example Python code, Markdown docs, and JSON/TOML config) while keeping one shared command surface. -That's the same command you run locally. FineCode handles tool invocation, configuration, and environment differences. Switch CI provider, add a new linter, change a tool version — the pipeline stays the same. +In practice, this means you can keep using the same `lint` and `format` actions across mixed repositories today. Broader first-class preset coverage for combinations such as Python + Rust is on the roadmap. -## Not just linting and formatting +### Additional benefits for teams -FineCode ships with built-in actions for the most common workflows — lint, format, type-check, build, publish — but actions are just Python classes. You can define your own for anything that fits your development process: running migrations, generating code, validating architecture, checking licenses, or anything specific to your project. +- Keep standards centralized in a shared preset package +- Roll out toolchain changes through normal dependency updates +- Keep rollout predictable by updating a shared preset package instead of editing each repository separately -Your custom actions get the same CLI interface, IDE integration, and environment isolation as the built-in ones — for free. +## Flexible, no lock-in -## Extend it with your own tools +Presets are a starting point, not a ceiling. 
+ +Disable or tune individual handlers: + +```toml +[[tool.finecode.action_handler]] +source = "fine_python_ruff.RuffLintFilesHandler" +config.line_length = 120 -FineCode has a clean handler interface. If you have an internal tool, a custom linter, or anything that fits into a lint/format/build workflow — you can plug it in and get CLI and IDE integration for free. +[tool.finecode.action.lint] +handlers = [ + { name = "flake8", enabled = false }, + { name = "my_linter", source = "my_team.MyLinterHandler" }, +] +``` -```python -class MyLinterHandler(ActionHandler[...]): - action = LintFilesAction +Replace an action handler set completely: - async def run(self, payload, context) -> LintFilesRunResult: - ... # your tool logic here +```toml +[tool.finecode.action.lint] +handlers_mode = "replace" +handlers = [ + { name = "my_linter", source = "my_team.MyLinterHandler" }, +] ``` -## Your AI assistant knows your tools +You can also add custom actions and handlers for project-specific workflows. -FineCode exposes an [MCP server](ide-integration.md#mcp-server) that AI assistants connect to. Instead of guessing which linter you use, how to run it, or what flags to pass — the assistant gets the exact tool configuration from your project directly. +## Proven in this repository -No explanations needed. No wrong commands. The assistant just knows. +FineCode is used to run quality actions in the FineCode repository itself. -## Community +- TODO: Add repository-scale metrics (actions/day, CI duration impact, setup time before/after) + +## Ready to try FineCode? -Have questions, ideas, or want to share what you've built? Join the FineCode community on [Discord](https://discord.gg/nwb3CRVN). +[Get started in 5 minutes ->](getting-started.md) -## Ready to try it? 
+See also: [Concepts](concepts.md), [Configuration](configuration.md), [available presets and extensions](reference/extensions.md) -[Get started in 5 minutes →](getting-started.md) +## Community -Or browse what's included: [available presets and extensions](reference/extensions.md). +Have questions or feedback? Join [Discord](https://discord.gg/nwb3CRVN). diff --git a/docs/reference/actions.md b/docs/reference/actions.md index 7bab75bf..25d62c2b 100644 --- a/docs/reference/actions.md +++ b/docs/reference/actions.md @@ -6,7 +6,7 @@ All built-in actions are defined in `finecode_extension_api.actions`. Use their ## `lint` -Run linting on a project or specific files. +Run linting on a source artifact or specific files. - **Source:** `finecode_extension_api.actions.lint.LintAction` - **Default handler execution:** concurrent @@ -15,7 +15,7 @@ Run linting on a project or specific files. | Field | Type | Default | Description | |---|---|---|---| -| `target` | `"project"` \| `"files"` | `"project"` | Lint the whole project or specific files | +| `target` | `"project"` \| `"files"` | `"project"` | Lint the whole source artifact (`target="project"`) or specific files | | `file_paths` | `list[Path]` | `[]` | Files to lint (required when `target="files"`) | **Result:** list of diagnostics (file, line, column, message, severity) @@ -35,7 +35,7 @@ Similar to `lint` but designed for language-aware per-file linting. Used interna ## `format` -Format a project or specific files. +Format a source artifact or specific files. - **Source:** `finecode_extension_api.actions.format.FormatAction` - **Default handler execution:** sequential @@ -45,7 +45,7 @@ Format a project or specific files. 
| Field | Type | Default | Description | |---|---|---|---| | `save` | `bool` | `true` | Write formatted content back to disk | -| `target` | `"project"` \| `"files"` | `"project"` | Format whole project or specific files | +| `target` | `"project"` \| `"files"` | `"project"` | Format the whole source artifact (`target="project"`) or specific files | | `file_paths` | `list[Path]` | `[]` | Files to format (required when `target="files"`) | !!! note @@ -84,7 +84,7 @@ Build a distributable artifact (e.g. a Python wheel). | Field | Type | Default | Description | |---|---|---|---| -| `src_artifact_def_path` | `Path \| None` | `None` | Path to the artifact definition. If omitted, builds the current project. | +| `src_artifact_def_path` | `Path \| None` | `None` | Path to the artifact definition. If omitted, builds the current source artifact. | **Result fields:** @@ -169,13 +169,29 @@ Group source files by language (internal, used by language-aware actions). --- -## `prepare_envs` +## `create_envs` -Set up virtual environments for all handler dependencies. +Create virtual environments for all envs discovered from the project's dependency-groups. -- **Source:** `finecode_extension_api.actions.prepare_envs.PrepareEnvsAction` +- **Source:** `finecode_extension_api.actions.create_envs.CreateEnvsAction` -Also available as the `python -m finecode prepare-envs` CLI command. +--- + +## `prepare_runner_envs` + +Install Extension Runners into virtualenvs (internal, called by the Workspace Manager). + +- **Source:** `finecode_extension_api.actions.prepare_runner_envs.PrepareRunnerEnvsAction` + +--- + +## `prepare_handler_envs` + +Install handler dependencies into virtualenvs. + +- **Source:** `finecode_extension_api.actions.prepare_handler_envs.PrepareHandlerEnvsAction` + +The `python -m finecode prepare-envs` CLI command runs `create_envs`, `prepare_runner_envs`, and `prepare_handler_envs` in sequence. --- @@ -189,7 +205,7 @@ Install dependencies into a specific environment. 
## `dump_config` -Dump the resolved configuration for a project. +Dump the resolved configuration for a source artifact that includes FineCode configuration. - **Source:** `finecode_extension_api.actions.dump_config.DumpConfigAction` @@ -205,14 +221,6 @@ Initialize a repository provider (used in artifact publishing flows). --- -## `prepare_runners` - -Prepare Extension Runners (internal, called by the Workspace Manager). - -- **Source:** `finecode_extension_api.actions.prepare_runners.PrepareRunnersAction` - ---- - ## `clean_finecode_logs` Remove FineCode log files. diff --git a/docs/reference/lsp-mcp-architecture.md b/docs/reference/lsp-mcp-architecture.md new file mode 100644 index 00000000..19d354dc --- /dev/null +++ b/docs/reference/lsp-mcp-architecture.md @@ -0,0 +1,66 @@ +# LSP and MCP Architecture + +FineCode uses one shared Workspace Manager Server (WM Server) for CLI, IDE (LSP), and AI-agent (MCP) clients. + +## Component model + +```text +FineCode WM Server (TCP JSON-RPC, auto-managed) +├── WorkspaceContext, runners, services +└── auto-stops when no clients remain + +LSP Server (start-lsp, started by IDE) +└── connects to WM Server (starts one if needed) + +MCP Server (start-mcp, started by MCP client) +└── connects to WM Server (starts one if needed) +``` + +The WM Server writes its port to `.venvs/dev_workspace/cache/finecode/wm_port` for client discovery. 
+ +## LSP request flow + +```mermaid +sequenceDiagram + participant IDE as IDE Extension + participant WM as Workspace Manager (LSP) + participant ER as Extension Runner + + IDE->>WM: textDocument/didOpen + WM->>ER: run lint_files action + ER-->>WM: LintFilesRunResult (diagnostics) + WM-->>IDE: textDocument/publishDiagnostics + + IDE->>WM: textDocument/formatting + WM->>ER: run format_files action + ER-->>WM: FormatFilesRunResult (edits) + WM-->>IDE: TextEdit[] +``` + +The LSP layer translates protocol messages into FineCode actions, delegates execution to extension runners, then translates results back into LSP responses. + +## Lifecycle behavior + +- Any client (CLI, LSP, MCP) can start the WM Server if it is not already running. +- Each connected client keeps the WM Server alive. +- When the last client disconnects, the WM Server exits automatically. + +## Manual server startup for debugging + +Most users should not start servers manually. IDE and MCP clients usually manage startup automatically. + +Use manual startup when: + +- Debugging LSP/MCP behavior +- Developing a new IDE integration +- Developing a new MCP client integration + +```bash +# LSP server (for custom IDE clients) +python -m finecode start-lsp --stdio + +# MCP server +python -m finecode start-mcp +``` + +For setup instructions, see [IDE and MCP Setup](../getting-started-ide-mcp.md). diff --git a/docs/reference/lsp-protocol.md b/docs/reference/lsp-protocol.md new file mode 100644 index 00000000..cc91533c --- /dev/null +++ b/docs/reference/lsp-protocol.md @@ -0,0 +1,118 @@ +# FineCode LSP Client Protocol + +This document describes the communication between the FineCode LSP server +(`src/finecode/lsp_server/lsp_server.py`) and LSP clients (currently the +FineCode VSCode extension). It focuses on FineCode-specific behavior and +custom commands; standard LSP features are listed without detailed spec. 
+
+## Transport
+
+- JSON-RPC 2.0 over standard LSP framing (`Content-Length: N\r\n\r\n{json}`)
+- Transport: stdio (the server starts with `start_io_async()`)
+
+## Lifecycle Notes
+
+- The LSP server waits for the standard `initialized` notification and then
+  ensures the WM Server is running and connects to it.
+- If WM Server connection fails, FineCode features that depend on WM will fail.
+- Normal LSP shutdown closes the WM client connection.
+- Use the custom `server/shutdown` request (below) to explicitly stop the WM
+  Server when the IDE wants to restart it.
+
+## Standard LSP Features (Implemented)
+
+The server implements the following LSP features (standard behavior; no
+FineCode-specific protocol additions):
+
+- `workspace/didChangeWorkspaceFolders`
+- `textDocument/didOpen`
+- `textDocument/didSave`
+- `textDocument/didChange`
+- `textDocument/didClose`
+- `textDocument/formatting`
+- `textDocument/rangeFormatting`
+- `textDocument/rangesFormatting`
+- `textDocument/codeAction`
+- `codeAction/resolve`
+- `textDocument/codeLens`
+- `codeLens/resolve`
+- `textDocument/diagnostic`
+- `workspace/diagnostic`
+- `textDocument/inlayHint`
+- `inlayHint/resolve`
+- `shutdown`
+
+## Custom FineCode Commands (`workspace/executeCommand`)
+
+FineCode exposes IDE commands via the standard `workspace/executeCommand` LSP
+method. The command names below are advertised by the server and routed to WM
+Server APIs. Params are passed as the `arguments` array of `executeCommand`. 
+
+### Action Tree and Projects
+
+- `finecode.getActions`
+  - Params: `parent_node_id` (string or `null`)
+  - Returns: action tree under the given node
+
+- `finecode.getActionsForPosition`
+  - Params: position object (currently ignored)
+  - Returns: action tree (currently full tree)
+
+- `finecode.listProjects`
+  - Params: none
+  - Returns: list of workspace projects
+
+### Action Execution
+
+- `finecode.runAction`
+  - Params: `{ "action": str, "project": str, "params"?: object, "options"?: object }`
+  - Behavior: forwards to WM `actions/run`
+
+- `finecode.runActionOnFile`
+  - Params: `{ "projectPath": "<project path>::<action name>" }`
+  - Behavior:
+    - Requests `editor/documentMeta` from the client to get the active file
+    - Runs the action with `target=files` for that file
+
+- `finecode.runActionOnProject`
+  - Params: `{ "projectPath": "<project path>::<action name>" }`
+  - Behavior: runs the action with `target=project`
+
+### Action/Runner Management
+
+- `finecode.reloadAction`
+  - Params: `{ "projectPath": "<project path>::<action name>" }`
+  - Behavior: forwards to WM `actions/reload`
+
+- `finecode.reset`
+  - Params: none
+  - Behavior: forwards to WM `server/reset`
+
+- `finecode.restartExtensionRunner`
+  - Params: `{ "projectPath": "<project path>::<env name>" }`
+  - Behavior: forwards to WM `runners/restart` with `debug=false`
+
+- `finecode.restartAndDebugExtensionRunner`
+  - Params: `{ "projectPath": "<project path>::<env name>" }`
+  - Behavior: forwards to WM `runners/restart` with `debug=true`
+
+## Custom LSP Requests
+
+### Client → Server
+
+- `server/shutdown`
+  - Params: `{}`
+  - Result: `{}`
+  - Purpose: explicitly stops the WM Server, then closes the WM client
+
+### Server → Client
+
+- `editor/documentMeta`
+  - Params: `{}`
+  - Result: document metadata for the active editor
+  - Minimum required fields: `uri` with a valid filesystem path
+
+- `ide/startDebugging`
+  - Params: debug configuration object (VSCode `debugpy` attach configuration)
+  - Result: any value (used for logging only)
+  - Purpose: starts a debug session when a runner is restarted with debug
diff 
--git a/docs/reference/services.md b/docs/reference/services.md new file mode 100644 index 00000000..5837d454 --- /dev/null +++ b/docs/reference/services.md @@ -0,0 +1,46 @@ +# Services + +Services are long-lived dependencies that handlers (and other services) can request via dependency injection. This page lists the services that ship in this repo and where they are registered. Availability depends on whether the Extension Runner provides the service, a preset declares it, or an extension activates it. + +## Core services (always available) + +These services are registered by the Extension Runner at startup and are available in every handler without extra configuration. + +| Interface | Default implementation | Notes | +| --- | --- | --- | +| `finecode_extension_api.interfaces.ilogger.ILogger` | `loguru.logger` via `finecode_extension_runner.impls.loguru_logger.get_logger` | Logging (trace/debug/info/warn/error/exception). | +| `finecode_extension_api.interfaces.icommandrunner.ICommandRunner` | `finecode_extension_runner.impls.command_runner.CommandRunner` | Async and sync subprocess execution. | +| `finecode_extension_api.interfaces.ifilemanager.IFileManager` | `finecode_extension_runner.impls.file_manager.FileManager` | File system IO abstraction (read/write/list/create/delete). | +| `finecode_extension_api.interfaces.ifileeditor.IFileEditor` | `finecode_extension_runner.impls.file_editor.FileEditor` | Open-file tracking, change subscriptions, read/write with editor awareness. | +| `finecode_extension_api.interfaces.icache.ICache` | `finecode_extension_runner.impls.inmemory_cache.InMemoryCache` | In-memory, file-versioned cache. | +| `finecode_extension_api.interfaces.iactionrunner.IActionRunner` | `finecode_extension_runner.impls.action_runner.ActionRunner` | Run actions and query action declarations. 
| +| `finecode_extension_api.interfaces.irepositorycredentialsprovider.IRepositoryCredentialsProvider` | `finecode_extension_runner.impls.repository_credentials_provider.ConfigRepositoryCredentialsProvider` | In-memory repository credentials and registry list. | +| `finecode_extension_api.interfaces.iprojectinfoprovider.IProjectInfoProvider` | `finecode_extension_runner.impls.project_info_provider.ProjectInfoProvider` | Current project paths and raw config access. | +| `finecode_extension_api.interfaces.iextensionrunnerinfoprovider.IExtensionRunnerInfoProvider` | `finecode_extension_runner.impls.extension_runner_info_provider.ExtensionRunnerInfoProvider` | Runtime env info (venv paths, cache dir). | + +## Preset-provided services + +These services are declared in presets in this repo. They are available when the preset is active, or when you copy the same `[[tool.finecode.service]]` entry into your own config. + +| Interface | Implementation | Declared by | +| --- | --- | --- | +| `finecode_extension_api.interfaces.ihttpclient.IHttpClient` | `finecode_httpclient.HttpClient` | `finecode_dev_common_preset` | +| `finecode_extension_api.interfaces.ijsonrpcclient.IJsonRpcClient` | `finecode_jsonrpc.jsonrpc_client.JsonRpcClientImpl` | `presets/fine_python_lint` | +| `finecode_extension_api.interfaces.ilspclient.ILspClient` | `finecode_extension_runner.impls.lsp_client.LspClientImpl` | `presets/fine_python_lint` (wraps `IJsonRpcClient`) | + +## Extension-activated services + +Extensions can register services via the `finecode.activator` entry point using `IServiceRegistry`. The following activators ship in this repo and register services when their packages are installed. 
+ +| Extension package | Interface | Implementation | +| --- | --- | --- | +| `fine_python_ast` | `fine_python_ast.iast_provider.IPythonSingleAstProvider` | `fine_python_ast.ast_provider.PythonSingleAstProvider` | +| `fine_python_mypy` | `fine_python_mypy.iast_provider.IMypySingleAstProvider` | `fine_python_mypy.ast_provider.MypySingleAstProvider` | +| `fine_python_package_info` | `fine_python_package_info.ipypackagelayoutinfoprovider.IPyPackageLayoutInfoProvider` | `fine_python_package_info.py_package_layout_info_provider.PyPackageLayoutInfoProvider` | +| `fine_python_package_info` | `finecode_extension_api.interfaces.isrcartifactfileclassifier.ISrcArtifactFileClassifier` | `fine_python_package_info.py_src_artifact_file_classifier.PySrcArtifactFileClassifier` | +| `fine_python_ruff` | `fine_python_ruff.ruff_lsp_service.RuffLspService` | `fine_python_ruff.ruff_lsp_service.RuffLspService` | +| `fine_python_pyrefly` | `fine_python_pyrefly.pyrefly_lsp_service.PyreflyLspService` | `fine_python_pyrefly.pyrefly_lsp_service.PyreflyLspService` | + +## Service registry for extensions + +Extension activators receive an `IServiceRegistry` instance (not injected into handlers) and call `register_impl()` to bind interfaces to implementations. See `finecode_extension_api.interfaces.iserviceregistry.IServiceRegistry` for the protocol and the activators above for concrete examples. diff --git a/docs/wm-er-protocol.md b/docs/wm-er-protocol.md new file mode 100644 index 00000000..a5a7207d --- /dev/null +++ b/docs/wm-er-protocol.md @@ -0,0 +1,158 @@ +# FineCode WM-ER Protocol + +This document describes the communication protocol between the FineCode Workspace +Manager (WM) and Extension Runners (ER). WM is the JSON-RPC client; each ER is a +JSON-RPC server implemented via the LSP stack (`finecode_extension_runner/lsp_server.py`). +The protocol is LSP-shaped with a small set of custom commands. 
+ +## Transport + +- JSON-RPC 2.0 +- LSP-style framing over stdio: `Content-Length: N\r\nContent-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n{json}` +- WM spawns ER processes with: + - `python -m finecode_extension_runner.cli start --project-path=... --env-name=...` + - `--debug` enables a debugpy attach flow before WM connects +- Field names are camelCase for standard LSP params, but command arguments are + passed verbatim (snake_case is common in FineCode payloads). + +## Lifecycle + +1. WM starts the ER process (per project + env). +2. WM sends `initialize` and waits for the ER response. +3. WM sends `initialized`. +4. WM sends `finecodeRunner/updateConfig` to bootstrap handlers and services. +5. On shutdown: WM sends `shutdown` then `exit`. + +## Message Catalog + +### WM -> ER + +**Requests** + +- `initialize` + - Standard LSP initialize request. + - Example params (trimmed): + ```json + { + "processId": 12345, + "clientInfo": {"name": "FineCode_WorkspaceManager", "version": "0.1.0"}, + "capabilities": {}, + "workspaceFolders": [{"uri": "file:///path/to/project", "name": "project"}], + "trace": "verbose" + } + ``` + +- `shutdown` + - Standard LSP shutdown request. + +- `workspace/executeCommand` + - Used for all FineCode WM → ER commands. The `arguments` array is passed to + the handler verbatim. + + **Commands** + + - `finecodeRunner/updateConfig` + - Arguments: + 1. `working_dir` (string path) + 2. `project_name` (string) + 3. `project_def_path` (string path) + 4. `config` (object) + - Config shape (top-level): + - `actions`: list of action objects (`name`, `handlers`, `source`, `config`) + - `action_handler_configs`: map of handler source → config + - `services`: list of service declarations (optional) + - `handlers_to_initialize`: map of action name → handler names (optional) + - Result: `{}` (empty object) + + - `actions/run` + - Arguments: + 1. `action_name` (string) + 2. `params` (object) + 3. 
`options` (object, optional) + - Options (snake_case keys are expected): + - `meta`: `{ "trigger": "user|system|unknown", "dev_env": "ide|cli|ai|precommit|ci" }` + - `partial_result_token`: `int | string` (used to correlate `$/progress`) + - `result_formats`: `["json", "string"]` (defaults to `["json"]`) + - Result (success): + ```json + { + "status": "success", + "result_by_format": "{\"json\": {\"...\": \"...\"}}", + "return_code": 0 + } + ``` + - Result (stopped): + ```json + { + "status": "stopped", + "result_by_format": "{\"json\": {\"...\": \"...\"}}", + "return_code": 1 + } + ``` + - Result (error): + ```json + {"error": "message"} + ``` + - Note: `result_by_format` is a JSON string (not a JSON object) due to + LSP serialization constraints in the runner. + + - `actions/getPayloadSchemas` + - Arguments: none + - Result: `{ action_name: JSON Schema fragment | null }` + - Returns a payload schema for every action currently known to the runner. + Each schema has `properties` (field name → JSON Schema type object) and + `required` (list of field names without defaults). + `null` means the action class could not be imported. + + - `actions/mergeResults` + - Arguments: `[action_name, results]` + - Result: `{ "merged": ... }` or `{ "error": "..." }` + + - `actions/reload` + - Arguments: `[action_name]` + - Result: `{}` + + - `packages/resolvePath` + - Arguments: `[package_name]` + - Result: `{ "packagePath": "/abs/path/to/package" }` + +**Notifications** + +- `initialized` (standard LSP) +- `textDocument/didOpen` +- `textDocument/didChange` +- `textDocument/didClose` +- `$/cancelRequest` + - Sent by WM when an in-flight request should be cancelled. + +### ER -> WM + +**Requests** + +- `workspace/applyEdit` + - Standard LSP request for applying workspace edits. + - WM forwards this to its active client (IDE) if available. 
+
+- `projects/getRawConfig`
+  - Params: `{ "projectDefPath": "/abs/path/to/project/finecode.toml" }`
+  - Result: `{ "config": "<raw project config serialized as a string>" }`
+  - Used by ER during `finecodeRunner/updateConfig` to resolve project config.
+
+**Notifications**
+
+- `$/progress`
+  - Params: `{ "token": "<progress token>", "value": "<json string>" }`
+  - The `token` must match `partial_result_token` from `actions/run`.
+  - `value` is a JSON string produced by the ER from a partial run result.
+
+## Error Handling and Cancellation
+
+- JSON-RPC errors are used for protocol-level failures.
+- Command-level errors are returned via `{ "error": "..." }` in command results.
+- WM cancels in-flight requests by sending `$/cancelRequest` with the request id.
+
+## Document Sync Notes
+
+WM forwards open-file events to ER so actions can operate on in-memory document
+state. ER may send `workspace/applyEdit` when handlers modify files; WM applies
+these edits via its active client when possible.
diff --git a/docs/wm-protocol.md b/docs/wm-protocol.md
new file mode 100644
index 00000000..e10ceffd
--- /dev/null
+++ b/docs/wm-protocol.md
@@ -0,0 +1,810 @@
+# FineCode WM Server Protocol
+
+The FineCode Workspace Manager Server (WM Server) is a TCP JSON-RPC 2.0 service that manages the workspace state
+(projects, configs, extension runners). Any client — LSP server, MCP server, or CLI — can
+connect to it.
+ +## Transport + +- TCP on `127.0.0.1`, random free port +- Content-Length framing (same as LSP): `Content-Length: N\r\n\r\n{json_body}` +- Discovery: port written to `.venvs/dev_workspace/cache/finecode/wm_port` +- Auto-stops when the last client disconnects (after a 30s grace period by default, configurable via `--disconnect-timeout`) or if no client connects within 30 seconds after WM Server startup + +## JSON-RPC 2.0 + +**Request** (client -> server, expects response): + +```json +{"jsonrpc": "2.0", "id": 1, "method": "workspace/listProjects", "params": {...}} +``` + +**Response** (success): + +```json +{"jsonrpc": "2.0", "id": 1, "result": {...}} +``` + +**Response** (error): + +```json +{"jsonrpc": "2.0", "id": 1, "error": {"code": -32002, "message": "Not yet implemented"}} +``` + +**Notification** (no `id` field, no response expected): + +```json +{"jsonrpc": "2.0", "method": "documents/opened", "params": {...}} +``` + +Method names use LSP-style domain prefixes: `workspace/`, `actions/`, `documents/`, +`runners/`, `server/`. + +--- + +## Methods + +### `workspace/` — Workspace & Project Discovery + +#### `workspace/listProjects` + +List all projects in the workspace. + +- **Type:** request +- **Clients:** LSP, MCP, CLI +- **Status:** implemented + +**Params:** `{}` + +**Result:** + +```json +[ + {"name": "finecode", "path": "/path/to/finecode", "status": "CONFIG_VALID"} +] +``` + +--- + +#### `workspace/findProjectForFile` + +Determine which project (if any) contains a given file. The LSP server uses +this helper when a document diagnostic request arrives; it avoids having to +list all projects and perform path comparisons itself. 
+
+- **Type:** request
+- **Clients:** LSP
+- **Status:** implemented
+
+**Params:**
+
+```json
+{"file_path": "/abs/path/to/some/file.py"}
+```
+
+**Result:**
+
+```json
+{"project": "project_name"}
+```
+
+If no project contains the file, the result is `{"project": null}`. The server
+internally calls `finecode.wm_server.services.run_service.find_action_project`
+with `action_name="lint"` and returns the corresponding project name.
+
+---
+
+#### `workspace/addDir`
+
+Add a workspace directory. Discovers projects, reads configs, collects actions,
+and optionally starts extension runners.
+
+> **Design note:** Ideally, workspace directories would be a single shared
+> definition independent of which client connects (LSP, MCP, CLI). Currently,
+> each client calls `workspace/addDir` with its own working directory, so the
+> WM Server's workspace is the union of what clients have registered. This is a
+> known simplification — a future improvement would introduce a workspace
+> configuration file or a dedicated workspace management layer so that the set
+> of directories is not environment-specific.
+
+- **Type:** request
+- **Clients:** LSP, CLI
+- **Status:** implemented
+
+**Params:**
+
+```json
+{"dir_path": "/path/to/workspace", "start_runners": true, "projects": ["my_project"]}
+```
+
+`start_runners` is optional (default: `true`). When `false`, the server reads
+configs and collects actions without starting any extension runners. Use this
+when runner environments may not exist yet (e.g. before running `prepare-envs`).
+Actions are still available in the result so clients can validate the workspace.
+
+`projects` is optional. When provided, only the listed projects (by name) will
+be config-initialized and have their runners started. All other projects in the
+directory are still discovered (added to workspace state) but skipped for
+initialization. This avoids the cost of reading configs and spawning runner
+processes for projects that are not needed.
+ +Calling `workspace/addDir` again for the same `dir_path` with a different +`projects` filter (or with `projects` omitted) will initialize the previously +skipped projects — the call is **incremental**, not idempotent. Only projects +that have not yet been config-initialized are processed on each call. This makes +it safe to issue a filtered call followed by an unfiltered one. + +**Result:** + +```json +{ + "projects": [ + {"name": "my_project", "path": "/path/to/my_project", "status": "CONFIG_VALID"} + ] +} +``` + +The `projects` list contains only the projects initialized during **this call**, +not all projects in the workspace. + +`status` values: `"CONFIG_VALID"`, `"CONFIG_INVALID"` + +--- + +#### `workspace/startRunners` + +Start extension runners for all (or specified) projects. Only starts runners +that are not already running — complements existing runner state rather than +replacing it. Also resolves preset-defined actions so that `actions/run` can +find them. + +- **Type:** request +- **Clients:** CLI +- **Status:** implemented + +**Params:** + +```json +{"projects": ["my_project"]} +``` + +`projects` is optional. If omitted, starts runners for all projects. + +**Result:** `{}` + +--- + +#### `workspace/setConfigOverrides` + +Set persistent handler config overrides on the server. Overrides are stored for +the lifetime of the server and applied to all subsequent action runs — unlike the +`config_overrides` field that was previously accepted by `actions/runBatch`, which +required runners to be stopped first. + +- **Type:** request +- **Clients:** CLI +- **Status:** implemented + +**Params:** + +```json +{ + "overrides": { + "lint": { + "ruff": {"line_length": 120}, + "": {"some_action_level_param": "value"} + } + } +} +``` + +`overrides` format: `{action_name: {handler_name_or_"": {param: value}}}`. +The empty-string key `""` means the override applies to all handlers of that action. 
+ +**Result:** `{}` + +**Behaviour:** + +- Overrides are stored in the server's workspace context and applied to all + subsequent action runs. +- If extension runners are already running, they receive a config update + immediately; initialized handlers are dropped and will be re-initialized with + the new config on the next run. +- The CLI `run` command sends this message **before** `workspace/addDir` in + standalone mode (`--own-server`), so runners always start with the correct + config and no update push is required. +- Config overrides are **not supported** in `--shared-server` mode: the CLI + will print a warning and ignore them. +- Calling this method again replaces the previous overrides entirely. + +--- + +#### `workspace/removeDir` + +Remove a workspace directory. Stops runners for affected projects and removes them +from context. + +- **Type:** request +- **Clients:** LSP +- **Status:** implemented + +**Params:** + +```json +{"dir_path": "/path/to/workspace"} +``` + +**Result:** `{}` + +--- + +#### `workspace/getProjectRawConfig` + +Return the fully resolved raw configuration for a project, as stored in the +workspace context after config reading and preset resolution. + +- **Type:** request +- **Clients:** CLI +- **Status:** implemented + +**Params:** + +```json +{"project": "my_project"} +``` + +**Result:** + +```json +{ + "raw_config": { + "tool": { "finecode": { ... } }, + ... + } +} +``` + +**Errors:** + +- `project` is required — returns a JSON-RPC error if omitted. +- Project not found — returns a JSON-RPC error if no project with the given name + exists in the workspace context. + +--- + +### `actions/` — Action Discovery & Execution + +#### `actions/list` + +List available actions, optionally filtered by project. Flat listing for +programmatic use by MCP agents and CLI. + +- **Type:** request +- **Clients:** MCP, CLI +- **Status:** stub + +**Params:** + +```json +{"project": "finecode"} +``` + +All fields optional. 
If `project` is omitted, returns actions from all projects. + +**Result:** + +```json +{ + "actions": [ + { + "name": "lint", + "source": "finecode_extension_api.actions.lint.LintAction", + "project": "finecode", + "handlers": [ + {"name": "ruff", "source": "fine_python_ruff.RuffLintFilesHandler", "env": "runtime"} + ] + } + ] +} +``` + +--- + +#### `actions/getPayloadSchemas` + +Return payload schemas for the specified actions in a project. Used by the MCP +server to build accurate `inputSchema` entries for each tool. + +- **Type:** request +- **Clients:** MCP +- **Status:** implemented + +**Params:** + +```json +{ "project": "/abs/path/to/project", "action_names": ["lint", "format"] } +``` + +**Result:** + +```json +{ + "schemas": { + "lint": { "properties": { "file_paths": {"type": "array", "items": {"type": "string"}}, "target": {"type": "string", "enum": ["project", "files"]} }, "required": [] }, + "format": { "properties": { "save": {"type": "boolean"}, "target": {"type": "string"}, "file_paths": {"type": "array", "items": {"type": "string"}} }, "required": [] } + } +} +``` + +Each schema value is `null` for actions whose class cannot be imported in any +Extension Runner. Schemas are cached per project in the WM and invalidated +whenever runner config is updated. + +--- + +#### `actions/getTree` + +Get the hierarchical action tree for IDE sidebar display. + +- **Type:** request +- **Clients:** LSP +- **Status:** stub + +**Params:** `{}` + +**Result:** + +```json +{ + "nodes": [ + { + "node_id": "ws_dir_0", + "name": "/path/to/workspace", + "node_type": 0, + "status": "ok", + "subnodes": [ + { + "node_id": "project_0", + "name": "finecode", + "node_type": 1, + "status": "ok", + "subnodes": [] + } + ] + } + ] +} +``` + +`node_type` values: 0=DIRECTORY, 1=PROJECT, 2=ACTION, 3=ACTION_GROUP, 4=PRESET, +5=ENV_GROUP, 6=ENV + +--- + +#### `actions/run` + +Execute a single action on a project. 
+ +- **Type:** request +- **Clients:** LSP, MCP, CLI +- **Status:** stub + +**Params:** + +```json +{ + "action": "lint", + "project": "finecode", + "params": {"file_paths": ["/path/to/file.py"]}, + "options": { + "result_formats": ["json", "string"], + "trigger": "user", + "dev_env": "ide" + } +} +``` + +Required: `action`, `project`. All other fields optional. + +`trigger` values: `"user"`, `"system"`, `"unknown"` (default: `"unknown"`) + +`dev_env` values: `"ide"`, `"cli"`, `"ai"`, `"precommit"`, `"ci"` (default: `"cli"`) + +**Result:** + +```json +{ + "result_by_format": { + "json": {"messages": {"file.py": []}}, + "string": "All checks passed." + }, + "return_code": 0 +} +``` + +--- + +#### `actions/runBatch` + +Execute multiple actions across multiple projects. Used for batch operations. + +- **Type:** request +- **Clients:** CLI, MCP +- **Status:** stub + +**Params:** + +```json +{ + "actions": ["lint", "check_formatting"], + "projects": ["finecode", "finecode_extension_api"], + "params": {}, + "options": { + "concurrent": false, + "result_formats": ["json", "string"], + "trigger": "user", + "dev_env": "cli" + } +} +``` + +Required: `actions`. If `projects` is omitted, runs on all projects that have the +requested actions. + +**Result:** + +```json +{ + "results": { + "/path/to/finecode": { + "lint": {"result_by_format": {...}, "return_code": 0}, + "check_formatting": {"result_by_format": {...}, "return_code": 0} + } + }, + "return_code": 0 +} +``` + +`return_code` at the top level is the bitwise OR of all individual return codes. + +--- + +#### `actions/runWithPartialResults` + +Execute an action with streaming partial results. The server sends +`actions/partialResult` notifications during execution. 
+ +- **Type:** request +- **Clients:** LSP +- **Status:** stub + +**Params:** + +```json +{ + "action": "lint", + "project": "finecode", + "params": {"file_paths": ["/path/to/file.py"]}, + "partial_result_token": "diag_1", + "options": { + "result_formats": ["json", "string"], + "trigger": "system", + "dev_env": "ide" + } +} +``` + +Required: `action`, `project`, `partial_result_token`. + +Supported `result_formats`: `"json"`, `"string"`, etc. (same as `actions/run`). + +**Result:** Same as `actions/run` (the final aggregated result). + +During execution, the server sends `actions/partialResult` notifications (see below). + +> **Guarantee:** The WM Server always delivers results via `actions/partialResult` +> notifications, even when an extension runner does not stream incrementally (i.e. +> it collects all results internally and returns them as a single final response). +> In that case the server emits the final result as a partial result notification +> before returning the aggregated response. Clients can therefore rely solely on +> `actions/partialResult` notifications to receive results and safely ignore the +> response body of this request. + +--- + +#### `actions/reload` + +Hot-reload handler code for an action without restarting runners. + +- **Type:** request +- **Clients:** LSP +- **Status:** stub + +**Params:** + +```json +{"project": "finecode", "action": "lint"} +``` + +**Result:** `{}` + +--- + +### `documents/` — Document Sync + +Notifications from the LSP client to keep the WM Server (and extension runners) +informed about open documents. These are fire-and-forget (no response). 
+ +#### `documents/opened` + +- **Type:** notification (client -> server) +- **Clients:** LSP +- **Status:** stub + +**Params:** + +```json +{"uri": "file:///path/to/file.py", "version": 1} +``` + +--- + +#### `documents/closed` + +- **Type:** notification (client -> server) +- **Clients:** LSP +- **Status:** stub + +**Params:** + +```json +{"uri": "file:///path/to/file.py"} +``` + +--- + +#### `documents/changed` + +- **Type:** notification (client -> server) +- **Clients:** LSP +- **Status:** stub + +**Params:** + +```json +{ + "uri": "file:///path/to/file.py", + "version": 2, + "content_changes": [ + { + "range": { + "start": {"line": 5, "character": 0}, + "end": {"line": 5, "character": 10} + }, + "text": "new_text" + } + ] +} +``` + +--- + +### `runners/` — Runner Management + +#### `runners/list` + +List extension runners and their statuses. + +- **Type:** request +- **Clients:** LSP, MCP +- **Status:** stub + +**Params:** + +```json +{"project": "finecode"} +``` + +`project` is optional. If omitted, returns runners for all projects. + +**Result:** + +```json +{ + "runners": [ + { + "project": "finecode", + "env": "runtime", + "status": "RUNNING", + "readable_id": "finecode::runtime" + } + ] +} +``` + +`status` values: `"NO_VENV"`, `"INITIALIZING"`, `"FAILED"`, `"RUNNING"`, `"EXITED"` + +--- + +#### `runners/restart` + +Restart an extension runner. Optionally start in debug mode. + +- **Type:** request +- **Clients:** LSP +- **Status:** stub + +**Params:** + +```json +{"project": "finecode", "env": "runtime", "debug": false} +``` + +`debug` is optional, defaults to `false`. + +**Result:** `{}` + +--- + +#### `runners/checkEnv` + +Check whether the named environment for a project is valid (i.e. the virtualenv +exists and its dependencies are correctly installed). 
+ +- **Type:** request +- **Clients:** CLI +- **Status:** implemented + +**Params:** + +```json +{"project": "my_project", "env_name": "dev_workspace"} +``` + +**Result:** + +```json +{"valid": true} +``` + +--- + +#### `runners/removeEnv` + +Remove the named environment for a project. If a runner is currently using that +environment, it is stopped first. + +- **Type:** request +- **Clients:** CLI +- **Status:** implemented + +**Params:** + +```json +{"project": "my_project", "env_name": "dev_workspace"} +``` + +**Result:** `{}` + +--- + +### `server/` — Server Lifecycle & Notifications + +#### `server/getInfo` + +Return static information about the running WM Server instance. + +- **Type:** request +- **Clients:** LSP, MCP, CLI +- **Status:** implemented + +**Params:** `{}` + +**Result:** + +```json +{ + "log_file_path": "/abs/path/to/.venvs/dev_workspace/logs/wm_server/wm_server.log" +} +``` + +`log_file_path` is the absolute path to the WM Server's log file for the current process. +Clients can log or display this path so the user can open the file directly when troubleshooting. + +--- + +#### `server/shutdown` + +Explicitly shut down the WM Server. Clients can use this when they intentionally +want the WM to stop or restart, rather than waiting for disconnect-timeout +auto-shutdown. + +- **Type:** request +- **Clients:** any +- **Status:** implemented + +**Params:** `{}` + +**Result:** `{}` + +--- + +### Server -> Client Notifications + +These are sent by the WM Server to connected clients. Clients must implement +a background reader to receive them. + +#### `actions/partialResult` + +Sent during `actions/runWithPartialResults` execution as results stream in. 
+ +- **Type:** notification (server -> client) +- **Clients:** LSP +- **Status:** stub + +**Params:** + +```json +{ + "token": "diag_1", + "value": { + "result_by_format": { + "json": {"messages": {"file.py": [...]}}, + "string": "3 issues found in file.py" + } + } +} +``` + +`token` matches the `partial_result_token` from the originating request. + +`result_by_format` contains results in all formats requested in the originating +`actions/runWithPartialResults` params (same structure as `actions/run` response, +but without `return_code`). + +> **Note:** Notifications are delivered only to the client connection that +> initiated the corresponding `actions/runWithPartialResults` request. The +> WM Server does **not** broadcast these messages to every connected client. + +--- + +#### `actions/treeChanged` + +Sent when a project's status or actions change (e.g., after config reload, +runner start/stop). + +- **Type:** notification (server -> client) +- **Clients:** LSP +- **Status:** implemented + +**Params:** + +```json +{ + "node": { + "node_id": "project_0", + "name": "finecode", + "node_type": 1, + "status": "ok", + "subnodes": [] + } +} +``` + +--- + +#### `server/userMessage` + +Broadcast user-facing messages (errors, warnings, info) to connected clients. 
+ +- **Type:** notification (server -> client) +- **Clients:** LSP +- **Status:** implemented + +**Params:** + +```json +{"message": "Runner failed to start", "type": "ERROR"} +``` + +`type` values: `"INFO"`, `"WARNING"`, `"ERROR"` diff --git a/extensions/fine_python_ast/pyproject.toml b/extensions/fine_python_ast/pyproject.toml index e4a7367c..151f8a3a 100644 --- a/extensions/fine_python_ast/pyproject.toml +++ b/extensions/fine_python_ast/pyproject.toml @@ -26,3 +26,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_black/fine_python_black/action.py b/extensions/fine_python_black/fine_python_black/action.py index 9f5d17fd..4f2eaf79 100644 --- a/extensions/fine_python_black/fine_python_black/action.py +++ b/extensions/fine_python_black/fine_python_black/action.py @@ -23,7 +23,7 @@ from black.mode import Mode, TargetVersion from finecode_extension_api import code_action -from finecode_extension_api.actions import format as format_action +from finecode_extension_api.actions.code_quality import format_action from finecode_extension_api.interfaces import icache, ilogger, iprocessexecutor diff --git a/extensions/fine_python_black/pyproject.toml b/extensions/fine_python_black/pyproject.toml index 37d2b894..1b0cc889 100644 --- a/extensions/fine_python_black/pyproject.toml +++ b/extensions/fine_python_black/pyproject.toml @@ -23,3 +23,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable 
fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_flake8/fine_python_flake8/action.py b/extensions/fine_python_flake8/fine_python_flake8/action.py index 055067fd..36a92d81 100644 --- a/extensions/fine_python_flake8/fine_python_flake8/action.py +++ b/extensions/fine_python_flake8/fine_python_flake8/action.py @@ -12,22 +12,23 @@ from flake8.plugins import finder from finecode_extension_api import code_action -from finecode_extension_api.actions import lint_files as lint_files_action +from finecode_extension_api.actions.code_quality import lint_files_action from finecode_extension_api.interfaces import ( icache, ifileeditor, ilogger, iprocessexecutor, ) +from finecode_extension_api.resource_uri import ResourceUri, resource_uri_to_path def map_flake8_check_result_to_lint_message(result: tuple) -> lint_files_action.LintMessage: error_code, line_number, column, text, physical_line = result return lint_files_action.LintMessage( range=lint_files_action.Range( - start=lint_files_action.Position(line=line_number, character=column), + start=lint_files_action.Position(line=line_number - 1, character=column), end=lint_files_action.Position( - line=line_number, + line=line_number - 1, character=len(physical_line) if physical_line is not None else column, ), ), @@ -147,14 +148,15 @@ def __init__( self.logger.disable("bugbear") async def run_on_single_file( - self, file_path: Path + self, file_uri: ResourceUri ) -> lint_files_action.LintFilesRunResult | None: - messages = {} + file_path = 
resource_uri_to_path(file_uri) + messages: dict[ResourceUri, list[lint_files_action.LintMessage]] = {} try: cached_lint_messages = await self.cache.get_file_cache( file_path, self.CACHE_KEY ) - messages[str(file_path)] = cached_lint_messages + messages[file_uri] = cached_lint_messages return lint_files_action.LintFilesRunResult(messages=messages) except icache.CacheMissException: pass @@ -178,7 +180,7 @@ async def run_on_single_file( file_ast=file_ast, config=self.config, ) - messages[str(file_path)] = lint_messages + messages[file_uri] = lint_messages await self.cache.save_file_cache( file_path, file_version, self.CACHE_KEY, lint_messages ) @@ -193,12 +195,12 @@ async def run( if self.config.select is not None and len(self.config.select) == 0: # empty set of rules is selected, no need to run flake8 return None - - file_paths = [file_path async for file_path in payload] - for file_path in file_paths: + file_uris = [file_uri async for file_uri in payload] + + for file_uri in file_uris: run_context.partial_result_scheduler.schedule( - file_path, self.run_on_single_file(file_path) + file_uri, self.run_on_single_file(file_uri) ) diff --git a/extensions/fine_python_import_linter/pyproject.toml b/extensions/fine_python_import_linter/pyproject.toml index 891a7cbe..9e9fd634 100644 --- a/extensions/fine_python_import_linter/pyproject.toml +++ b/extensions/fine_python_import_linter/pyproject.toml @@ -23,3 +23,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } 
diff --git a/extensions/fine_python_isort/fine_python_isort/action.py b/extensions/fine_python_isort/fine_python_isort/action.py index 5dce2da2..fb96af04 100644 --- a/extensions/fine_python_isort/fine_python_isort/action.py +++ b/extensions/fine_python_isort/fine_python_isort/action.py @@ -2,13 +2,13 @@ import dataclasses from io import StringIO -from pathlib import Path import isort.api as isort_api import isort.settings as isort_settings from finecode_extension_api import code_action -from finecode_extension_api.actions import format_files as format_files_action +from finecode_extension_api.actions.code_quality import format_files_action from finecode_extension_api.interfaces import icache, ilogger, iprocessexecutor +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass @@ -45,20 +45,20 @@ async def run( payload: format_files_action.FormatFilesRunPayload, run_context: format_files_action.FormatFilesRunContext, ) -> format_files_action.FormatFilesRunResult: - result_by_file_path: dict[Path, format_files_action.FormatRunFileResult] = {} - for file_path in payload.file_paths: - file_content, file_version = run_context.file_info_by_path[file_path] + result_by_file_path: dict[ResourceUri, format_files_action.FormatRunFileResult] = {} + for file_uri in payload.file_paths: + file_content, file_version = run_context.file_info_by_path[file_uri] new_file_content, file_changed = await self.process_executor.submit( format_one, file_content, dataclasses.asdict(self.config) ) # save for next handlers - run_context.file_info_by_path[file_path] = format_files_action.FileInfo( + run_context.file_info_by_path[file_uri] = format_files_action.FileInfo( new_file_content, file_version ) - result_by_file_path[file_path] = format_files_action.FormatRunFileResult( + result_by_file_path[file_uri] = format_files_action.FormatRunFileResult( changed=file_changed, code=new_file_content ) diff --git a/extensions/fine_python_isort/pyproject.toml 
b/extensions/fine_python_isort/pyproject.toml index e4fb2050..dcfd3f60 100644 --- a/extensions/fine_python_isort/pyproject.toml +++ b/extensions/fine_python_isort/pyproject.toml @@ -23,3 +23,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_module_exports/pyproject.toml b/extensions/fine_python_module_exports/pyproject.toml index c57cad14..6e765f61 100644 --- a/extensions/fine_python_module_exports/pyproject.toml +++ b/extensions/fine_python_module_exports/pyproject.toml @@ -23,3 +23,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_mypy/fine_python_mypy/action.py b/extensions/fine_python_mypy/fine_python_mypy/action.py index b191abde..005c1d2e 100644 --- a/extensions/fine_python_mypy/fine_python_mypy/action.py +++ b/extensions/fine_python_mypy/fine_python_mypy/action.py @@ -9,7 +9,7 @@ import fine_python_mypy.output_parser as output_parser from 
finecode_extension_api import code_action -from finecode_extension_api.actions import lint as lint_action +from finecode_extension_api.actions.code_quality import lint_action from finecode_extension_api.interfaces import ( icache, icommandrunner, diff --git a/extensions/fine_python_mypy/fine_python_mypy/output_parser.py b/extensions/fine_python_mypy/fine_python_mypy/output_parser.py index 4cfad6cc..83cb2bbd 100644 --- a/extensions/fine_python_mypy/fine_python_mypy/output_parser.py +++ b/extensions/fine_python_mypy/fine_python_mypy/output_parser.py @@ -3,7 +3,7 @@ from loguru import logger -from finecode_extension_api.actions.lint import ( +from finecode_extension_api.actions.code_quality.lint_files_action import ( LintMessage, LintMessageSeverity, Position, @@ -18,7 +18,7 @@ ERROR_CODE_BASE_URL = "https://mypy.readthedocs.io/en/latest/_refs.html#code-" SEE_HREF_PREFIX = "See https://mypy.readthedocs.io" SEE_PREFIX_LEN = len("See ") -LINE_OFFSET = 0 +LINE_OFFSET = 1 CHAR_OFFSET = 1 NOTE_CODE = "note" diff --git a/extensions/fine_python_mypy/pyproject.toml b/extensions/fine_python_mypy/pyproject.toml index da3a34b6..02c1ce3f 100644 --- a/extensions/fine_python_mypy/pyproject.toml +++ b/extensions/fine_python_mypy/pyproject.toml @@ -26,3 +26,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_package_info/fine_python_package_info/build_artifact_py_handler.py 
b/extensions/fine_python_package_info/fine_python_package_info/build_artifact_py_handler.py index 872ee878..b0eda43e 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/build_artifact_py_handler.py +++ b/extensions/fine_python_package_info/fine_python_package_info/build_artifact_py_handler.py @@ -1,7 +1,7 @@ import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import build_artifact_action +from finecode_extension_api.actions.artifact import build_artifact_action from finecode_extension_api.interfaces import ( icommandrunner, iextensionrunnerinfoprovider, diff --git a/extensions/fine_python_package_info/fine_python_package_info/get_dist_artifact_version_py_handler.py b/extensions/fine_python_package_info/fine_python_package_info/get_dist_artifact_version_py_handler.py index a78ecad0..ec01389f 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/get_dist_artifact_version_py_handler.py +++ b/extensions/fine_python_package_info/fine_python_package_info/get_dist_artifact_version_py_handler.py @@ -1,8 +1,7 @@ import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import \ - get_dist_artifact_version as get_dist_artifact_version_action +from finecode_extension_api.actions.publishing import get_dist_artifact_version_action from finecode_extension_api.interfaces import ilogger diff --git a/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_registries_py_handler.py b/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_registries_py_handler.py index 43210318..739f83f2 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_registries_py_handler.py +++ b/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_registries_py_handler.py @@ -1,8 +1,7 @@ import dataclasses from finecode_extension_api import code_action -from 
finecode_extension_api.actions import \ - get_src_artifact_registries as get_src_artifact_registries_action +from finecode_extension_api.actions.artifact import get_src_artifact_registries_action from finecode_extension_api.interfaces import ( ilogger, irepositorycredentialsprovider, diff --git a/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_version_py_handler.py b/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_version_py_handler.py index 3ebabf55..25070dd7 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_version_py_handler.py +++ b/extensions/fine_python_package_info/fine_python_package_info/get_src_artifact_version_py_handler.py @@ -1,8 +1,7 @@ import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import \ - get_src_artifact_version as get_src_artifact_version_action +from finecode_extension_api.actions.artifact import get_src_artifact_version_action from finecode_extension_api.interfaces import ilogger, iprojectinfoprovider diff --git a/extensions/fine_python_package_info/fine_python_package_info/group_src_artifact_files_by_lang_python.py b/extensions/fine_python_package_info/fine_python_package_info/group_src_artifact_files_by_lang_python.py index 0a1f3275..92025ca0 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/group_src_artifact_files_by_lang_python.py +++ b/extensions/fine_python_package_info/fine_python_package_info/group_src_artifact_files_by_lang_python.py @@ -6,9 +6,8 @@ import pathlib from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - group_src_artifact_files_by_lang as group_src_artifact_files_by_lang_action, -) +from finecode_extension_api.actions.artifact import group_src_artifact_files_by_lang_action +from finecode_extension_api.resource_uri import ResourceUri, path_to_resource_uri from fine_python_package_info import 
ipypackagelayoutinfoprovider @@ -79,6 +78,9 @@ async def run( py_files += list(dir_absolute_path.rglob("*.py")) + py_uris: list[ResourceUri] = [ + path_to_resource_uri(p) for p in py_files + ] return group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangRunResult( - files_by_lang={"python": py_files} + files_by_lang={"python": py_uris} ) diff --git a/extensions/fine_python_package_info/fine_python_package_info/is_artifact_published_to_registry_py_handler.py b/extensions/fine_python_package_info/fine_python_package_info/is_artifact_published_to_registry_py_handler.py index c20c5baa..f491be59 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/is_artifact_published_to_registry_py_handler.py +++ b/extensions/fine_python_package_info/fine_python_package_info/is_artifact_published_to_registry_py_handler.py @@ -1,10 +1,9 @@ import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import \ - get_src_artifact_registries as get_src_artifact_registries_action -from finecode_extension_api.actions import \ - is_artifact_published_to_registry as is_artifact_published_to_registry_action +from finecode_extension_api.actions.artifact import get_src_artifact_registries_action +from finecode_extension_api.actions.publishing import is_artifact_published_to_registry_action +from finecode_extension_api.resource_uri import resource_uri_to_path from finecode_extension_api.interfaces import ( iactionrunner, ihttpclient, @@ -64,8 +63,8 @@ async def run( package_name = package_name.replace('_', '-') # Get registries using the action - get_registries_action = self.action_runner.get_action_by_name( - "get_src_artifact_registries", get_src_artifact_registries_action.GetSrcArtifactRegistriesAction + get_registries_action = self.action_runner.get_action_by_source( + get_src_artifact_registries_action.GetSrcArtifactRegistriesAction ) registries_payload = ( 
get_src_artifact_registries_action.GetSrcArtifactRegistriesRunPayload( @@ -134,7 +133,7 @@ async def run( except KeyError as exception: raise code_action.ActionFailedException("File object has no 'filename' key") from exception - is_published_by_dist_path = {dist_path: dist_path.name in published_file_names for dist_path in dist_artifact_paths} + is_published_by_dist_path = {dist_path: resource_uri_to_path(dist_path).name in published_file_names for dist_path in dist_artifact_paths} else: is_published_by_dist_path = {dist_path: False for dist_path in dist_artifact_paths} diff --git a/extensions/fine_python_package_info/fine_python_package_info/list_src_artifact_files_by_lang_python.py b/extensions/fine_python_package_info/fine_python_package_info/list_src_artifact_files_by_lang_python.py index fde35069..f3f7439c 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/list_src_artifact_files_by_lang_python.py +++ b/extensions/fine_python_package_info/fine_python_package_info/list_src_artifact_files_by_lang_python.py @@ -6,9 +6,8 @@ import pathlib from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - list_src_artifact_files_by_lang as list_src_artifact_files_by_lang_action, -) +from finecode_extension_api.actions.artifact import list_src_artifact_files_by_lang_action +from finecode_extension_api.resource_uri import ResourceUri, path_to_resource_uri from fine_python_package_info import ipypackagelayoutinfoprovider @@ -78,6 +77,9 @@ async def run( py_files += list(dir_absolute_path.rglob("*.py")) + py_uris: list[ResourceUri] = [ + path_to_resource_uri(p) for p in py_files + ] return list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangRunResult( - files_by_lang={"python": py_files} + files_by_lang={"python": py_uris} ) diff --git a/extensions/fine_python_package_info/fine_python_package_info/publish_artifact_to_registry_py_handler.py 
b/extensions/fine_python_package_info/fine_python_package_info/publish_artifact_to_registry_py_handler.py index a455e97c..596bd765 100644 --- a/extensions/fine_python_package_info/fine_python_package_info/publish_artifact_to_registry_py_handler.py +++ b/extensions/fine_python_package_info/fine_python_package_info/publish_artifact_to_registry_py_handler.py @@ -6,8 +6,7 @@ from twine.commands import upload as twine_upload from finecode_extension_api import code_action -from finecode_extension_api.actions import \ - publish_artifact_to_registry as publish_artifact_to_registry_action +from finecode_extension_api.actions.publishing import publish_artifact_to_registry_action from finecode_extension_api.interfaces import ( icommandrunner, ilogger, diff --git a/extensions/fine_python_package_info/pyproject.toml b/extensions/fine_python_package_info/pyproject.toml index 07551276..37528e4c 100644 --- a/extensions/fine_python_package_info/pyproject.toml +++ b/extensions/fine_python_package_info/pyproject.toml @@ -31,3 +31,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_pip/pyproject.toml b/extensions/fine_python_pip/pyproject.toml index 83dbd3cf..fce24258 100644 --- a/extensions/fine_python_pip/pyproject.toml +++ b/extensions/fine_python_pip/pyproject.toml @@ -23,3 +23,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = 
"../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = ".", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_pip/src/fine_python_pip/__init__.py b/extensions/fine_python_pip/src/fine_python_pip/__init__.py index 84397026..b57bf370 100644 --- a/extensions/fine_python_pip/src/fine_python_pip/__init__.py +++ b/extensions/fine_python_pip/src/fine_python_pip/__init__.py @@ -1,3 +1,4 @@ from .install_deps_in_env_handler import PipInstallDepsInEnvHandler +from .lock_dependencies_handler import PipLockDependenciesHandler -__all__ = ["PipInstallDepsInEnvHandler"] +__all__ = ["PipInstallDepsInEnvHandler", "PipLockDependenciesHandler"] diff --git a/extensions/fine_python_pip/src/fine_python_pip/install_deps_in_env_handler.py b/extensions/fine_python_pip/src/fine_python_pip/install_deps_in_env_handler.py index bec58402..d6c4b70c 100644 --- a/extensions/fine_python_pip/src/fine_python_pip/install_deps_in_env_handler.py +++ b/extensions/fine_python_pip/src/fine_python_pip/install_deps_in_env_handler.py @@ -2,9 +2,7 @@ import pathlib from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - install_deps_in_env as install_deps_in_env_action, -) +from finecode_extension_api.actions.environments import install_deps_in_env_action from finecode_extension_api.interfaces import icommandrunner, ilogger @@ -93,19 +91,26 @@ def _construct_pip_install_cmd( async def _run_pip_cmd( self, cmd: str, env_name: str, project_dir_path: pathlib.Path ) -> str | None: + self.logger.debug(f"Running pip: {cmd}") process = await self.command_runner.run(cmd, cwd=project_dir_path) await process.wait_for_end() + 
process_stdout = process.get_output() + process_stderr = process.get_error_output() + if process_stdout: + self.logger.trace(f"pip stdout:\n{process_stdout}") + if process_stderr: + self.logger.trace(f"pip stderr:\n{process_stderr}") if process.get_exit_code() != 0: - process_stdout = process.get_output() - process_stderr = process.get_error_output() logs = "" - if len(process_stdout) > 0 and len(process_stderr) > 0: + if process_stdout and process_stderr: logs = f"stdout: {process_stdout}\nstderr: {process_stderr}" - elif len(process_stdout) > 0: + elif process_stdout: logs = process_stdout else: logs = process_stderr - return f'Installation of dependencies "{cmd}" in env {env_name} from {project_dir_path} failed:\n{logs}' + error = f'Installation of dependencies in env {env_name} from {project_dir_path} failed (cmd: {cmd}):\n{logs}' + self.logger.error(error) + return error return None diff --git a/extensions/fine_python_pip/src/fine_python_pip/lock_dependencies_handler.py b/extensions/fine_python_pip/src/fine_python_pip/lock_dependencies_handler.py new file mode 100644 index 00000000..29b502f1 --- /dev/null +++ b/extensions/fine_python_pip/src/fine_python_pip/lock_dependencies_handler.py @@ -0,0 +1,49 @@ +import pathlib + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments import lock_dependencies_action +from finecode_extension_api.interfaces import icommandrunner, ilogger + + +class PipLockDependenciesHandler( + code_action.ActionHandler[ + lock_dependencies_action.LockDependenciesAction, + code_action.ActionHandlerConfig, + ] +): + def __init__( + self, + config: code_action.ActionHandlerConfig, + command_runner: icommandrunner.ICommandRunner, + logger: ilogger.ILogger, + ) -> None: + self.config = config + self.command_runner = command_runner + self.logger = logger + + async def run( + self, + payload: lock_dependencies_action.LockDependenciesRunPayload, + run_context: 
lock_dependencies_action.LockDependenciesRunContext, + ) -> lock_dependencies_action.LockDependenciesRunResult: + src_artifact_def_path = payload.src_artifact_def_path + output_path = payload.output_path + project_dir_path = src_artifact_def_path.parent + + cmd = ( + f"pip lock" + f" -o {output_path}" + ) + + process = await self.command_runner.run(cmd, cwd=project_dir_path) + await process.wait_for_end() + + if process.get_exit_code() != 0: + error_output = process.get_error_output() or process.get_output() + raise code_action.ActionFailedException( + f"pip lock failed for {src_artifact_def_path}:\n{error_output}" + ) + + return lock_dependencies_action.LockDependenciesRunResult( + lock_file_path=pathlib.Path(output_path), + ) diff --git a/extensions/fine_python_pyrefly/fine_python_pyrefly/lint_files_handler.py b/extensions/fine_python_pyrefly/fine_python_pyrefly/lint_files_handler.py index 29560afc..517869c1 100644 --- a/extensions/fine_python_pyrefly/fine_python_pyrefly/lint_files_handler.py +++ b/extensions/fine_python_pyrefly/fine_python_pyrefly/lint_files_handler.py @@ -6,7 +6,7 @@ from pathlib import Path from finecode_extension_api import code_action -from finecode_extension_api.actions import lint_files as lint_files_action +from finecode_extension_api.actions.code_quality import lint_files_action from finecode_extension_api.interfaces import ( icache, icommandrunner, @@ -16,6 +16,7 @@ isrcartifactfileclassifier, iextensionrunnerinfoprovider, ) +from finecode_extension_api.resource_uri import ResourceUri, resource_uri_to_path from fine_python_pyrefly.pyrefly_lsp_service import PyreflyLspService @@ -82,18 +83,19 @@ def __init__( }) async def run_on_single_file( - self, file_path: Path + self, file_uri: ResourceUri ) -> lint_files_action.LintFilesRunResult: - messages = {} + file_path = resource_uri_to_path(file_uri) + messages: dict[ResourceUri, list[lint_files_action.LintMessage]] = {} try: cached_lint_messages = await self.cache.get_file_cache( file_path, 
self.CACHE_KEY ) - messages[str(file_path)] = cached_lint_messages + messages[file_uri] = cached_lint_messages return lint_files_action.LintFilesRunResult(messages=messages) except icache.CacheMissException: pass - + async with self.file_editor.session( author=self.FILE_OPERATION_AUTHOR ) as session: @@ -107,7 +109,7 @@ async def run_on_single_file( lint_messages = await self.lsp_service.check_file(file_path) - messages[str(file_path)] = lint_messages + messages[file_uri] = lint_messages await self.cache.save_file_cache( file_path, file_version, self.CACHE_KEY, lint_messages ) @@ -119,12 +121,12 @@ async def run( payload: lint_files_action.LintFilesRunPayload, run_context: lint_files_action.LintFilesRunContext, ) -> None: - file_paths = [file_path async for file_path in payload] + file_uris = [file_uri async for file_uri in payload] - for file_path in file_paths: + for file_uri in file_uris: run_context.partial_result_scheduler.schedule( - file_path, - self.run_on_single_file(file_path), + file_uri, + self.run_on_single_file(file_uri), ) async def run_pyrefly_lint_on_single_file( @@ -217,8 +219,8 @@ def map_pyrefly_error_to_lint_message(error: dict) -> lint_files_action.LintMess return lint_files_action.LintMessage( range=lint_files_action.Range( - start=lint_files_action.Position(line=start_line, character=start_column), - end=lint_files_action.Position(line=end_line, character=end_column), + start=lint_files_action.Position(line=start_line - 1, character=start_column), + end=lint_files_action.Position(line=end_line - 1, character=end_column), ), message=error.get("description", ""), code=error_code, diff --git a/extensions/fine_python_pyrefly/fine_python_pyrefly/pyrefly_lsp_service.py b/extensions/fine_python_pyrefly/fine_python_pyrefly/pyrefly_lsp_service.py index ea9cef96..9b91f870 100644 --- a/extensions/fine_python_pyrefly/fine_python_pyrefly/pyrefly_lsp_service.py +++ b/extensions/fine_python_pyrefly/fine_python_pyrefly/pyrefly_lsp_service.py @@ -5,7 +5,7 @@ 
from typing import override from finecode_extension_api import service -from finecode_extension_api.actions import lint_files as lint_files_action +from finecode_extension_api.actions.code_quality import lint_files_action from finecode_extension_api.interfaces import ifileeditor, ilspclient, ilogger from finecode_extension_api.contrib.lsp_service import LspService, map_diagnostics_to_lint_messages diff --git a/extensions/fine_python_pyrefly/pyproject.toml b/extensions/fine_python_pyrefly/pyproject.toml index 33a1d7e1..9fe3137d 100644 --- a/extensions/fine_python_pyrefly/pyproject.toml +++ b/extensions/fine_python_pyrefly/pyproject.toml @@ -29,3 +29,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_pytest/.gitignore b/extensions/fine_python_pytest/.gitignore new file mode 100644 index 00000000..cbb2a25b --- /dev/null +++ b/extensions/fine_python_pytest/.gitignore @@ -0,0 +1,5 @@ +.venvs +build/ +*.egg-info/ +__pycache__ +finecode_config_dump/ diff --git a/extensions/fine_python_pytest/fine_python_pytest/__init__.py b/extensions/fine_python_pytest/fine_python_pytest/__init__.py new file mode 100644 index 00000000..8dcf518b --- /dev/null +++ b/extensions/fine_python_pytest/fine_python_pytest/__init__.py @@ -0,0 +1,9 @@ +from .list_tests_handler import PytestListTestsHandler, PytestListTestsHandlerConfig +from .run_tests_handler import PytestRunTestsHandler, PytestRunTestsHandlerConfig + +__all__ = [ + 
"PytestListTestsHandler", + "PytestListTestsHandlerConfig", + "PytestRunTestsHandler", + "PytestRunTestsHandlerConfig", +] diff --git a/finecode_extension_api/src/finecode_extension_api/actions/check_formatting.py b/extensions/fine_python_pytest/fine_python_pytest/py.typed similarity index 100% rename from finecode_extension_api/src/finecode_extension_api/actions/check_formatting.py rename to extensions/fine_python_pytest/fine_python_pytest/py.typed diff --git a/extensions/fine_python_pytest/pyproject.toml b/extensions/fine_python_pytest/pyproject.toml new file mode 100644 index 00000000..79cd5996 --- /dev/null +++ b/extensions/fine_python_pytest/pyproject.toml @@ -0,0 +1,32 @@ +[project] +name = "fine_python_pytest" +version = "0.1.0a0" +description = "" +authors = [{ name = "Vladyslav Hnatiuk", email = "aders1234@gmail.com" }] +readme = "README.md" +requires-python = ">=3.11, <= 3.14" +dependencies = [ + "finecode_extension_api~=0.4.0a0", + "pytest>=7.0.0,<9.0.0", + "pytest-json-report>=1.5.0", +] + +[dependency-groups] +dev_workspace = ["finecode~=0.4.0a0", "finecode_dev_common_preset~=0.3.0a0"] + +[tool.finecode] +presets = [{ source = "finecode_dev_common_preset" }] + +[tool.finecode.env.dev_workspace.dependencies] +finecode_dev_common_preset = { path = "../../finecode_dev_common_preset", editable = true } +finecode = { path = "../../", editable = true } +finecode_extension_runner = { path = "../../finecode_extension_runner", editable = true } +finecode_extension_api = { path = "../../finecode_extension_api", editable = true } +finecode_jsonrpc = { path = "../../finecode_jsonrpc", editable = true } +finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable = true } +fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } +fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } +fine_python_format = { path = "../../presets/fine_python_format", editable = true } 
+fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_ruff/fine_python_ruff/format_files_handler.py b/extensions/fine_python_ruff/fine_python_ruff/format_files_handler.py index 82ecd485..97b21cb9 100644 --- a/extensions/fine_python_ruff/fine_python_ruff/format_files_handler.py +++ b/extensions/fine_python_ruff/fine_python_ruff/format_files_handler.py @@ -12,13 +12,16 @@ from typing import override from finecode_extension_api import code_action -from finecode_extension_api.actions import format_files as format_files_action +from finecode_extension_api.actions.code_quality import format_files_action from finecode_extension_api.interfaces import ( icache, icommandrunner, ilogger, iextensionrunnerinfoprovider, + iprojectinfoprovider, ) +from finecode_extension_api.resource_uri import ResourceUri, resource_uri_to_path +from fine_python_ruff.ruff_lsp_service import RuffLspService @dataclasses.dataclass @@ -28,6 +31,7 @@ class RuffFormatFilesHandlerConfig(code_action.ActionHandlerConfig): quote_style: str = "double" # "double" or "single" target_version: str = "py38" # minimum Python version preview: bool = False + use_cli: bool = False class RuffFormatFilesHandler( @@ -42,35 +46,63 @@ def __init__( logger: ilogger.ILogger, cache: icache.ICache, command_runner: icommandrunner.ICommandRunner, + project_info_provider: iprojectinfoprovider.IProjectInfoProvider, + lsp_service: RuffLspService, ) -> None: self.config = config self.logger = logger self.cache = cache self.command_runner = command_runner self.extension_runner_info_provider = extension_runner_info_provider + self.project_info_provider = project_info_provider + self.lsp_service = lsp_service self.ruff_bin_path = Path(sys.executable).parent / "ruff" + if not self.config.use_cli: + # reference: 
https://docs.astral.sh/ruff/editors/settings/ + format_settings: dict[str, object] = {} + if self.config.preview: + format_settings["preview"] = True + settings: dict[str, object] = { + "lineLength": self.config.line_length, + "targetVersion": self.config.target_version, + } + if format_settings: + settings["format"] = format_settings + self.lsp_service.update_settings(settings) + @override async def run( self, payload: format_files_action.FormatFilesRunPayload, run_context: format_files_action.FormatFilesRunContext, ) -> format_files_action.FormatFilesRunResult: - result_by_file_path: dict[Path, format_files_action.FormatRunFileResult] = {} - for file_path in payload.file_paths: - file_content, file_version = run_context.file_info_by_path[file_path] - - new_file_content, file_changed = await self.format_one( - file_path, file_content - ) + if not self.config.use_cli: + root_uri = self.project_info_provider.get_current_project_dir_path().as_uri() + await self.lsp_service.ensure_started(root_uri) + + result_by_file_path: dict[ResourceUri, format_files_action.FormatRunFileResult] = {} + for file_uri in payload.file_paths: + file_path = resource_uri_to_path(file_uri) + file_content, file_version = run_context.file_info_by_path[file_uri] + + if self.config.use_cli: + new_file_content, file_changed = await self.format_one_cli( + file_path, file_content + ) + else: + new_file_content = await self.lsp_service.format_file( + file_path, file_content + ) + file_changed = new_file_content != file_content # save for next handlers - run_context.file_info_by_path[file_path] = format_files_action.FileInfo( + run_context.file_info_by_path[file_uri] = format_files_action.FileInfo( new_file_content, file_version ) - result_by_file_path[file_path] = format_files_action.FormatRunFileResult( + result_by_file_path[file_uri] = format_files_action.FormatRunFileResult( changed=file_changed, code=new_file_content ) @@ -78,8 +110,8 @@ async def run( result_by_file_path=result_by_file_path ) 
- async def format_one(self, file_path: Path, file_content: str) -> tuple[str, bool]: - """Format a single file using ruff format""" + async def format_one_cli(self, file_path: Path, file_content: str) -> tuple[str, bool]: + """Format a single file using ruff format CLI""" # Build ruff format command cmd = [ str(self.ruff_bin_path), diff --git a/extensions/fine_python_ruff/fine_python_ruff/lint_files_handler.py b/extensions/fine_python_ruff/fine_python_ruff/lint_files_handler.py index e984982a..238d5ac9 100644 --- a/extensions/fine_python_ruff/fine_python_ruff/lint_files_handler.py +++ b/extensions/fine_python_ruff/fine_python_ruff/lint_files_handler.py @@ -6,7 +6,7 @@ from pathlib import Path from finecode_extension_api import code_action -from finecode_extension_api.actions import lint_files as lint_files_action +from finecode_extension_api.actions.code_quality import lint_files_action from finecode_extension_api.interfaces import ( icache, icommandrunner, @@ -14,6 +14,7 @@ ifileeditor, iprojectinfoprovider, ) +from finecode_extension_api.resource_uri import ResourceUri, resource_uri_to_path from fine_python_ruff.ruff_lsp_service import RuffLspService @@ -75,14 +76,15 @@ def __init__( }) async def run_on_single_file( - self, file_path: Path + self, file_uri: ResourceUri ) -> lint_files_action.LintFilesRunResult: - messages = {} + file_path = resource_uri_to_path(file_uri) + messages: dict[ResourceUri, list[lint_files_action.LintMessage]] = {} try: cached_lint_messages = await self.cache.get_file_cache( file_path, self.CACHE_KEY ) - messages[str(file_path)] = cached_lint_messages + messages[file_uri] = cached_lint_messages return lint_files_action.LintFilesRunResult(messages=messages) except icache.CacheMissException: pass @@ -101,7 +103,7 @@ async def run_on_single_file( await self.lsp_service.ensure_started(root_uri) lint_messages = await self.lsp_service.check_file(file_path) - messages[str(file_path)] = lint_messages + messages[file_uri] = lint_messages await 
self.cache.save_file_cache( file_path, file_version, self.CACHE_KEY, lint_messages ) @@ -113,12 +115,12 @@ async def run( payload: lint_files_action.LintFilesRunPayload, run_context: lint_files_action.LintFilesRunContext, ) -> None: - file_paths = [file_path async for file_path in payload] + file_uris = [file_uri async for file_uri in payload] - for file_path in file_paths: + for file_uri in file_uris: run_context.partial_result_scheduler.schedule( - file_path, - self.run_on_single_file(file_path), + file_uri, + self.run_on_single_file(file_uri), ) async def run_ruff_lint_on_single_file( @@ -201,8 +203,8 @@ def map_ruff_violation_to_lint_message( return lint_files_action.LintMessage( range=lint_files_action.Range( - start=lint_files_action.Position(line=start_line, character=start_column), - end=lint_files_action.Position(line=end_line, character=end_column), + start=lint_files_action.Position(line=start_line - 1, character=start_column), + end=lint_files_action.Position(line=end_line - 1, character=end_column), ), message=violation.get("message", ""), code=code, diff --git a/extensions/fine_python_ruff/fine_python_ruff/ruff_lsp_service.py b/extensions/fine_python_ruff/fine_python_ruff/ruff_lsp_service.py index c4d69a80..6f08c191 100644 --- a/extensions/fine_python_ruff/fine_python_ruff/ruff_lsp_service.py +++ b/extensions/fine_python_ruff/fine_python_ruff/ruff_lsp_service.py @@ -5,9 +5,9 @@ from typing import override from finecode_extension_api import service -from finecode_extension_api.actions import lint_files as lint_files_action +from finecode_extension_api.actions.code_quality import lint_files_action from finecode_extension_api.interfaces import ifileeditor, ilspclient, ilogger -from finecode_extension_api.contrib.lsp_service import LspService, map_diagnostics_to_lint_messages +from finecode_extension_api.contrib.lsp_service import LspService, map_diagnostics_to_lint_messages, apply_text_edits class RuffLspService(service.DisposableService): @@ -52,3 
+52,15 @@ async def check_file( return map_diagnostics_to_lint_messages( raw_diagnostics, default_source="ruff" ) + + async def format_file( + self, + file_path: Path, + file_content: str, + timeout: float = 30.0, + ) -> str: + """Format a file via LSP and return the formatted content.""" + raw_edits = await self._lsp_service.format_file(file_path, file_content, timeout=timeout) + if not raw_edits: + return file_content + return apply_text_edits(file_content, raw_edits) diff --git a/extensions/fine_python_ruff/pyproject.toml b/extensions/fine_python_ruff/pyproject.toml index be63222e..b48824a7 100644 --- a/extensions/fine_python_ruff/pyproject.toml +++ b/extensions/fine_python_ruff/pyproject.toml @@ -29,3 +29,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_setuptools_scm/fine_python_setuptools_scm/get_src_artifact_version_setuptools_scm_handler.py b/extensions/fine_python_setuptools_scm/fine_python_setuptools_scm/get_src_artifact_version_setuptools_scm_handler.py index 971a1bcb..a598bd53 100644 --- a/extensions/fine_python_setuptools_scm/fine_python_setuptools_scm/get_src_artifact_version_setuptools_scm_handler.py +++ b/extensions/fine_python_setuptools_scm/fine_python_setuptools_scm/get_src_artifact_version_setuptools_scm_handler.py @@ -4,9 +4,7 @@ from setuptools_scm._get_version_impl import _get_version from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - 
get_src_artifact_version as get_src_artifact_version_action, -) +from finecode_extension_api.actions.artifact import get_src_artifact_version_action from finecode_extension_api.interfaces import iprojectinfoprovider, ilogger diff --git a/extensions/fine_python_setuptools_scm/pyproject.toml b/extensions/fine_python_setuptools_scm/pyproject.toml index 32c21709..54efb02f 100644 --- a/extensions/fine_python_setuptools_scm/pyproject.toml +++ b/extensions/fine_python_setuptools_scm/pyproject.toml @@ -23,3 +23,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git a/extensions/fine_python_virtualenv/pyproject.toml b/extensions/fine_python_virtualenv/pyproject.toml index 24406b27..67def704 100644 --- a/extensions/fine_python_virtualenv/pyproject.toml +++ b/extensions/fine_python_virtualenv/pyproject.toml @@ -26,3 +26,6 @@ finecode_builtin_handlers = { path = "../../finecode_builtin_handlers", editable fine_python_recommended = { path = "../../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../../presets/fine_python_lint", editable = true } fine_python_format = { path = "../../presets/fine_python_format", editable = true } +fine_python_test = { path = "../../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../fine_python_virtualenv", editable = true } diff --git 
a/extensions/fine_python_virtualenv/src/fine_python_virtualenv/__init__.py b/extensions/fine_python_virtualenv/src/fine_python_virtualenv/__init__.py index bd3eaab1..7abb9e6c 100644 --- a/extensions/fine_python_virtualenv/src/fine_python_virtualenv/__init__.py +++ b/extensions/fine_python_virtualenv/src/fine_python_virtualenv/__init__.py @@ -1,4 +1,5 @@ -from .prepare_envs_handler import VirtualenvPrepareEnvHandler -from .prepare_runners_handler import VirtualenvPrepareRunnersHandler +from .create_env_handler import VirtualenvCreateEnvHandler -__all__ = ["VirtualenvPrepareEnvHandler", "VirtualenvPrepareRunnersHandler"] +__all__ = [ + "VirtualenvCreateEnvHandler", +] diff --git a/extensions/fine_python_virtualenv/src/fine_python_virtualenv/create_env_handler.py b/extensions/fine_python_virtualenv/src/fine_python_virtualenv/create_env_handler.py new file mode 100644 index 00000000..f14524c1 --- /dev/null +++ b/extensions/fine_python_virtualenv/src/fine_python_virtualenv/create_env_handler.py @@ -0,0 +1,58 @@ +import dataclasses + +import virtualenv + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments import create_env_action +from finecode_extension_api.actions.environments.create_envs_action import CreateEnvsRunResult +from finecode_extension_api.interfaces import ifilemanager, ilogger + + +@dataclasses.dataclass +class VirtualenvCreateEnvHandlerConfig(code_action.ActionHandlerConfig): ... 
+ + +class VirtualenvCreateEnvHandler( + code_action.ActionHandler[ + create_env_action.CreateEnvAction, VirtualenvCreateEnvHandlerConfig + ] +): + def __init__( + self, + config: VirtualenvCreateEnvHandlerConfig, + logger: ilogger.ILogger, + file_manager: ifilemanager.IFileManager, + ) -> None: + self.config = config + self.logger = logger + self.file_manager = file_manager + + async def run( + self, + payload: create_env_action.CreateEnvRunPayload, + run_context: create_env_action.CreateEnvRunContext, + ) -> CreateEnvsRunResult: + env_info = payload.env + if payload.recreate and env_info.venv_dir_path.exists(): + self.logger.debug(f"Remove virtualenv dir {env_info.venv_dir_path}") + await self.file_manager.remove_dir(env_info.venv_dir_path) + + self.logger.info(f"Creating virtualenv {env_info.venv_dir_path}") + if not env_info.venv_dir_path.exists(): + try: + virtualenv.cli_run( + [env_info.venv_dir_path.as_posix()], + options=None, + setup_logging=False, + env=None, + ) + except Exception as exc: + return CreateEnvsRunResult( + errors=[ + f"Failed to create virtualenv {env_info.venv_dir_path}: {exc}" + ] + ) + else: + self.logger.info(f"Virtualenv in {env_info} exists already") + + return CreateEnvsRunResult(errors=[]) diff --git a/extensions/fine_python_virtualenv/src/fine_python_virtualenv/prepare_envs_handler.py b/extensions/fine_python_virtualenv/src/fine_python_virtualenv/prepare_envs_handler.py deleted file mode 100644 index e0a62b56..00000000 --- a/extensions/fine_python_virtualenv/src/fine_python_virtualenv/prepare_envs_handler.py +++ /dev/null @@ -1,54 +0,0 @@ -import dataclasses - -import virtualenv - -from finecode_extension_api import code_action -from finecode_extension_api.actions import prepare_envs as prepare_envs_action -from finecode_extension_api.interfaces import ifilemanager, ilogger - - -@dataclasses.dataclass -class VirtualenvPrepareEnvHandlerConfig(code_action.ActionHandlerConfig): ... 
- - -class VirtualenvPrepareEnvHandler( - code_action.ActionHandler[ - prepare_envs_action.PrepareEnvsAction, VirtualenvPrepareEnvHandlerConfig - ] -): - def __init__( - self, - config: VirtualenvPrepareEnvHandlerConfig, - logger: ilogger.ILogger, - file_manager: ifilemanager.IFileManager, - ) -> None: - self.config = config - self.logger = logger - self.file_manager = file_manager - - async def run( - self, - payload: prepare_envs_action.PrepareEnvsRunPayload, - run_context: prepare_envs_action.PrepareEnvsRunContext, - ) -> prepare_envs_action.PrepareEnvsRunResult: - # create virtual envs - - # would it be faster parallel? - for env_info in payload.envs: - if payload.recreate and env_info.venv_dir_path.exists(): - self.logger.debug(f"Remove virtualenv dir {env_info.venv_dir_path}") - await self.file_manager.remove_dir(env_info.venv_dir_path) - - self.logger.info(f"Creating virtualenv {env_info.venv_dir_path}") - if not env_info.venv_dir_path.exists(): - # TODO: '-p ' - virtualenv.cli_run( - [env_info.venv_dir_path.as_posix()], - options=None, - setup_logging=False, - env=None, - ) - else: - self.logger.info(f"Virtualenv in {env_info} exists already") - - return prepare_envs_action.PrepareEnvsRunResult(errors=[]) diff --git a/extensions/fine_python_virtualenv/src/fine_python_virtualenv/prepare_runners_handler.py b/extensions/fine_python_virtualenv/src/fine_python_virtualenv/prepare_runners_handler.py deleted file mode 100644 index c546e911..00000000 --- a/extensions/fine_python_virtualenv/src/fine_python_virtualenv/prepare_runners_handler.py +++ /dev/null @@ -1,55 +0,0 @@ -import dataclasses - -import virtualenv - -from finecode_extension_api import code_action -from finecode_extension_api.actions import prepare_runners as prepare_runners_action -from finecode_extension_api.interfaces import ifilemanager, ilogger - - -@dataclasses.dataclass -class VirtualenvPrepareRunnersHandlerConfig(code_action.ActionHandlerConfig): ... 
- - -class VirtualenvPrepareRunnersHandler( - code_action.ActionHandler[ - prepare_runners_action.PrepareRunnersAction, - VirtualenvPrepareRunnersHandlerConfig, - ] -): - def __init__( - self, - config: VirtualenvPrepareRunnersHandlerConfig, - logger: ilogger.ILogger, - file_manager: ifilemanager.IFileManager, - ) -> None: - self.config = config - self.logger = logger - self.file_manager = file_manager - - async def run( - self, - payload: prepare_runners_action.PrepareRunnersRunPayload, - run_context: prepare_runners_action.PrepareRunnersRunContext, - ) -> prepare_runners_action.PrepareRunnersRunResult: - # create virtual envs - - # would it be faster parallel? - for env_info in payload.envs: - if payload.recreate and env_info.venv_dir_path.exists(): - self.logger.debug(f"Remove virtualenv dir {env_info.venv_dir_path}") - await self.file_manager.remove_dir(env_info.venv_dir_path) - - self.logger.info(f"Creating virtualenv {env_info.venv_dir_path}") - if not env_info.venv_dir_path.exists(): - # TODO: '-p ' - virtualenv.cli_run( - [env_info.venv_dir_path.as_posix()], - options=None, - setup_logging=False, - env=None, - ) - else: - self.logger.info(f"Virtualenv in {env_info} exists already") - - return prepare_runners_action.PrepareRunnersRunResult(errors=[]) diff --git a/finecode_builtin_handlers/pyproject.toml b/finecode_builtin_handlers/pyproject.toml index f112084f..40129c00 100644 --- a/finecode_builtin_handlers/pyproject.toml +++ b/finecode_builtin_handlers/pyproject.toml @@ -20,6 +20,9 @@ finecode_builtin_handlers = { path = "../finecode_builtin_handlers", editable = fine_python_recommended = { path = "../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../presets/fine_python_lint", editable = true } fine_python_format = { path = "../presets/fine_python_format", editable = true } +fine_python_test = { path = "../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../extensions/fine_python_pip", editable = 
true } +fine_python_virtualenv = { path = "../extensions/fine_python_virtualenv", editable = true } [tool.finecode] presets = [{ source = "finecode_dev_common_preset" }] diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/__init__.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/__init__.py index 8262ba04..ab9be2df 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/__init__.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/__init__.py @@ -1,31 +1,41 @@ """FineCode Built-in handlers.""" from .clean_finecode_logs import CleanFinecodeLogsHandler +from .create_envs_discover_envs import CreateEnvsDiscoverEnvsHandler +from .create_envs_dispatch import CreateEnvsDispatchHandler from .dump_config import DumpConfigHandler from .dump_config_save import DumpConfigSaveHandler from .format import FormatHandler +from .format_files_dispatch import FormatFilesDispatchHandler from .format_files_save_handler import SaveFormatFilesHandler from .init_repository_provider import InitRepositoryProviderHandler -from .lint import LintHandler -from .prepare_envs_install_deps import PrepareEnvsInstallDepsHandler -from .prepare_envs_read_configs import PrepareEnvsReadConfigsHandler -from .prepare_runners_install_runner_and_presets import ( - PrepareRunnersInstallRunnerAndPresetsHandler, +from .install_env_install_deps import InstallEnvInstallDepsHandler +from .install_env_install_deps_from_lock import ( + InstallEnvInstallDepsFromLockHandler, ) -from .prepare_runners_read_configs import PrepareRunnersReadConfigsHandler +from .install_env_read_config import InstallEnvReadConfigHandler +from .install_envs_discover_envs import InstallEnvsDiscoverEnvsHandler +from .install_envs_dispatch import InstallEnvsDispatchHandler +from .lint import LintHandler +from .lint_files_dispatch import LintFilesDispatchHandler from .publish_artifact import PublishArtifactHandler __all__ = [ "CleanFinecodeLogsHandler", + "CreateEnvsDiscoverEnvsHandler", + 
"CreateEnvsDispatchHandler", "DumpConfigHandler", + "DumpConfigSaveHandler", + "FormatFilesDispatchHandler", "FormatHandler", "InitRepositoryProviderHandler", + "InstallEnvInstallDepsHandler", + "InstallEnvInstallDepsFromLockHandler", + "InstallEnvReadConfigHandler", + "InstallEnvsDiscoverEnvsHandler", + "InstallEnvsDispatchHandler", + "LintFilesDispatchHandler", "LintHandler", - "PrepareEnvsInstallDepsHandler", - "PrepareEnvsReadConfigsHandler", - "PrepareRunnersInstallRunnerAndPresetsHandler", - "PrepareRunnersReadConfigsHandler", - "DumpConfigSaveHandler", "PublishArtifactHandler", - "SaveFormatFilesHandler" + "SaveFormatFilesHandler", ] diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/clean_finecode_logs.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/clean_finecode_logs.py index 2f1c1984..0826fd95 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/clean_finecode_logs.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/clean_finecode_logs.py @@ -1,9 +1,8 @@ +# docs: docs/reference/actions.md import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - clean_finecode_logs as clean_finecode_logs_action, -) +from finecode_extension_api.actions.system import clean_finecode_logs_action from finecode_extension_api.interfaces import ilogger, iextensionrunnerinfoprovider diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/create_envs_discover_envs.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/create_envs_discover_envs.py new file mode 100644 index 00000000..4703b72a --- /dev/null +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/create_envs_discover_envs.py @@ -0,0 +1,66 @@ +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments import create_envs_action +from finecode_extension_api.interfaces import ( + iextensionrunnerinfoprovider, + ilogger, + 
iprojectinfoprovider, +) +from finecode_extension_api.resource_uri import path_to_resource_uri + + +@dataclasses.dataclass +class CreateEnvsDiscoverEnvsHandlerConfig(code_action.ActionHandlerConfig): ... + + +class CreateEnvsDiscoverEnvsHandler( + code_action.ActionHandler[ + create_envs_action.CreateEnvsAction, CreateEnvsDiscoverEnvsHandlerConfig + ] +): + """Discover and populate run_context.envs from the current project's config. + + If payload.envs is already non-empty (explicit caller), those envs are + used as-is — the caller is responsible for any filtering. + Otherwise all envs defined in ``dependency-groups`` are discovered. + """ + + def __init__( + self, + project_info_provider: iprojectinfoprovider.IProjectInfoProvider, + runner_info_provider: iextensionrunnerinfoprovider.IExtensionRunnerInfoProvider, + logger: ilogger.ILogger, + ) -> None: + self.project_info_provider = project_info_provider + self.runner_info_provider = runner_info_provider + self.logger = logger + + async def run( + self, + payload: create_envs_action.CreateEnvsRunPayload, + run_context: create_envs_action.CreateEnvsRunContext, + ) -> create_envs_action.CreateEnvsRunResult: + if payload.envs: + envs = list(payload.envs) + else: + project_def_path = self.project_info_provider.get_current_project_def_path() + project_raw_config = ( + await self.project_info_provider.get_current_project_raw_config() + ) + deps_groups = project_raw_config.get("dependency-groups", {}) + + envs = [ + create_envs_action.EnvInfo( + name=env_name, + venv_dir_path=path_to_resource_uri( + self.runner_info_provider.get_venv_dir_path_of_env(env_name) + ), + project_def_path=path_to_resource_uri(project_def_path), + ) + for env_name in deps_groups + ] + + self.logger.debug(f"Discovered envs for creation: {[e.name for e in envs]}") + run_context.envs = envs + return create_envs_action.CreateEnvsRunResult(errors=[]) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/create_envs_dispatch.py 
b/finecode_builtin_handlers/src/finecode_builtin_handlers/create_envs_dispatch.py new file mode 100644 index 00000000..e8f10b64 --- /dev/null +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/create_envs_dispatch.py @@ -0,0 +1,65 @@ +import asyncio +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments import ( + create_env_action, + create_envs_action, +) +from finecode_extension_api.interfaces import iactionrunner, ilogger + + +@dataclasses.dataclass +class CreateEnvsDispatchHandlerConfig(code_action.ActionHandlerConfig): ... + + +class CreateEnvsDispatchHandler( + code_action.ActionHandler[ + create_envs_action.CreateEnvsAction, CreateEnvsDispatchHandlerConfig + ] +): + """Dispatch a create_env call per environment concurrently.""" + + def __init__( + self, action_runner: iactionrunner.IActionRunner, logger: ilogger.ILogger + ) -> None: + self.action_runner = action_runner + self.logger = logger + + async def run( + self, + payload: create_envs_action.CreateEnvsRunPayload, + run_context: create_envs_action.CreateEnvsRunContext, + ) -> create_envs_action.CreateEnvsRunResult: + if run_context.envs is None: + raise code_action.ActionFailedException( + "envs must be either provided in payload or be discovered by previous `create_envs` handlers" + ) + + create_env_action_instance = self.action_runner.get_action_by_source( + create_env_action.CreateEnvAction, + ) + + tasks: list[asyncio.Task[create_envs_action.CreateEnvsRunResult]] = [] + try: + async with asyncio.TaskGroup() as tg: + for env in run_context.envs: + task = tg.create_task( + self.action_runner.run_action( + action=create_env_action_instance, + payload=create_env_action.CreateEnvRunPayload( + env=env, + recreate=payload.recreate, + ), + meta=run_context.meta, + ) + ) + tasks.append(task) + except ExceptionGroup as eg: + error_str = ". 
".join([str(e) for e in eg.exceptions]) + raise code_action.ActionFailedException(error_str) from eg + + errors: list[str] = [] + for task in tasks: + errors += task.result().errors + return create_envs_action.CreateEnvsRunResult(errors=errors) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/dependency_config_utils.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/dependency_config_utils.py index f42e8743..e05d577c 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/dependency_config_utils.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/dependency_config_utils.py @@ -96,8 +96,49 @@ def raw_dep_to_dep_dict(raw_dep: str, env_deps_config: dict) -> dict[str, str | return dep_dict +def process_raw_deps( + raw_deps: list, + env_deps_config: dict, + dependencies: list, + deps_groups: dict, + project_def_path: pathlib.Path, + _seen: set[str] | None = None, +) -> None: + if _seen is None: + _seen = set() + for raw_dep in raw_deps: + if isinstance(raw_dep, str): + name = get_dependency_name(raw_dep) + if name in _seen: + continue + _seen.add(name) + dep_config = env_deps_config.get(name, {}) + editable = dep_config.get("editable", False) + if editable and (raw_path := dep_config.get("path")): + resolved = pathlib.Path(raw_path) + if not resolved.is_absolute(): + resolved = (project_def_path.parent / resolved).resolve() + version_or_source = f" @ file://{resolved.as_posix()}" + else: + version_or_source = raw_dep[len(name):] + dependencies.append( + { + "name": name, + "version_or_source": version_or_source, + "editable": editable, + } + ) + elif isinstance(raw_dep, dict) and "include-group" in raw_dep: + included_group_deps = deps_groups.get(raw_dep["include-group"], []) + process_raw_deps( + included_group_deps, env_deps_config, dependencies, deps_groups, + project_def_path, _seen, + ) + + __all__ = [ "make_project_config_pip_compatible", "get_dependency_name", + "process_raw_deps", "raw_dep_to_dep_dict", ] 
diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config.py index a8b611c7..fdaf7de4 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config.py @@ -1,7 +1,8 @@ +# docs: docs/reference/actions.md import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import dump_config as dump_config_action +from finecode_extension_api.actions.system import dump_config_action @dataclasses.dataclass diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config_save.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config_save.py index 2d531c24..fdcb6419 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config_save.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/dump_config_save.py @@ -1,9 +1,10 @@ +# docs: docs/reference/actions.md import dataclasses import tomlkit from finecode_extension_api import code_action -from finecode_extension_api.actions import dump_config as dump_config_action +from finecode_extension_api.actions.system import dump_config_action from finecode_extension_api.interfaces import ifilemanager, ifileeditor diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/format.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/format.py index 5f20c491..0a5da2cc 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/format.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/format.py @@ -1,19 +1,11 @@ -import asyncio +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - format as format_action, - format_files as format_files_action, - list_src_artifact_files_by_lang as 
list_src_artifact_files_by_lang_action, - group_src_artifact_files_by_lang as group_src_artifact_files_by_lang_action, -) -from finecode_extension_api.interfaces import ( - iactionrunner, - ifileeditor, - ilogger, -) +from finecode_extension_api.actions.artifact import list_src_artifact_files_by_lang_action +from finecode_extension_api.actions.code_quality import format_action, format_files_action +from finecode_extension_api.interfaces import iactionrunner, ifileeditor, ilogger +from finecode_extension_api.resource_uri import ResourceUri, path_to_resource_uri @dataclasses.dataclass @@ -37,119 +29,51 @@ async def run( self, payload: format_action.FormatRunPayload, run_context: format_action.FormatRunContext, - ) -> format_action.FormatRunResult: - files_by_lang: dict[str, list[pathlib.Path]] = {} - - # first get languages for which formatters are available, they change rarely - # only on project config change - all_actions = self.action_runner.get_actions_names() - format_files_prefix = "format_files_" - format_files_actions = [ - action_name - for action_name in all_actions - if action_name.startswith(format_files_prefix) - ] - # TODO: ordered set? - # TODO: cache and update on project config change - langs_supported_by_format = list( - set( - [ - action_name[len(format_files_prefix) :] - for action_name in format_files_actions - ] - ) - ) + ): run_meta = run_context.meta + file_uris: list[ResourceUri] if payload.target == format_action.FormatTarget.PROJECT: if ( run_meta.dev_env == code_action.DevEnv.IDE and run_meta.trigger == code_action.RunActionTrigger.SYSTEM ): - # performance optimization: if IDE automatically(=`trigger == SYSTEM`) - # tries to format the whole project, format only files owned by IDE(usually - # these are opened files). - # In future it could be improved by formatting opened files + dependencies - # or e.g. files changed according to git + dependencies. 
- files_to_format: list[pathlib.Path] = self.file_editor.get_opened_files() - group_project_files_action = self.action_runner.get_action_by_name( - "group_src_artifact_files_by_lang", group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangAction - ) - group_src_artifact_files_by_lang_payload = group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangRunPayload( - file_paths=files_to_format, langs=langs_supported_by_format - ) - files_by_lang_result = await self.action_runner.run_action( - action=group_project_files_action, - payload=group_src_artifact_files_by_lang_payload, - meta=run_meta - ) - files_by_lang = files_by_lang_result.files_by_lang + # Performance optimisation: when the IDE triggers a background project + # format automatically, only format the currently opened files. + file_uris = [ + path_to_resource_uri(p) + for p in self.file_editor.get_opened_files() + ] else: - # not automatic check of IDE, format the whole project. - # Instead of getting all files in the project and then grouping them by - # language, use `list_src_artifact_files_by_lang_action` action which returns - # only files with supported languages - list_src_artifact_file_by_lang_action_instance = ( - self.action_runner.get_action_by_name("list_src_artifact_files_by_lang", list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangAction) - ) - list_src_artifact_files_by_lang_payload = ( - list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangRunPayload( - langs=langs_supported_by_format - ) + list_action = self.action_runner.get_action_by_source( + list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangAction, ) files_by_lang_result = await self.action_runner.run_action( - action=list_src_artifact_file_by_lang_action_instance, - payload=list_src_artifact_files_by_lang_payload, - meta=run_meta + action=list_action, + payload=list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangRunPayload( + langs=None + ), + meta=run_meta, ) - 
files_by_lang = files_by_lang_result.files_by_lang - + file_uris = [ + f + for files in files_by_lang_result.files_by_lang.values() + for f in files + ] else: - # format target are files, format them - files_to_format = payload.file_paths - group_src_artifact_files_by_lang_action_instance = ( - self.action_runner.get_action_by_name("group_src_artifact_files_by_lang", group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangAction) - ) - group_src_artifact_files_by_lang_payload = ( - group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangRunPayload( - file_paths=files_to_format, langs=langs_supported_by_format - ) - ) - files_by_lang_result = await self.action_runner.run_action( - action=group_src_artifact_files_by_lang_action_instance, - payload=group_src_artifact_files_by_lang_payload, - meta=run_meta - ) - files_by_lang = files_by_lang_result.files_by_lang - - # TODO: handle errors - format_tasks = [] - try: - async with asyncio.TaskGroup() as tg: - for lang, lang_files in files_by_lang.items(): - # TODO: handle errors - # TODO: handle KeyError? - action = self.action_runner.get_action_by_name( - format_files_prefix + lang, format_files_action.FormatFilesAction - ) - format_files_payload = format_files_action.FormatFilesRunPayload( - file_paths=lang_files, save=payload.save - ) - format_task = tg.create_task( - self.action_runner.run_action( - action=action, payload=format_files_payload, meta=run_meta - ) - ) - format_tasks.append(format_task) - except ExceptionGroup as eg: - error_str = ". 
".join([str(exception) for exception in eg.exceptions]) - raise code_action.ActionFailedException(error_str) from eg + file_uris = payload.file_paths - format_results = [task.result() for task in format_tasks] - if len(format_results) > 0: - result = format_action.FormatRunResult(result_by_file_path={}) - for subresult in format_results: - result.update(subresult) - return result - else: - return format_action.FormatRunResult(result_by_file_path={}) + format_files_action_instance = self.action_runner.get_action_by_source( + format_files_action.FormatFilesAction + ) + async for partial in self.action_runner.run_action_iter( + action=format_files_action_instance, + payload=format_files_action.FormatFilesRunPayload( + file_paths=file_uris, + save=payload.save, + ), + meta=run_meta, + ): + yield format_action.FormatRunResult( + result_by_file_path=partial.result_by_file_path + ) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/format_files_dispatch.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/format_files_dispatch.py new file mode 100644 index 00000000..2989eb29 --- /dev/null +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/format_files_dispatch.py @@ -0,0 +1,88 @@ +import asyncio +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.artifact import group_src_artifact_files_by_lang_action +from finecode_extension_api.actions.code_quality import format_files_action +from finecode_extension_api.interfaces import iactionrunner, ilogger + + +@dataclasses.dataclass +class FormatFilesDispatchHandlerConfig(code_action.ActionHandlerConfig): ... + + +class FormatFilesDispatchHandler( + code_action.ActionHandler[ + format_files_action.FormatFilesAction, + FormatFilesDispatchHandlerConfig, + ] +): + """Group files by language once and dispatch to format_{lang}_files subactions concurrently. 
+ + Subaction names follow the convention: language "python" maps to "format_python_files", + "javascript" maps to "format_javascript_files", etc. Each subaction must be registered + in the project config. + """ + + def __init__( + self, + action_runner: iactionrunner.IActionRunner, + logger: ilogger.ILogger, + ) -> None: + self.action_runner = action_runner + self.logger = logger + + async def run( + self, + payload: format_files_action.FormatFilesRunPayload, + run_context: format_files_action.FormatFilesRunContext, + ) -> format_files_action.FormatFilesRunResult: + subactions_by_lang = self.action_runner.get_actions_for_parent( + format_files_action.FormatFilesAction + ) + + if not subactions_by_lang: + self.logger.debug("FormatFilesDispatchHandler: no language subactions registered") + return format_files_action.FormatFilesRunResult(result_by_file_path={}) + + # Group files by language — single pass, O(files). + group_action = self.action_runner.get_action_by_source( + group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangAction, + ) + files_by_lang_result = await self.action_runner.run_action( + action=group_action, + payload=group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangRunPayload( + file_paths=payload.file_paths, + langs=list(subactions_by_lang.keys()), + ), + meta=run_context.meta, + ) + files_by_lang = files_by_lang_result.files_by_lang + + # Dispatch concurrently: each language gets a disjoint file set, so parallel subactions never write the same file. + format_tasks: list[asyncio.Task[format_files_action.FormatFilesRunResult]] = [] + try: + async with asyncio.TaskGroup() as tg: + for lang, files in files_by_lang.items(): + if not files: + continue + format_tasks.append( + tg.create_task( + self.action_runner.run_action( + action=subactions_by_lang[lang], + payload=format_files_action.FormatFilesRunPayload( + file_paths=files, + save=payload.save, + ), + meta=run_context.meta, + ) + ) + ) + except ExceptionGroup as eg: + error_str = ". 
".join([str(e) for e in eg.exceptions]) + raise code_action.ActionFailedException(error_str) from eg + + result = format_files_action.FormatFilesRunResult(result_by_file_path={}) + for task in format_tasks: + result.update(task.result()) + return result diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/format_files_save_handler.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/format_files_save_handler.py index 7db75b1d..22064069 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/format_files_save_handler.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/format_files_save_handler.py @@ -1,8 +1,10 @@ +# docs: docs/reference/actions.md import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import format_files as format_files_actions +from finecode_extension_api.actions.code_quality import format_files_action from finecode_extension_api.interfaces import ifileeditor, ilogger +from finecode_extension_api.resource_uri import resource_uri_to_path @dataclasses.dataclass @@ -11,7 +13,7 @@ class SaveFormatFilesHandlerConfig(code_action.ActionHandlerConfig): ... 
class SaveFormatFilesHandler( code_action.ActionHandler[ - format_files_actions.FormatFilesAction, SaveFormatFilesHandlerConfig + format_files_action.FormatFilesAction, SaveFormatFilesHandlerConfig ] ): FILE_OPERATION_AUTHOR = ifileeditor.FileOperationAuthor(id="SaveFormatFilesHandler") @@ -24,28 +26,29 @@ def __init__( async def run( self, - payload: format_files_actions.FormatFilesRunPayload, - run_context: format_files_actions.FormatFilesRunContext, - ) -> format_files_actions.FormatFilesRunResult: - file_paths = payload.file_paths + payload: format_files_action.FormatFilesRunPayload, + run_context: format_files_action.FormatFilesRunContext, + ) -> format_files_action.FormatFilesRunResult: + file_uris = payload.file_paths save = payload.save if save is True: async with self.file_editor.session(self.FILE_OPERATION_AUTHOR) as session: - for file_path in file_paths: - file_content = run_context.file_info_by_path[file_path].file_content + for file_uri in file_uris: + file_content = run_context.file_info_by_path[file_uri].file_content # TODO: only if changed? 
await session.save_file( - file_path=file_path, file_content=file_content + file_path=resource_uri_to_path(file_uri), + file_content=file_content, ) - result = format_files_actions.FormatFilesRunResult( + result = format_files_action.FormatFilesRunResult( result_by_file_path={ - file_path: format_files_actions.FormatRunFileResult( + file_uri: format_files_action.FormatRunFileResult( changed=False, # this handler doesn't change files, only saves them - code=run_context.file_info_by_path[file_path].file_content, + code=run_context.file_info_by_path[file_uri].file_content, ) - for file_path in file_paths + for file_uri in file_uris } ) return result diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/init_repository_provider.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/init_repository_provider.py index 1e762379..5e486032 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/init_repository_provider.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/init_repository_provider.py @@ -1,9 +1,8 @@ +# docs: docs/reference/actions.md import dataclasses from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - init_repository_provider as init_repository_provider_action, -) +from finecode_extension_api.actions.publishing import init_repository_provider_action from finecode_extension_api.interfaces import irepositorycredentialsprovider diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/install_env_install_deps.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_env_install_deps.py new file mode 100644 index 00000000..f6674930 --- /dev/null +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_env_install_deps.py @@ -0,0 +1,86 @@ +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments import ( + install_deps_in_env_action, + install_env_action, +) +from 
finecode_extension_api.actions.environments.install_envs_action import ( + InstallEnvsRunResult, +) +from finecode_extension_api.interfaces import iactionrunner, ilogger +from finecode_extension_api.resource_uri import path_to_resource_uri, resource_uri_to_path +from finecode_builtin_handlers.dependency_config_utils import process_raw_deps + + +@dataclasses.dataclass +class InstallEnvInstallDepsHandlerConfig(code_action.ActionHandlerConfig): ... + + +class InstallEnvInstallDepsHandler( + code_action.ActionHandler[ + install_env_action.InstallEnvAction, + InstallEnvInstallDepsHandlerConfig, + ] +): + def __init__( + self, action_runner: iactionrunner.IActionRunner, logger: ilogger.ILogger + ) -> None: + self.action_runner = action_runner + self.logger = logger + + async def run( + self, + payload: install_env_action.InstallEnvRunPayload, + run_context: install_env_action.InstallEnvRunContext, + ) -> InstallEnvsRunResult: + env = payload.env + project_def = run_context.project_def + if project_def is None: + raise code_action.ActionFailedException( + "project_def must be set by InstallEnvReadConfigHandler" + ) + + install_deps_in_env_action_instance = self.action_runner.get_action_by_source( + install_deps_in_env_action.InstallDepsInEnvAction, + ) + + deps_groups = project_def.get("dependency-groups", {}) + env_raw_deps = deps_groups.get(env.name, []) + env_deps_config = ( + project_def.get("tool", {}) + .get("finecode", {}) + .get("env", {}) + .get(env.name, {}) + .get("dependencies", {}) + ) + project_def_path = resource_uri_to_path(env.project_def_path) + dependencies: list[dict] = [] + process_raw_deps( + env_raw_deps, + env_deps_config, + dependencies, + deps_groups, + project_def_path=project_def_path, + ) + + install_deps_payload = install_deps_in_env_action.InstallDepsInEnvRunPayload( + env_name=env.name, + venv_dir_path=env.venv_dir_path, + project_dir_path=path_to_resource_uri(project_def_path.parent), + dependencies=[ + 
install_deps_in_env_action.Dependency( + name=dep["name"], + version_or_source=dep["version_or_source"], + editable=dep["editable"], + ) + for dep in dependencies + ], + ) + + result = await self.action_runner.run_action( + action=install_deps_in_env_action_instance, + payload=install_deps_payload, + meta=run_context.meta, + ) + return InstallEnvsRunResult(errors=result.errors) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/install_env_read_config.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_env_read_config.py new file mode 100644 index 00000000..2f88b257 --- /dev/null +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_env_read_config.py @@ -0,0 +1,44 @@ +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments import install_env_action +from finecode_extension_api.actions.environments.install_envs_action import ( + InstallEnvsRunResult, +) +from finecode_extension_api.interfaces import ilogger, iprojectinfoprovider +from finecode_extension_api.resource_uri import resource_uri_to_path +from finecode_builtin_handlers import dependency_config_utils + + +@dataclasses.dataclass +class InstallEnvReadConfigHandlerConfig(code_action.ActionHandlerConfig): ... 
+ + +class InstallEnvReadConfigHandler( + code_action.ActionHandler[ + install_env_action.InstallEnvAction, + InstallEnvReadConfigHandlerConfig, + ] +): + def __init__( + self, + project_info_provider: iprojectinfoprovider.IProjectInfoProvider, + logger: ilogger.ILogger, + ) -> None: + self.project_info_provider = project_info_provider + self.logger = logger + + async def run( + self, + payload: install_env_action.InstallEnvRunPayload, + run_context: install_env_action.InstallEnvRunContext, + ) -> InstallEnvsRunResult: + project_def_path = resource_uri_to_path(payload.env.project_def_path) + project_raw_config = await self.project_info_provider.get_project_raw_config( + project_def_path + ) + dependency_config_utils.make_project_config_pip_compatible( + project_raw_config, project_def_path + ) + run_context.project_def = project_raw_config + return InstallEnvsRunResult(errors=[]) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/install_envs_discover_envs.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_envs_discover_envs.py new file mode 100644 index 00000000..b6d16e44 --- /dev/null +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_envs_discover_envs.py @@ -0,0 +1,72 @@ +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments import install_envs_action +from finecode_extension_api.actions.environments.create_envs_action import EnvInfo +from finecode_extension_api.interfaces import ( + iextensionrunnerinfoprovider, + ilogger, + iprojectinfoprovider, +) +from finecode_extension_api.resource_uri import path_to_resource_uri + + +@dataclasses.dataclass +class InstallEnvsDiscoverEnvsHandlerConfig(code_action.ActionHandlerConfig): ... 
+ + +class InstallEnvsDiscoverEnvsHandler( + code_action.ActionHandler[ + install_envs_action.InstallEnvsAction, + InstallEnvsDiscoverEnvsHandlerConfig, + ] +): + """Discover and populate run_context.envs from the current project's config. + + If payload.envs is already non-empty (explicit caller), those envs are + used as-is — the caller is responsible for any filtering. + Otherwise envs are discovered from dependency-groups: every dependency group + defined in the project definition is included as an env. + payload.env_names filters the discovered list.""" + + def __init__( + self, + project_info_provider: iprojectinfoprovider.IProjectInfoProvider, + runner_info_provider: iextensionrunnerinfoprovider.IExtensionRunnerInfoProvider, + logger: ilogger.ILogger, + ) -> None: + self.project_info_provider = project_info_provider + self.runner_info_provider = runner_info_provider + self.logger = logger + + async def run( + self, + payload: install_envs_action.InstallEnvsRunPayload, + run_context: install_envs_action.InstallEnvsRunContext, + ) -> install_envs_action.InstallEnvsRunResult: + if payload.envs: + envs = list(payload.envs) + else: + project_def_path = self.project_info_provider.get_current_project_def_path() + project_raw_config = ( + await self.project_info_provider.get_current_project_raw_config() + ) + deps_groups = project_raw_config.get("dependency-groups", {}) + + envs = [ + EnvInfo( + name=env_name, + venv_dir_path=path_to_resource_uri( + self.runner_info_provider.get_venv_dir_path_of_env(env_name) + ), + project_def_path=path_to_resource_uri(project_def_path), + ) + for env_name in deps_groups + ] + + if payload.env_names is not None: + envs = [e for e in envs if e.name in payload.env_names] + + self.logger.debug(f"Discovered handler envs: {[e.name for e in envs]}") + run_context.envs = envs + return install_envs_action.InstallEnvsRunResult(errors=[]) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/install_envs_dispatch.py 
b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_envs_dispatch.py
new file mode 100644
index 00000000..c9f1f331
--- /dev/null
+++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/install_envs_dispatch.py
@@ -0,0 +1,66 @@
+import asyncio
+import dataclasses
+
+from finecode_extension_api import code_action
+from finecode_extension_api.actions.environments import (
+    install_env_action,
+    install_envs_action,
+)
+from finecode_extension_api.interfaces import iactionrunner, ilogger
+
+
+@dataclasses.dataclass
+class InstallEnvsDispatchHandlerConfig(code_action.ActionHandlerConfig): ...
+
+
+class InstallEnvsDispatchHandler(
+    code_action.ActionHandler[
+        install_envs_action.InstallEnvsAction,
+        InstallEnvsDispatchHandlerConfig,
+    ]
+):
+    """Dispatch an install_env call per environment concurrently."""
+
+    def __init__(
+        self, action_runner: iactionrunner.IActionRunner, logger: ilogger.ILogger
+    ) -> None:
+        self.action_runner = action_runner
+        self.logger = logger
+
+    async def run(
+        self,
+        payload: install_envs_action.InstallEnvsRunPayload,
+        run_context: install_envs_action.InstallEnvsRunContext,
+    ) -> install_envs_action.InstallEnvsRunResult:
+        install_env_action_instance = self.action_runner.get_action_by_source(
+            install_env_action.InstallEnvAction,
+        )
+
+        if run_context.envs is None:
+            raise code_action.ActionFailedException(
+                "envs must be provided in payload or populated by previous handlers"
+            )
+        tasks: list[
+            asyncio.Task[install_envs_action.InstallEnvsRunResult]
+        ] = []
+        try:
+            async with asyncio.TaskGroup() as tg:
+                for env in run_context.envs:
+                    task = tg.create_task(
+                        self.action_runner.run_action(
+                            action=install_env_action_instance,
+                            payload=install_env_action.InstallEnvRunPayload(
+                                env=env,
+                            ),
+                            meta=run_context.meta,
+                        )
+                    )
+                    tasks.append(task)
+        except ExceptionGroup as eg:
+            error_str = ". 
".join([str(e) for e in eg.exceptions]) + raise code_action.ActionFailedException(error_str) from eg + + errors: list[str] = [] + for task in tasks: + errors += task.result().errors + return install_envs_action.InstallEnvsRunResult(errors=errors) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/lint.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/lint.py index 8cdf7969..be86bb53 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/lint.py +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/lint.py @@ -1,23 +1,18 @@ -import asyncio +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - lint as lint_action, - lint_files as lint_files_action, - list_src_artifact_files_by_lang as list_src_artifact_files_by_lang_action, - group_src_artifact_files_by_lang as group_src_artifact_files_by_lang_action -) -from finecode_extension_api.interfaces import ( - iactionrunner, - ifileeditor, - ilogger, -) +from finecode_extension_api.actions.artifact import list_src_artifact_files_by_lang_action +from finecode_extension_api.actions.code_quality import lint_action, lint_files_action +from finecode_extension_api.interfaces import iactionrunner, ifileeditor, ilogger +from finecode_extension_api.resource_uri import ResourceUri, path_to_resource_uri @dataclasses.dataclass -class LintHandlerConfig(code_action.ActionHandlerConfig): ... +class LintHandlerConfig(code_action.ActionHandlerConfig): + lint_opened_files_only_in_ide: bool = True + """When True (default), background IDE lints triggered automatically only lint + currently opened files for performance. 
Set to False to always lint the full project.""" class LintHandler( @@ -26,8 +21,13 @@ class LintHandler( ] ): def __init__( - self, action_runner: iactionrunner.IActionRunner, logger: ilogger.ILogger, file_editor: ifileeditor.IFileEditor + self, + config: LintHandlerConfig, + action_runner: iactionrunner.IActionRunner, + logger: ilogger.ILogger, + file_editor: ifileeditor.IFileEditor, ) -> None: + self.config = config self.action_runner = action_runner self.file_editor = file_editor self.logger = logger @@ -36,87 +36,47 @@ async def run( self, payload: lint_action.LintRunPayload, run_context: lint_action.LintRunContext, - ) -> lint_action.LintRunResult: - # files_to_lint: list[pathlib.Path] = [] - files_by_lang: dict[str, list[pathlib.Path]] = {} - - # first get languages for which linters are available, they change rarely - # only on project config change - all_actions = self.action_runner.get_actions_names() - lint_files_prefix = 'lint_files_' - lint_files_actions = [action_name for action_name in all_actions if action_name.startswith(lint_files_prefix)] - # TODO: ordered set? - # TODO: cache and update on project config change - langs_supported_by_lint = list(set([action_name[len(lint_files_prefix):] for action_name in lint_files_actions])) + ): run_meta = run_context.meta + file_uris: list[ResourceUri] if payload.target == lint_action.LintTarget.PROJECT: - if run_meta.dev_env == code_action.DevEnv.IDE and run_meta.trigger == code_action.RunActionTrigger.SYSTEM: - # performance optimization: if IDE automatically(=`trigger == SYSTEM`) - # tries to lint the whole project, lint only files owned by IDE(usually - # these are opened files). - # In future it could be improved by linting opened files + dependencies - # or e.g. files changed according to git + dependencies. 
- files_to_lint: list[pathlib.Path] = self.file_editor.get_opened_files() - group_project_files_action = self.action_runner.get_action_by_name('group_src_artifact_files_by_lang', group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangAction) - group_src_artifact_files_by_lang_payload = group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangRunPayload(file_paths=files_to_lint, langs=langs_supported_by_lint) - files_by_lang_result = await self.action_runner.run_action( - action=group_project_files_action, - payload=group_src_artifact_files_by_lang_payload, - meta=run_meta - ) - files_by_lang = files_by_lang_result.files_by_lang + if ( + self.config.lint_opened_files_only_in_ide + and run_meta.dev_env == code_action.DevEnv.IDE + and run_meta.trigger == code_action.RunActionTrigger.SYSTEM + ): + # Performance optimisation: when the IDE triggers a background project + # lint automatically, only lint the currently opened files. + file_uris = [ + path_to_resource_uri(p) + for p in self.file_editor.get_opened_files() + ] else: - # not automatic check of IDE, lint the whole project. 
- # Instead of getting all files in the project and then grouping them by - # language, use `list_src_artifact_files_by_lang_action` action which returns - # only files with supported languages - list_src_artifact_file_by_lang_action_instance = self.action_runner.get_action_by_name('list_src_artifact_files_by_lang', list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangAction) - list_src_artifact_files_by_lang_payload = list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangRunPayload(langs=langs_supported_by_lint) + list_action = self.action_runner.get_action_by_source( + list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangAction, + ) files_by_lang_result = await self.action_runner.run_action( - action=list_src_artifact_file_by_lang_action_instance, - payload=list_src_artifact_files_by_lang_payload, - meta=run_meta + action=list_action, + payload=list_src_artifact_files_by_lang_action.ListSrcArtifactFilesByLangRunPayload( + langs=None + ), + meta=run_meta, ) - files_by_lang = files_by_lang_result.files_by_lang - + file_uris = [ + f + for files in files_by_lang_result.files_by_lang.values() + for f in files + ] else: - # lint target are files, lint them - files_to_lint = payload.file_paths - group_src_artifact_files_by_lang_action_instance = self.action_runner.get_action_by_name('group_src_artifact_files_by_lang', group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangAction) - group_src_artifact_files_by_lang_payload = group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangRunPayload(file_paths=files_to_lint, langs=langs_supported_by_lint) - files_by_lang_result = await self.action_runner.run_action( - action=group_src_artifact_files_by_lang_action_instance, - payload=group_src_artifact_files_by_lang_payload, - meta=run_meta - ) - files_by_lang = files_by_lang_result.files_by_lang - - # TODO: handle errors - lint_tasks = [] - try: - async with asyncio.TaskGroup() as tg: - for lang, lang_files in 
files_by_lang.items(): - # TODO: handle errors - # TODO: handle KeyError? - actions = self.action_runner.get_actions_for_language(source="finecode_extension_api.actions.lint_files.LintFilesAction", language=lang, expected_type=lint_files_action.LintFilesAction) - lint_files_payload = lint_files_action.LintFilesRunPayload(file_paths=lang_files) - for action in actions: - lint_task = tg.create_task(self.action_runner.run_action( - action=action, - payload=lint_files_payload, - meta=run_meta - )) - lint_tasks.append(lint_task) - except ExceptionGroup as eg: - error_str = ". ".join([str(exception) for exception in eg.exceptions]) - raise code_action.ActionFailedException(error_str) from eg + file_uris = payload.file_paths - lint_results = [task.result() for task in lint_tasks] - if len(lint_results) > 0: - result = lint_action.LintRunResult(messages={}) - for subresult in lint_results: - result.update(subresult) - return result - else: - return lint_action.LintRunResult(messages={}) + lint_files_action_instance = self.action_runner.get_action_by_source( + lint_files_action.LintFilesAction + ) + async for partial in self.action_runner.run_action_iter( + action=lint_files_action_instance, + payload=lint_files_action.LintFilesRunPayload(file_paths=file_uris), + meta=run_meta, + ): + yield lint_action.LintRunResult(messages=partial.messages) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/lint_files_dispatch.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/lint_files_dispatch.py new file mode 100644 index 00000000..a119cfa6 --- /dev/null +++ b/finecode_builtin_handlers/src/finecode_builtin_handlers/lint_files_dispatch.py @@ -0,0 +1,87 @@ +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.artifact import group_src_artifact_files_by_lang_action +from finecode_extension_api.actions.code_quality import lint_files_action +from finecode_extension_api.interfaces import iactionrunner, ilogger 
+from finecode_extension_api.resource_uri import ResourceUri + + +@dataclasses.dataclass +class LintFilesDispatchHandlerConfig(code_action.ActionHandlerConfig): ... + + +class LintFilesDispatchHandler( + code_action.ActionHandler[ + lint_files_action.LintFilesAction, + LintFilesDispatchHandlerConfig, + ] +): + """Group files by language once and dispatch to lint_{lang}_files subactions. + + Subaction names follow the convention: language "python" maps to "lint_python_files", + "javascript" maps to "lint_javascript_files", etc. Each subaction must be registered + in the project config. + """ + + def __init__( + self, + action_runner: iactionrunner.IActionRunner, + logger: ilogger.ILogger, + ) -> None: + self.action_runner = action_runner + self.logger = logger + + async def _lint_file( + self, + subaction: iactionrunner.ActionDeclaration[lint_files_action.LintFilesAction], + file_uri: ResourceUri, + meta: code_action.RunActionMeta, + ) -> lint_files_action.LintFilesRunResult: + return await self.action_runner.run_action( + action=subaction, + payload=lint_files_action.LintFilesRunPayload(file_paths=[file_uri]), + meta=meta, + ) + + async def run( + self, + payload: lint_files_action.LintFilesRunPayload, + run_context: lint_files_action.LintFilesRunContext, + ) -> None: + subactions_by_lang = self.action_runner.get_actions_for_parent( + lint_files_action.LintFilesAction + ) + + if not subactions_by_lang: + self.logger.debug("LintFilesDispatchHandler: no language subactions registered") + return + + # Group files by language — single pass, O(files). 
+ group_action = self.action_runner.get_action_by_source( + group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangAction, + ) + files_by_lang_result = await self.action_runner.run_action( + action=group_action, + payload=group_src_artifact_files_by_lang_action.GroupSrcArtifactFilesByLangRunPayload( + file_paths=payload.file_paths, + langs=list(subactions_by_lang.keys()), + ), + meta=run_context.meta, + ) + files_by_lang = files_by_lang_result.files_by_lang + + # Build reverse mapping: file → language subaction. + file_to_subaction: dict[ResourceUri, iactionrunner.ActionDeclaration[lint_files_action.LintFilesAction]] = {} + for lang, files in files_by_lang.items(): + for file_uri in files: + file_to_subaction[file_uri] = subactions_by_lang[lang] + + # Schedule per-file coroutines via partial_result_scheduler so that + # run_action can execute them concurrently and send partial results. + for file_uri in payload.file_paths: + if file_uri in file_to_subaction: + run_context.partial_result_scheduler.schedule( + file_uri, + self._lint_file(file_to_subaction[file_uri], file_uri, run_context.meta), + ) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_envs_install_deps.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_envs_install_deps.py deleted file mode 100644 index 88d1c5f4..00000000 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_envs_install_deps.py +++ /dev/null @@ -1,107 +0,0 @@ -import asyncio -import dataclasses - -from finecode_extension_api import code_action -from finecode_extension_api.actions import prepare_envs as prepare_envs_action, install_deps_in_env as install_deps_in_env_action -from finecode_extension_api.interfaces import ( - iactionrunner, - ilogger, -) -from finecode_builtin_handlers import dependency_config_utils - - -@dataclasses.dataclass -class PrepareEnvsInstallDepsHandlerConfig(code_action.ActionHandlerConfig): ... 
- - -class PrepareEnvsInstallDepsHandler( - code_action.ActionHandler[ - prepare_envs_action.PrepareEnvsAction, PrepareEnvsInstallDepsHandlerConfig - ] -): - def __init__( - self, action_runner: iactionrunner.IActionRunner, logger: ilogger.ILogger - ) -> None: - self.action_runner = action_runner - self.logger = logger - - async def run( - self, - payload: prepare_envs_action.PrepareEnvsRunPayload, - run_context: prepare_envs_action.PrepareEnvsRunContext, - ) -> prepare_envs_action.PrepareEnvsRunResult: - envs = payload.envs - - install_deps_in_env_action_instance = self.action_runner.get_action_by_name(name="install_deps_in_env", expected_type=install_deps_in_env_action.InstallDepsInEnvAction) - install_deps_tasks: list[asyncio.Task[install_deps_in_env_action.InstallDepsInEnvRunResult]] = [] - run_meta = run_context.meta - try: - async with asyncio.TaskGroup() as tg: - for env in envs: - project_def = run_context.project_def_by_venv_dir_path[ - env.venv_dir_path - ] - - # straightforward solution for now - deps_groups = project_def.get("dependency-groups", {}) - env_raw_deps = deps_groups.get(env.name, []) - env_deps_config = ( - project_def.get("tool", {}) - .get("finecode", {}) - .get("env", {}) - .get(env.name, {}) - .get("dependencies", {}) - ) - dependencies = [] - - process_raw_deps( - env_raw_deps, env_deps_config, dependencies, deps_groups - ) - - install_deps_payload = install_deps_in_env_action.InstallDepsInEnvRunPayload( - env_name=env.name, - venv_dir_path=env.venv_dir_path, - project_dir_path=env.project_def_path.parent, - dependencies=[install_deps_in_env_action.Dependency(name=dep['name'], version_or_source=dep['version_or_source'], editable=dep['editable']) for dep in dependencies] - ) - - task = tg.create_task( - self.action_runner.run_action( - action=install_deps_in_env_action_instance, - payload=install_deps_payload, - meta=run_meta - ) - ) - install_deps_tasks.append(task) - except ExceptionGroup as eg: - error_str = ". 
".join([str(exception) for exception in eg.exceptions]) - raise code_action.ActionFailedException(error_str) from eg - - install_deps_results = [task.result() for task in install_deps_tasks] - errors: list[str] = [] - for result in install_deps_results: - errors += result.errors - - return prepare_envs_action.PrepareEnvsRunResult(errors=errors) - - -def process_raw_deps( - raw_deps: list, env_deps_config, dependencies, deps_groups -) -> None: - for raw_dep in raw_deps: - if isinstance(raw_dep, str): - name = dependency_config_utils.get_dependency_name(raw_dep) - version_or_source = raw_dep[len(name) :] - editable = env_deps_config.get(name, {}).get("editable", False) - dependencies.append( - { - "name": name, - "version_or_source": version_or_source, - "editable": editable, - } - ) - elif isinstance(raw_dep, dict) and "include-group" in raw_dep: - included_group_deps = deps_groups.get(raw_dep["include-group"], []) - process_raw_deps( - included_group_deps, env_deps_config, dependencies, deps_groups - ) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_envs_read_configs.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_envs_read_configs.py deleted file mode 100644 index 9f340826..00000000 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_envs_read_configs.py +++ /dev/null @@ -1,68 +0,0 @@ -import asyncio -import dataclasses -import pathlib -import typing - -from finecode_extension_api import code_action -from finecode_extension_api.actions import prepare_envs as prepare_envs_action -from finecode_extension_api.interfaces import ( - ilogger, - iprojectinfoprovider, -) -from finecode_builtin_handlers import dependency_config_utils - - -@dataclasses.dataclass -class PrepareEnvsReadConfigsHandlerConfig(code_action.ActionHandlerConfig): ... 
- - -class PrepareEnvsReadConfigsHandler( - code_action.ActionHandler[ - prepare_envs_action.PrepareEnvsAction, PrepareEnvsReadConfigsHandlerConfig - ] -): - def __init__( - self, - project_info_provider: iprojectinfoprovider.IProjectInfoProvider, - logger: ilogger.ILogger, - ) -> None: - self.project_info_provider = project_info_provider - self.logger = logger - - async def run( - self, - payload: prepare_envs_action.PrepareEnvsRunPayload, - run_context: prepare_envs_action.PrepareEnvsRunContext, - ) -> prepare_envs_action.PrepareEnvsRunResult: - project_defs_pathes = set( - [env_info.project_def_path for env_info in payload.envs] - ) - raw_config_by_project_def_path: dict[pathlib.Path, dict[str, typing.Any]] = {} - - get_config_tasks: list[asyncio.Task] = [] - async with asyncio.TaskGroup() as tg: - for project_def_path in project_defs_pathes: - task = tg.create_task( - self.project_info_provider.get_project_raw_config(project_def_path) - ) - get_config_tasks.append(task) - - for idx, project_def_path in enumerate(project_defs_pathes): - project_raw_config = get_config_tasks[idx].result() - dependency_config_utils.make_project_config_pip_compatible( - project_raw_config, project_def_path - ) - raw_config_by_project_def_path[project_def_path] = project_raw_config - - for env_info in payload.envs: - run_context.project_def_path_by_venv_dir_path[env_info.venv_dir_path] = ( - env_info.project_def_path - ) - project_raw_config = raw_config_by_project_def_path[ - env_info.project_def_path - ] - run_context.project_def_by_venv_dir_path[env_info.venv_dir_path] = ( - project_raw_config - ) - - return prepare_envs_action.PrepareEnvsRunResult(errors=[]) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_runners_install_runner_and_presets.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_runners_install_runner_and_presets.py deleted file mode 100644 index b1bc61eb..00000000 --- 
a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_runners_install_runner_and_presets.py +++ /dev/null @@ -1,175 +0,0 @@ -import asyncio -import dataclasses -import typing - -from finecode_extension_api import code_action -from finecode_extension_api.actions import prepare_runners as prepare_runners_action, install_deps_in_env as install_deps_in_env_action -from finecode_extension_api.interfaces import ( - iactionrunner, - ilogger, -) -from finecode_builtin_handlers import dependency_config_utils - - -@dataclasses.dataclass -class PrepareRunnersInstallRunnerAndPresetsHandlerConfig( - code_action.ActionHandlerConfig -): ... - - -class PrepareRunnersInstallRunnerAndPresetsHandler( - code_action.ActionHandler[ - prepare_runners_action.PrepareRunnersAction, - PrepareRunnersInstallRunnerAndPresetsHandlerConfig, - ] -): - def __init__( - self, action_runner: iactionrunner.IActionRunner, logger: ilogger.ILogger - ) -> None: - self.action_runner = action_runner - self.logger = logger - - async def run( - self, - payload: prepare_runners_action.PrepareRunnersRunPayload, - run_context: prepare_runners_action.PrepareRunnersRunContext, - ) -> prepare_runners_action.PrepareRunnersRunResult: - # find finecode_extension_runner in deps - # find presets in config and their version in deps - # install all these packages - envs = payload.envs - - dependencies_by_env: dict[str, list[dict]] = {} - for env in envs: - project_def = run_context.project_def_by_venv_dir_path[env.venv_dir_path] - project_def_path = run_context.project_def_path_by_venv_dir_path[ - env.venv_dir_path - ] - try: - dependencies = get_dependencies_in_project_raw_config( - project_def, env.name - ) - except FailedToGetDependencies as exception: - raise code_action.ActionFailedException( - f"Failed to get dependencies of env {env.name} in {project_def_path}: {exception.message} (install_runner_and_presets handler)" - ) - dependencies_by_env[env.name] = dependencies - - 
install_deps_in_env_action_instance = self.action_runner.get_action_by_name(name="install_deps_in_env", expected_type=install_deps_in_env_action.InstallDepsInEnvAction) - install_deps_tasks: list[asyncio.Task[install_deps_in_env_action.InstallDepsInEnvRunResult]] = [] - run_meta = run_context.meta - try: - async with asyncio.TaskGroup() as tg: - for env in envs: - install_deps_payload = install_deps_in_env_action.InstallDepsInEnvRunPayload( - env_name=env.name, - venv_dir_path=env.venv_dir_path, - project_dir_path=env.project_def_path.parent, - dependencies=[install_deps_in_env_action.Dependency(name=dep['name'], version_or_source=dep['version_or_source'], editable=dep['editable']) for dep in dependencies_by_env[env.name]] - ) - task = tg.create_task( - self.action_runner.run_action( - action=install_deps_in_env_action_instance, - payload=install_deps_payload, - meta=run_meta - ) - ) - install_deps_tasks.append(task) - except ExceptionGroup as eg: - errors: list[str] = [] - for exception in eg.exceptions: - if isinstance(exception, iactionrunner.BaseRunActionException): - errors.append(exception.message) - else: - # unexpected exception - error_str = ". 
".join( - [str(exception) for exception in eg.exceptions] - ) - raise code_action.ActionFailedException(error_str) from eg - - result = prepare_runners_action.PrepareRunnersRunResult(errors=errors) - raise code_action.StopActionRunWithResult(result=result) from eg - - install_deps_results = [task.result() for task in install_deps_tasks] - errors: list[str] = [] - for result in install_deps_results: - errors += result.errors - result = prepare_runners_action.PrepareRunnersRunResult(errors=errors) - - return result - - -class FailedToGetDependencies(Exception): - def __init__(self, message: str) -> None: - self.message = message - - -def get_dependencies_in_project_raw_config( - project_raw_config: dict[str, typing.Any], env_name: str -): - # returns dependencies: presets and extension runner - presets_in_config = ( - project_raw_config.get("tool", {}).get("finecode", {}).get("presets", []) - ) - presets_packages_names: list[str] = [] - for preset_def in presets_in_config: - try: - preset_package = preset_def.get("source") - except KeyError: - # workspace manager validates configuration and source should - # always exist, but still handle - raise FailedToGetDependencies(f"preset has no source: {preset_def}") - presets_packages_names.append(preset_package) - - # straightforward solution for now - deps_groups = project_raw_config.get("dependency-groups", {}) - env_raw_deps = deps_groups.get(env_name, []) - env_deps_config = ( - project_raw_config.get("tool", {}) - .get("finecode", {}) - .get("env", {}) - .get(env_name, {}) - .get("dependencies", {}) - ) - dependencies = [] - - try: - runner_dep = next( - dep - for dep in env_raw_deps - if isinstance(dep, str) - and dependency_config_utils.get_dependency_name(dep) - == "finecode_extension_runner" - ) - except StopIteration: - raise FailedToGetDependencies( - f"prepare_runners expects finecode_extension_runner dependency in each environment, but it was not found in {env_name}" - ) - - runner_dep_dict = 
dependency_config_utils.raw_dep_to_dep_dict( - raw_dep=runner_dep, env_deps_config=env_deps_config - ) - dependencies.append(runner_dep_dict) - - for preset_package in presets_packages_names: - try: - preset_dep = next( - dep - for dep in env_raw_deps - if isinstance(dep, str) - and dependency_config_utils.get_dependency_name(dep) == preset_package - ) - except StopIteration: - if env_name == "dev_workspace": - # all preset packages must be in 'dev_workspace' env - raise FailedToGetDependencies( - f"'{preset_package}' is used as preset source, but not declared in 'dev_workspace' dependency group" - ) - else: - continue - - preset_dep_dict = dependency_config_utils.raw_dep_to_dep_dict( - raw_dep=preset_dep, env_deps_config=env_deps_config - ) - dependencies.append(preset_dep_dict) - return dependencies diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_runners_read_configs.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_runners_read_configs.py deleted file mode 100644 index e54b7f13..00000000 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/prepare_runners_read_configs.py +++ /dev/null @@ -1,69 +0,0 @@ -import asyncio -import dataclasses -import pathlib -import typing - -from finecode_extension_api import code_action -from finecode_extension_api.actions import prepare_runners as prepare_runners_action -from finecode_extension_api.interfaces import ( - ilogger, - iprojectinfoprovider, -) -from finecode_builtin_handlers import dependency_config_utils - - -@dataclasses.dataclass -class PrepareRunnersReadConfigsHandlerConfig(code_action.ActionHandlerConfig): ... 
- - -class PrepareRunnersReadConfigsHandler( - code_action.ActionHandler[ - prepare_runners_action.PrepareRunnersAction, - PrepareRunnersReadConfigsHandlerConfig, - ] -): - def __init__( - self, - project_info_provider: iprojectinfoprovider.IProjectInfoProvider, - logger: ilogger.ILogger, - ) -> None: - self.project_info_provider = project_info_provider - self.logger = logger - - async def run( - self, - payload: prepare_runners_action.PrepareRunnersRunPayload, - run_context: prepare_runners_action.PrepareRunnersRunContext, - ) -> prepare_runners_action.PrepareRunnersRunResult: - project_defs_pathes = set( - [env_info.project_def_path for env_info in payload.envs] - ) - raw_config_by_project_def_path: dict[pathlib.Path, dict[str, typing.Any]] = {} - - get_config_tasks: list[asyncio.Task] = [] - async with asyncio.TaskGroup() as tg: - for project_def_path in project_defs_pathes: - task = tg.create_task( - self.project_info_provider.get_project_raw_config(project_def_path) - ) - get_config_tasks.append(task) - - for idx, project_def_path in enumerate(project_defs_pathes): - project_raw_config = get_config_tasks[idx].result() - dependency_config_utils.make_project_config_pip_compatible( - project_raw_config, project_def_path - ) - raw_config_by_project_def_path[project_def_path] = project_raw_config - - for env_info in payload.envs: - run_context.project_def_path_by_venv_dir_path[env_info.venv_dir_path] = ( - env_info.project_def_path - ) - project_raw_config = raw_config_by_project_def_path[ - env_info.project_def_path - ] - run_context.project_def_by_venv_dir_path[env_info.venv_dir_path] = ( - project_raw_config - ) - - return prepare_runners_action.PrepareRunnersRunResult(errors=[]) diff --git a/finecode_builtin_handlers/src/finecode_builtin_handlers/publish_artifact.py b/finecode_builtin_handlers/src/finecode_builtin_handlers/publish_artifact.py index 53f5eb17..2b3aa908 100644 --- a/finecode_builtin_handlers/src/finecode_builtin_handlers/publish_artifact.py +++ 
b/finecode_builtin_handlers/src/finecode_builtin_handlers/publish_artifact.py @@ -1,18 +1,18 @@ +# docs: docs/reference/actions.md import asyncio import dataclasses -import pathlib from finecode_extension_api import code_action -from finecode_extension_api.actions import \ - get_src_artifact_registries as get_src_artifact_registries_action -from finecode_extension_api.actions import \ - get_src_artifact_version as get_src_artifact_version_action -from finecode_extension_api.actions import \ - is_artifact_published_to_registry as is_artifact_published_to_registry_action -from finecode_extension_api.actions import \ - publish_artifact as publish_artifact_action -from finecode_extension_api.actions import \ - publish_artifact_to_registry as publish_artifact_to_registry_action +from finecode_extension_api.resource_uri import ResourceUri +from finecode_extension_api.actions.artifact import ( + get_src_artifact_registries_action, + get_src_artifact_version_action, +) +from finecode_extension_api.actions.publishing import ( + is_artifact_published_to_registry_action, + publish_artifact_action, + publish_artifact_to_registry_action, +) from finecode_extension_api.interfaces import ( iactionrunner, ilogger, @@ -54,8 +54,8 @@ async def run( src_artifact_def_path = payload.src_artifact_def_path dist_artifact_paths = payload.dist_artifact_paths - get_version_action = self.action_runner.get_action_by_name( - "get_src_artifact_version", get_src_artifact_version_action.GetSrcArtifactVersionAction + get_version_action = self.action_runner.get_action_by_source( + get_src_artifact_version_action.GetSrcArtifactVersionAction ) version_payload = ( get_src_artifact_version_action.GetSrcArtifactVersionRunPayload( @@ -67,8 +67,8 @@ async def run( ) version = version_result.version - get_registries_action = self.action_runner.get_action_by_name( - "get_src_artifact_registries", get_src_artifact_registries_action.GetSrcArtifactRegistriesAction + get_registries_action = 
self.action_runner.get_action_by_source( + get_src_artifact_registries_action.GetSrcArtifactRegistriesAction ) registries_payload = ( get_src_artifact_registries_action.GetSrcArtifactRegistriesRunPayload( @@ -85,15 +85,15 @@ async def run( raise code_action.ActionFailedException("No registries are configured") # Build dict of paths to publish per registry - dist_paths_to_publish_by_registry: dict[str, list[pathlib.Path]] + dist_paths_to_publish_by_registry: dict[str, list[ResourceUri]] if payload.force: dist_paths_to_publish_by_registry = { registry.name: dist_artifact_paths for registry in registries_to_publish } else: - is_published_action = self.action_runner.get_action_by_name( - "is_artifact_published_to_registry", is_artifact_published_to_registry_action.IsArtifactPublishedToRegistryAction + is_published_action = self.action_runner.get_action_by_source( + is_artifact_published_to_registry_action.IsArtifactPublishedToRegistryAction ) check_tasks: list[tuple[asyncio.Task[is_artifact_published_to_registry_action.IsArtifactPublishedToRegistryRunResult], get_src_artifact_registries_action.Registry]] = [] @@ -131,8 +131,8 @@ async def run( dist_paths_to_publish_by_registry[registry.name] = not_published_paths # Publish to registries with unpublished artifacts - publish_to_registry_action = self.action_runner.get_action_by_name( - "publish_artifact_to_registry", publish_artifact_to_registry_action.PublishArtifactToRegistryAction + publish_to_registry_action = self.action_runner.get_action_by_source( + publish_artifact_to_registry_action.PublishArtifactToRegistryAction ) publish_tasks: list[asyncio.Task[publish_artifact_to_registry_action.PublishArtifactToRegistryRunResult]] = [] diff --git a/finecode_dev_common_preset/src/finecode_dev_common_preset/preset.toml b/finecode_dev_common_preset/src/finecode_dev_common_preset/preset.toml index 50653a0d..b03c18c0 100644 --- a/finecode_dev_common_preset/src/finecode_dev_common_preset/preset.toml +++ 
b/finecode_dev_common_preset/src/finecode_dev_common_preset/preset.toml @@ -12,7 +12,14 @@ finecode_extension_runner = { path = "../../../finecode_extension_runner", edita finecode_extension_api = { path = "../../../finecode_extension_api", editable = true } [tool.finecode.env.dev.dependencies] +finecode = { path = "../../..", editable = true } +finecode_extension_api = { path = "../../../finecode_extension_api", editable = true } finecode_extension_runner = { path = "../../../finecode_extension_runner", editable = true } +fine_python_pytest = { path = "../../../extensions/fine_python_pytest", editable = true } +finecode_builtin_handlers = { path = "../../../finecode_builtin_handlers", editable = true } +finecode_jsonrpc = { path = "../../../finecode_jsonrpc", editable = true } +fine_python_pip = { path = "../../../extensions/fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../../../extensions/fine_python_virtualenv", editable = true } [tool.finecode.env.dev_no_runtime.dependencies] finecode_extension_api = { path = "../../../finecode_extension_api", editable = true } @@ -29,6 +36,8 @@ fine_python_flake8 = { path = "../../../extensions/fine_python_flake8", editable fine_python_pyrefly = { path = "../../../extensions/fine_python_pyrefly", editable = true } fine_python_package_info = { path = "../../../extensions/fine_python_package_info", editable = true } fine_python_setuptools_scm = { path = "../../../extensions/fine_python_setuptools_scm", editable = true } +fine_python_pip = { path = "../../../extensions/fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../../../extensions/fine_python_virtualenv", editable = true } # currently, all packages in finecode repository are pure python packages, reuse # setuptools build in all of them @@ -52,7 +61,7 @@ source = "fine_python_pyrefly.PyreflyLintFilesHandler" config.python_version = '3.11' [tool.finecode.action.init_repository_provider] -source = 
"finecode_extension_api.actions.init_repository_provider.InitRepositoryProviderAction" +source = "finecode_extension_api.actions.InitRepositoryProviderAction" handlers = [ { name = 'init_repository_provider', source = 'finecode_builtin_handlers.InitRepositoryProviderHandler', env = "dev_no_runtime", dependencies = [ "finecode_builtin_handlers~=0.2.0a0", @@ -60,7 +69,7 @@ handlers = [ ] [tool.finecode.action.publish_artifact] -source = "finecode_extension_api.actions.publish_artifact.PublishArtifactAction" +source = "finecode_extension_api.actions.PublishArtifactAction" handlers = [ { name = 'publish_artifact', source = 'finecode_builtin_handlers.PublishArtifactHandler', env = "dev_no_runtime", dependencies = [ "finecode_builtin_handlers~=0.2.0a0", @@ -68,7 +77,7 @@ handlers = [ ] [tool.finecode.action.get_src_artifact_version] -source = "finecode_extension_api.actions.get_src_artifact_version.GetSrcArtifactVersionAction" +source = "finecode_extension_api.actions.GetSrcArtifactVersionAction" handlers = [ { name = 'get_src_artifact_version_py', source = 'fine_python_package_info.GetSrcArtifactVersionPyHandler', env = "dev_no_runtime", dependencies = [ "fine_python_package_info~=0.2.0a1", @@ -76,7 +85,7 @@ handlers = [ ] [tool.finecode.action.get_dist_artifact_version] -source = "finecode_extension_api.actions.get_dist_artifact_version.GetDistArtifactVersionAction" +source = "finecode_extension_api.actions.GetDistArtifactVersionAction" handlers = [ { name = 'get_dist_artifact_version_py', source = 'fine_python_package_info.GetDistArtifactVersionPyHandler', env = "dev_no_runtime", dependencies = [ "fine_python_package_info~=0.2.0a1", @@ -84,7 +93,7 @@ handlers = [ ] [tool.finecode.action.get_src_artifact_registries] -source = "finecode_extension_api.actions.get_src_artifact_registries.GetSrcArtifactRegistriesAction" +source = "finecode_extension_api.actions.GetSrcArtifactRegistriesAction" handlers = [ { name = 'get_src_artifact_registries_py', source = 
'fine_python_package_info.GetSrcArtifactRegistriesPyHandler', env = "dev_no_runtime", dependencies = [ "fine_python_package_info~=0.2.0a1", @@ -92,7 +101,7 @@ handlers = [ ] [tool.finecode.action.publish_artifact_to_registry] -source = "finecode_extension_api.actions.publish_artifact_to_registry.PublishArtifactToRegistryAction" +source = "finecode_extension_api.actions.PublishArtifactToRegistryAction" handlers = [ { name = 'publish_artifact_to_registry_py', source = 'fine_python_package_info.PublishArtifactToRegistryPyHandler', env = "dev_no_runtime", dependencies = [ "fine_python_package_info~=0.2.0a1", @@ -100,7 +109,7 @@ handlers = [ ] [tool.finecode.action.is_artifact_published_to_registry] -source = "finecode_extension_api.actions.is_artifact_published_to_registry.IsArtifactPublishedToRegistryAction" +source = "finecode_extension_api.actions.IsArtifactPublishedToRegistryAction" handlers = [ { name = 'is_artifact_published_to_registry_py', source = 'fine_python_package_info.IsArtifactPublishedToRegistryPyHandler', env = "dev_no_runtime", dependencies = [ "fine_python_package_info~=0.2.0a1", @@ -108,7 +117,7 @@ handlers = [ ] [tool.finecode.action.build_artifact] -source = "finecode_extension_api.actions.build_artifact_action.BuildArtifactAction" +source = "finecode_extension_api.actions.BuildArtifactAction" handlers = [ { name = 'build_artifact_py', source = 'fine_python_package_info.BuildArtifactPyHandler', env = "dev_no_runtime", dependencies = [ "fine_python_package_info~=0.2.0a1", @@ -116,7 +125,7 @@ handlers = [ ] [tool.finecode.action.verify_artifact_published_to_registry] -source = "finecode_extension_api.actions.verify_artifact_published_to_registry.VerifyArtifactPublishedToRegistryAction" +source = "finecode_extension_api.actions.VerifyArtifactPublishedToRegistryAction" # TODO: install, try to import etc handlers = [] diff --git a/finecode_dev_extensions/pyproject.toml b/finecode_dev_extensions/pyproject.toml index 5e7120aa..77b9f129 100644 --- 
a/finecode_dev_extensions/pyproject.toml +++ b/finecode_dev_extensions/pyproject.toml @@ -20,7 +20,10 @@ finecode_dev_extensions = { path = "../finecode_dev_extensions", editable = true fine_python_recommended = { path = "../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../presets/fine_python_lint", editable = true } fine_python_format = { path = "../presets/fine_python_format", editable = true } +fine_python_test = { path = "../presets/fine_python_test", editable = true } finecode_builtin_handlers = { path = "../finecode_builtin_handlers", editable = true } +fine_python_pip = { path = "../extensions/fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../extensions/fine_python_virtualenv", editable = true } [tool.finecode] presets = [{ source = "finecode_dev_common_preset" }] diff --git a/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_action.py b/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_action.py index 0a129325..5a04196e 100644 --- a/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_action.py +++ b/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_action.py @@ -1,13 +1,13 @@ import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class PublishAndVerifyArtifactRunPayload(code_action.RunActionPayload): - src_artifact_def_path: pathlib.Path - dist_artifact_paths: list[pathlib.Path] + src_artifact_def_path: ResourceUri + dist_artifact_paths: list[ResourceUri] force: bool = False diff --git a/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_handler.py b/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_handler.py index 98a8b90c..78be911a 100644 --- 
a/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_handler.py +++ b/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_handler.py @@ -1,11 +1,10 @@ import dataclasses -import pathlib from finecode_extension_api import code_action -from finecode_extension_api.actions import ( - get_dist_artifact_version, - publish_artifact, - verify_artifact_published_to_registry, +from finecode_extension_api.actions.publishing import ( + get_dist_artifact_version_action, + publish_artifact_action, + verify_artifact_published_to_registry_action, ) from finecode_extension_api.interfaces import iactionrunner, iprojectinfoprovider @@ -45,14 +44,14 @@ async def run( ) -> PublishAndVerifyArtifactRunResult: run_meta = run_context.meta - src_artifact_def_path: pathlib.Path = payload.src_artifact_def_path - dist_artifact_paths: list[pathlib.Path] = payload.dist_artifact_paths + src_artifact_def_path = payload.src_artifact_def_path + dist_artifact_paths = payload.dist_artifact_paths # Publish the artifact - publish_action = self.action_runner.get_action_by_name( - "publish_artifact", publish_artifact.PublishArtifactAction + publish_action = self.action_runner.get_action_by_source( + publish_artifact_action.PublishArtifactAction ) - publish_payload = publish_artifact.PublishArtifactRunPayload( + publish_payload = publish_artifact_action.PublishArtifactRunPayload( src_artifact_def_path=src_artifact_def_path, dist_artifact_paths=dist_artifact_paths, force=payload.force, @@ -64,11 +63,10 @@ async def run( # TODO: impl verify of each dist file. 
NOTE; they can have different versions # Get version from the dist artifact - get_version_action = self.action_runner.get_action_by_name( - "get_dist_artifact_version", - get_dist_artifact_version.GetDistArtifactVersionAction, + get_version_action = self.action_runner.get_action_by_source( + get_dist_artifact_version_action.GetDistArtifactVersionAction, ) - get_version_payload = get_dist_artifact_version.GetDistArtifactVersionRunPayload( + get_version_payload = get_dist_artifact_version_action.GetDistArtifactVersionRunPayload( dist_artifact_path=dist_artifact_paths[0] ) get_version_result = await self.action_runner.run_action( @@ -79,13 +77,12 @@ async def run( # Verify each published registry verification_errors: dict[str, list[str]] = {} - verify_action = self.action_runner.get_action_by_name( - "verify_artifact_published_to_registry", - verify_artifact_published_to_registry.VerifyArtifactPublishedToRegistryAction, + verify_action = self.action_runner.get_action_by_source( + verify_artifact_published_to_registry_action.VerifyArtifactPublishedToRegistryAction, ) for registry_name in published_registries: - verify_payload = verify_artifact_published_to_registry.VerifyArtifactPublishedToRegistryRunPayload( + verify_payload = verify_artifact_published_to_registry_action.VerifyArtifactPublishedToRegistryRunPayload( dist_artifact_paths=dist_artifact_paths, registry_name=registry_name, version=version, diff --git a/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_init_repository_provider_handler.py b/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_init_repository_provider_handler.py index e196a348..839bb2bc 100644 --- a/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_init_repository_provider_handler.py +++ b/finecode_dev_extensions/src/finecode_dev_extensions/publish_and_verify_artifact_init_repository_provider_handler.py @@ -1,7 +1,7 @@ import dataclasses from 
finecode_extension_api import code_action -from finecode_extension_api.actions import init_repository_provider +from finecode_extension_api.actions.publishing import init_repository_provider_action from finecode_extension_api.interfaces import iactionrunner from finecode_extension_api.interfaces.irepositorycredentialsprovider import ( Repository, @@ -47,11 +47,10 @@ async def run( ) -> PublishAndVerifyArtifactRunResult: run_meta = run_context.meta - init_action = self.action_runner.get_action_by_name( - "init_repository_provider", - init_repository_provider.InitRepositoryProviderAction, + init_action = self.action_runner.get_action_by_source( + init_repository_provider_action.InitRepositoryProviderAction, ) - init_payload = init_repository_provider.InitRepositoryProviderRunPayload( + init_payload = init_repository_provider_action.InitRepositoryProviderRunPayload( repositories=self.config.repositories, credentials_by_repository=self.config.credentials_by_repository, ) diff --git a/finecode_extension_api/pyproject.toml b/finecode_extension_api/pyproject.toml index 5a1525b0..e531127a 100644 --- a/finecode_extension_api/pyproject.toml +++ b/finecode_extension_api/pyproject.toml @@ -23,3 +23,6 @@ finecode_builtin_handlers = { path = "../finecode_builtin_handlers", editable = fine_python_recommended = { path = "../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../presets/fine_python_lint", editable = true } fine_python_format = { path = "../presets/fine_python_format", editable = true } +fine_python_test = { path = "../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../extensions/fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../extensions/fine_python_virtualenv", editable = true } diff --git a/src/finecode/config/__init__.py b/finecode_extension_api/src/finecode_extension_api/actions/artifact/__init__.py similarity index 100% rename from src/finecode/config/__init__.py rename to 
finecode_extension_api/src/finecode_extension_api/actions/artifact/__init__.py diff --git a/finecode_extension_api/src/finecode_extension_api/actions/build_artifact_action.py b/finecode_extension_api/src/finecode_extension_api/actions/artifact/build_artifact_action.py similarity index 62% rename from finecode_extension_api/src/finecode_extension_api/actions/build_artifact_action.py rename to finecode_extension_api/src/finecode_extension_api/actions/artifact/build_artifact_action.py index 9d269cb9..1b601851 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/build_artifact_action.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/artifact/build_artifact_action.py @@ -1,13 +1,14 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class BuildArtifactRunPayload(code_action.RunActionPayload): - # if not provided, current artifact will be built - src_artifact_def_path: pathlib.Path | None = None + src_artifact_def_path: ResourceUri | None = None + """``file://`` URI of the artifact definition file (e.g. pyproject.toml). 
Defaults to the current project's artifact.""" class BuildArtifactRunContext( @@ -17,20 +18,22 @@ class BuildArtifactRunContext( @dataclasses.dataclass class BuildArtifactRunResult(code_action.RunActionResult): - src_artifact_def_path: pathlib.Path - build_output_paths: list[pathlib.Path] + src_artifact_def_path: ResourceUri + build_output_paths: list[ResourceUri] def update(self, other: code_action.RunActionResult) -> None: if not isinstance(other, BuildArtifactRunResult): return if self.src_artifact_def_path != other.src_artifact_def_path: - raise code_action.ActionFailedException(f"BuildArtifactRunResult can be updated only with result of the same src artifact: {self.src_artifact_def_path} != {other.src_artifact_def_path}") + raise code_action.ActionFailedException( + f"BuildArtifactRunResult can be updated only with result of the same src artifact: {self.src_artifact_def_path} != {other.src_artifact_def_path}" + ) self.build_output_paths = other.build_output_paths def to_text(self) -> str | textstyler.StyledText: - paths_str = "\n ".join(str(p) for p in self.build_output_paths) + paths_str = "\n ".join(self.build_output_paths) return f"Built artifact at:\n {paths_str}" @property @@ -45,6 +48,8 @@ class BuildArtifactAction( BuildArtifactRunResult, ] ): + """Build an artifact from source.""" + PAYLOAD_TYPE = BuildArtifactRunPayload RUN_CONTEXT_TYPE = BuildArtifactRunContext RESULT_TYPE = BuildArtifactRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_language_action.py b/finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_language_action.py new file mode 100644 index 00000000..e78571c5 --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_language_action.py @@ -0,0 +1,48 @@ +# docs: docs/reference/actions.md +import dataclasses + +from finecode_extension_api import code_action, textstyler +from 
finecode_extension_api.resource_uri import ResourceUri + + +@dataclasses.dataclass +class GetSrcArtifactLanguageRunPayload(code_action.RunActionPayload): + src_artifact_def_path: ResourceUri + + +class GetSrcArtifactLanguageRunContext( + code_action.RunActionContext[GetSrcArtifactLanguageRunPayload] +): ... + + +@dataclasses.dataclass +class GetSrcArtifactLanguageRunResult(code_action.RunActionResult): + # Language identifier, e.g. "python", "javascript", "rust". + language: str + + def update(self, other: code_action.RunActionResult) -> None: + if not isinstance(other, GetSrcArtifactLanguageRunResult): + return + + self.language = other.language + + def to_text(self) -> str | textstyler.StyledText: + return self.language + + @property + def return_code(self) -> code_action.RunReturnCode: + return code_action.RunReturnCode.SUCCESS + + +class GetSrcArtifactLanguageAction( + code_action.Action[ + GetSrcArtifactLanguageRunPayload, + GetSrcArtifactLanguageRunContext, + GetSrcArtifactLanguageRunResult, + ] +): + """Detect the programming language of a source artifact.""" + + PAYLOAD_TYPE = GetSrcArtifactLanguageRunPayload + RUN_CONTEXT_TYPE = GetSrcArtifactLanguageRunContext + RESULT_TYPE = GetSrcArtifactLanguageRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/get_src_artifact_registries.py b/finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_registries_action.py similarity index 88% rename from finecode_extension_api/src/finecode_extension_api/actions/get_src_artifact_registries.py rename to finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_registries_action.py index 059e6514..45d941b5 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/get_src_artifact_registries.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_registries_action.py @@ -1,7 +1,8 @@ +# docs: docs/reference/actions.md import dataclasses -import 
pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass @@ -12,7 +13,7 @@ class Registry: @dataclasses.dataclass class GetSrcArtifactRegistriesRunPayload(code_action.RunActionPayload): - src_artifact_def_path: pathlib.Path + src_artifact_def_path: ResourceUri class GetSrcArtifactRegistriesRunContext( @@ -51,6 +52,8 @@ class GetSrcArtifactRegistriesAction( GetSrcArtifactRegistriesRunResult, ] ): + """List the registries configured for an artifact.""" + PAYLOAD_TYPE = GetSrcArtifactRegistriesRunPayload RUN_CONTEXT_TYPE = GetSrcArtifactRegistriesRunContext RESULT_TYPE = GetSrcArtifactRegistriesRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/get_src_artifact_version.py b/finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_version_action.py similarity index 84% rename from finecode_extension_api/src/finecode_extension_api/actions/get_src_artifact_version.py rename to finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_version_action.py index 302dc31f..b69cf3d7 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/get_src_artifact_version.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/artifact/get_src_artifact_version_action.py @@ -1,12 +1,13 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class GetSrcArtifactVersionRunPayload(code_action.RunActionPayload): - src_artifact_def_path: pathlib.Path + src_artifact_def_path: ResourceUri class GetSrcArtifactVersionRunContext( @@ -39,6 +40,8 @@ class GetSrcArtifactVersionAction( GetSrcArtifactVersionRunResult, ] ): + """Read the current version from an source artifact definition file.""" + PAYLOAD_TYPE = GetSrcArtifactVersionRunPayload 
RUN_CONTEXT_TYPE = GetSrcArtifactVersionRunContext RESULT_TYPE = GetSrcArtifactVersionRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/group_src_artifact_files_by_lang.py b/finecode_extension_api/src/finecode_extension_api/actions/artifact/group_src_artifact_files_by_lang_action.py similarity index 71% rename from finecode_extension_api/src/finecode_extension_api/actions/group_src_artifact_files_by_lang.py rename to finecode_extension_api/src/finecode_extension_api/actions/artifact/group_src_artifact_files_by_lang_action.py index 218c5d56..702346d3 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/group_src_artifact_files_by_lang.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/artifact/group_src_artifact_files_by_lang_action.py @@ -1,5 +1,5 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib import sys if sys.version_info >= (3, 12): @@ -8,12 +8,15 @@ from typing_extensions import override from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class GroupSrcArtifactFilesByLangRunPayload(code_action.RunActionPayload): - file_paths: list[pathlib.Path] + file_paths: list[ResourceUri] + """Files to group by language (``file://`` URIs).""" langs: list[str] | None = None + """Language identifiers to include (e.g. ['python', 'javascript']). 
None means all languages.""" class GroupSrcArtifactFilesByLangRunContext( @@ -24,14 +27,19 @@ def __init__( run_id: int, initial_payload: GroupSrcArtifactFilesByLangRunPayload, meta: code_action.RunActionMeta, - info_provider: code_action.RunContextInfoProvider + info_provider: code_action.RunContextInfoProvider, ) -> None: - super().__init__(run_id=run_id, initial_payload=initial_payload, meta=meta, info_provider=info_provider) + super().__init__( + run_id=run_id, + initial_payload=initial_payload, + meta=meta, + info_provider=info_provider, + ) @dataclasses.dataclass class GroupSrcArtifactFilesByLangRunResult(code_action.RunActionResult): - files_by_lang: dict[str, list[pathlib.Path]] + files_by_lang: dict[str, list[ResourceUri]] @override def update(self, other: code_action.RunActionResult) -> None: @@ -48,8 +56,8 @@ def to_text(self) -> str | textstyler.StyledText: formatted_result = textstyler.StyledText() for language, files in self.files_by_lang.items(): formatted_result.append_styled(text=language + "\n", bold=True) - for file_path in files: - formatted_result.append(file_path.as_posix() + "\n") + for file_uri in files: + formatted_result.append(file_uri + "\n") return formatted_result @@ -60,6 +68,8 @@ class GroupSrcArtifactFilesByLangAction( GroupSrcArtifactFilesByLangRunResult, ] ): + """Group a given list of files by programming language.""" + PAYLOAD_TYPE = GroupSrcArtifactFilesByLangRunPayload RUN_CONTEXT_TYPE = GroupSrcArtifactFilesByLangRunContext RESULT_TYPE = GroupSrcArtifactFilesByLangRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/list_src_artifact_files_by_lang.py b/finecode_extension_api/src/finecode_extension_api/actions/artifact/list_src_artifact_files_by_lang_action.py similarity index 83% rename from finecode_extension_api/src/finecode_extension_api/actions/list_src_artifact_files_by_lang.py rename to 
finecode_extension_api/src/finecode_extension_api/actions/artifact/list_src_artifact_files_by_lang_action.py index 445b56eb..b760b44d 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/list_src_artifact_files_by_lang.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/artifact/list_src_artifact_files_by_lang_action.py @@ -1,5 +1,5 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib import sys if sys.version_info >= (3, 12): @@ -8,11 +8,13 @@ from typing_extensions import override from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class ListSrcArtifactFilesByLangRunPayload(code_action.RunActionPayload): langs: list[str] | None = None + """Language identifiers to include (e.g. ['python', 'javascript']). None means all languages.""" class ListSrcArtifactFilesByLangRunContext( @@ -35,7 +37,7 @@ def __init__( @dataclasses.dataclass class ListSrcArtifactFilesByLangRunResult(code_action.RunActionResult): - files_by_lang: dict[str, list[pathlib.Path]] + files_by_lang: dict[str, list[ResourceUri]] @override def update(self, other: code_action.RunActionResult) -> None: @@ -52,8 +54,8 @@ def to_text(self) -> str | textstyler.StyledText: formatted_result = textstyler.StyledText() for language, files in self.files_by_lang.items(): formatted_result.append_styled(text=language + "\n", bold=True) - for file_path in files: - formatted_result.append(file_path.as_posix() + "\n") + for file_uri in files: + formatted_result.append(file_uri + "\n") return formatted_result @@ -64,6 +66,8 @@ class ListSrcArtifactFilesByLangAction( ListSrcArtifactFilesByLangRunResult, ] ): + """List source artifact files grouped by programming language.""" + PAYLOAD_TYPE = ListSrcArtifactFilesByLangRunPayload RUN_CONTEXT_TYPE = ListSrcArtifactFilesByLangRunContext RESULT_TYPE = ListSrcArtifactFilesByLangRunResult diff --git a/src/finecode/runner/__init__.py 
b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/__init__.py similarity index 100% rename from src/finecode/runner/__init__.py rename to finecode_extension_api/src/finecode_extension_api/actions/code_quality/__init__.py diff --git a/finecode_extension_api/src/finecode_extension_api/actions/code_quality/check_formatting_action.py b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/check_formatting_action.py new file mode 100644 index 00000000..17b26c22 --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/check_formatting_action.py @@ -0,0 +1 @@ +# docs: docs/reference/actions.md diff --git a/finecode_extension_api/src/finecode_extension_api/actions/format.py b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_action.py similarity index 55% rename from finecode_extension_api/src/finecode_extension_api/actions/format.py rename to finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_action.py index 40a9bd3a..3d4850f1 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/format.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_action.py @@ -1,9 +1,10 @@ +# docs: docs/reference/actions.md import dataclasses import enum -from pathlib import Path from finecode_extension_api import code_action -from finecode_extension_api.actions import format_files as format_files_action +from finecode_extension_api.actions.code_quality import format_files_action +from finecode_extension_api.resource_uri import ResourceUri class FormatTarget(enum.StrEnum): @@ -14,9 +15,11 @@ class FormatTarget(enum.StrEnum): @dataclasses.dataclass class FormatRunPayload(code_action.RunActionPayload): save: bool = True + """Whether to write formatted content back to disk.""" target: FormatTarget = FormatTarget.PROJECT - # optional, expected only with `target == FormatTarget.FILES` - file_paths: list[Path] = 
dataclasses.field(default_factory=list) + """Scope of formatting: 'project' (default) formats the whole project, 'files' formats only file_paths.""" + file_paths: list[ResourceUri] = dataclasses.field(default_factory=list) + """Files to format (``file://`` URIs). Only used when target is 'files'.""" class FormatRunContext(code_action.RunActionContext[FormatRunPayload]): ... @@ -29,6 +32,8 @@ class FormatRunResult(format_files_action.FormatFilesRunResult): ... class FormatAction( code_action.Action[FormatRunPayload, FormatRunContext, FormatRunResult] ): + """Format source code in a project or specific files.""" + PAYLOAD_TYPE = FormatRunPayload RUN_CONTEXT_TYPE = FormatRunContext RESULT_TYPE = FormatRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/format_files.py b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_files_action.py similarity index 80% rename from finecode_extension_api/src/finecode_extension_api/actions/format_files.py rename to finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_files_action.py index ea6dfb27..b17c7d08 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/format_files.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_files_action.py @@ -1,9 +1,10 @@ +# docs: docs/reference/actions.md import dataclasses import sys -from pathlib import Path from typing import NamedTuple from finecode_extension_api.interfaces import ifileeditor +from finecode_extension_api.resource_uri import ResourceUri, resource_uri_to_path if sys.version_info >= (3, 12): from typing import override @@ -15,7 +16,7 @@ @dataclasses.dataclass class FormatFilesRunPayload(code_action.RunActionPayload): - file_paths: list[Path] + file_paths: list[ResourceUri] save: bool @@ -44,7 +45,7 @@ def __init__( ) self.file_editor = file_editor - self.file_info_by_path: dict[Path, FileInfo] = {} + self.file_info_by_path: dict[ResourceUri, 
FileInfo] = {} self.file_editor_session: ifileeditor.IFileEditorSession @override @@ -52,13 +53,15 @@ async def init(self) -> None: self.file_editor_session = await self.exit_stack.enter_async_context( self.file_editor.session(FILE_OPERATION_AUTHOR) ) - for file_path in self.initial_payload.file_paths: + for file_uri in self.initial_payload.file_paths: file_info = await self.exit_stack.enter_async_context( - self.file_editor_session.read_file(file_path, block=True) + self.file_editor_session.read_file( + resource_uri_to_path(file_uri), block=True + ) ) file_content = file_info.content file_version = file_info.version - self.file_info_by_path[file_path] = FileInfo( + self.file_info_by_path[file_uri] = FileInfo( file_content=file_content, file_version=file_version ) @@ -72,7 +75,7 @@ class FormatRunFileResult: @dataclasses.dataclass class FormatFilesRunResult(code_action.RunActionResult): - result_by_file_path: dict[Path, FormatRunFileResult] + result_by_file_path: dict[ResourceUri, FormatRunFileResult] @override def update(self, other: code_action.RunActionResult) -> None: @@ -87,10 +90,10 @@ def to_text(self) -> str | textstyler.StyledText: text: textstyler.StyledText = textstyler.StyledText() unchanged_counter: int = 0 - for file_path, file_result in self.result_by_file_path.items(): + for file_uri, file_result in self.result_by_file_path.items(): if file_result.changed: text.append("reformatted ") - text.append_styled(file_path.as_posix(), bold=True) + text.append_styled(file_uri, bold=True) text.append("\n") else: unchanged_counter += 1 @@ -107,6 +110,8 @@ class FormatFilesAction( FormatFilesRunPayload, FormatFilesRunContext, FormatFilesRunResult ] ): + """Format specific files. 
Internal action dispatched by format.""" + PAYLOAD_TYPE = FormatFilesRunPayload RUN_CONTEXT_TYPE = FormatFilesRunContext RESULT_TYPE = FormatFilesRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_python_files_action.py b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_python_files_action.py new file mode 100644 index 00000000..54230238 --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/format_python_files_action.py @@ -0,0 +1,23 @@ +from finecode_extension_api import code_action +from finecode_extension_api.actions.code_quality.format_files_action import ( + FormatFilesAction, + FormatFilesRunContext, + FormatFilesRunPayload, + FormatFilesRunResult, +) + + +class FormatPythonFilesAction( + code_action.Action[ + FormatFilesRunPayload, + FormatFilesRunContext, + FormatFilesRunResult, + ] +): + """Format Python source files.""" + + PAYLOAD_TYPE = FormatFilesRunPayload + RUN_CONTEXT_TYPE = FormatFilesRunContext + RESULT_TYPE = FormatFilesRunResult + LANGUAGE = "python" + PARENT_ACTION = FormatFilesAction diff --git a/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_action.py b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_action.py new file mode 100644 index 00000000..1fd5d253 --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_action.py @@ -0,0 +1,41 @@ +# docs: docs/reference/actions.md +import dataclasses +import enum + +from finecode_extension_api import code_action +from finecode_extension_api.actions.code_quality import lint_files_action +from finecode_extension_api.resource_uri import ResourceUri + + +class LintTarget(enum.StrEnum): + PROJECT = "project" + FILES = "files" + + +@dataclasses.dataclass +class LintRunPayload(code_action.RunActionPayload): + target: LintTarget = LintTarget.PROJECT + """Scope of linting: 'project' (default) lints the 
whole project, 'files' lints only file_paths.""" + file_paths: list[ResourceUri] = dataclasses.field(default_factory=list) + """Files to lint (``file://`` URIs). Only used when target is 'files'.""" + + +@dataclasses.dataclass +class LintRunResult(lint_files_action.LintFilesRunResult): ... + + +class LintRunContext( + code_action.RunActionWithPartialResultsContext[LintRunPayload] +): ... + + +class LintAction(code_action.Action[LintRunPayload, LintRunContext, LintRunResult]): + """Run linters on a project or specific files and report diagnostics.""" + + PAYLOAD_TYPE = LintRunPayload + RUN_CONTEXT_TYPE = LintRunContext + RESULT_TYPE = LintRunResult + + +# reexport +LintMessage = lint_files_action.LintMessage diff --git a/finecode_extension_api/src/finecode_extension_api/actions/lint_files.py b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_files_action.py similarity index 73% rename from finecode_extension_api/src/finecode_extension_api/actions/lint_files.py rename to finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_files_action.py index a357e16f..8f76a349 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/lint_files.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_files_action.py @@ -1,13 +1,32 @@ +# docs: docs/reference/actions.md import collections.abc import dataclasses import enum -from pathlib import Path from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class Position: + """A position in a text document. + + Both ``line`` and ``character`` are **0-based**, matching the LSP specification: + - ``line``: 0-based line index (line 0 = first line of the file). + - ``character``: 0-based UTF-16 code unit offset within the line. + + Extension authors note: most CLI linters (ruff, mypy, flake8) report 1-based line + numbers in their output. 
You must subtract 1 when building a ``Position`` from such + output:: + + # ruff JSON: location["row"] is 1-based + Position(line=location["row"] - 1, character=location["column"]) + + Extensions that receive diagnostics from an embedded LSP server (via + ``map_diagnostics_to_lint_messages``) get 0-based values directly from the LSP + protocol — do NOT subtract 1 in that case. + """ + line: int character: int @@ -38,16 +57,16 @@ class LintMessage: @dataclasses.dataclass class LintFilesRunPayload( - code_action.RunActionPayload, collections.abc.AsyncIterable[Path] + code_action.RunActionPayload, collections.abc.AsyncIterable[ResourceUri] ): - file_paths: list[Path] + file_paths: list[ResourceUri] - def __aiter__(self) -> collections.abc.AsyncIterator[Path]: + def __aiter__(self) -> collections.abc.AsyncIterator[ResourceUri]: return LintFilesRunPayloadIterator(self) @dataclasses.dataclass -class LintFilesRunPayloadIterator(collections.abc.AsyncIterator[Path]): +class LintFilesRunPayloadIterator(collections.abc.AsyncIterator[ResourceUri]): def __init__(self, lint_files_run_payload: LintFilesRunPayload): self.lint_files_run_payload = lint_files_run_payload self.current_file_path_index = 0 @@ -55,7 +74,7 @@ def __init__(self, lint_files_run_payload: LintFilesRunPayload): def __aiter__(self): return self - async def __anext__(self) -> Path: + async def __anext__(self) -> ResourceUri: if len(self.lint_files_run_payload.file_paths) <= self.current_file_path_index: raise StopAsyncIteration() self.current_file_path_index += 1 @@ -66,11 +85,7 @@ async def __anext__(self) -> Path: class LintFilesRunResult(code_action.RunActionResult): # messages is a dict to support messages for multiple files because it could be the # case that linter checks given file and its dependencies. 
- # - # dict key should be Path, but pygls fails to handle slashes in dict keys, use - # strings with posix representation of path instead until the problem is properly - # solved - messages: dict[str, list[LintMessage]] + messages: dict[ResourceUri, list[LintMessage]] def update(self, other: code_action.RunActionResult) -> None: if not isinstance(other, LintFilesRunResult): @@ -91,8 +106,8 @@ def to_text(self) -> str | textstyler.StyledText: if message.source is not None: source_str = f" ({message.source})" text.append_styled(file_path_str, bold=True) - text.append(f":{message.range.start.line}") - text.append(f":{message.range.start.character}: ") + text.append(f":{message.range.start.line + 1}") + text.append(f":{message.range.start.character + 1}: ") if message.code is not None: text.append_styled( message.code, foreground=textstyler.Color.RED @@ -124,6 +139,8 @@ class LintFilesAction( LintFilesRunResult, ] ): + """Run linters on specific files and report diagnostics. Internal action dispatched by lint.""" + PAYLOAD_TYPE = LintFilesRunPayload RUN_CONTEXT_TYPE = LintFilesRunContext RESULT_TYPE = LintFilesRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_python_files_action.py b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_python_files_action.py new file mode 100644 index 00000000..83a66900 --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/code_quality/lint_python_files_action.py @@ -0,0 +1,23 @@ +from finecode_extension_api import code_action +from finecode_extension_api.actions.code_quality.lint_files_action import ( + LintFilesAction, + LintFilesRunContext, + LintFilesRunPayload, + LintFilesRunResult, +) + + +class LintPythonFilesAction( + code_action.Action[ + LintFilesRunPayload, + LintFilesRunContext, + LintFilesRunResult, + ] +): + """Lint Python source files and report diagnostics.""" + + PAYLOAD_TYPE = LintFilesRunPayload + RUN_CONTEXT_TYPE = 
LintFilesRunContext + RESULT_TYPE = LintFilesRunResult + LANGUAGE = "python" + PARENT_ACTION = LintFilesAction diff --git a/src/finecode/services/__init__.py b/finecode_extension_api/src/finecode_extension_api/actions/environments/__init__.py similarity index 100% rename from src/finecode/services/__init__.py rename to finecode_extension_api/src/finecode_extension_api/actions/environments/__init__.py diff --git a/finecode_extension_api/src/finecode_extension_api/actions/environments/create_env_action.py b/finecode_extension_api/src/finecode_extension_api/actions/environments/create_env_action.py new file mode 100644 index 00000000..06b2b7c0 --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/environments/create_env_action.py @@ -0,0 +1,26 @@ +import dataclasses + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments.create_envs_action import EnvInfo, CreateEnvsRunResult + + +@dataclasses.dataclass +class CreateEnvRunPayload(code_action.RunActionPayload): + env: EnvInfo + recreate: bool = False + + +class CreateEnvRunContext(code_action.RunActionContext[CreateEnvRunPayload]): + pass + + +class CreateEnvAction( + code_action.Action[ + CreateEnvRunPayload, CreateEnvRunContext, CreateEnvsRunResult + ] +): + """Create a single environment(without installing dependencies, only environment).""" + + PAYLOAD_TYPE = CreateEnvRunPayload + RUN_CONTEXT_TYPE = CreateEnvRunContext + RESULT_TYPE = CreateEnvsRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/environments/create_envs_action.py b/finecode_extension_api/src/finecode_extension_api/actions/environments/create_envs_action.py new file mode 100644 index 00000000..c084f43a --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/environments/create_envs_action.py @@ -0,0 +1,80 @@ +import dataclasses +import sys + +if sys.version_info >= (3, 12): + from typing import override +else: + from 
typing_extensions import override + +from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri + + +@dataclasses.dataclass +class EnvInfo: + name: str + venv_dir_path: ResourceUri + project_def_path: ResourceUri + + +@dataclasses.dataclass +class CreateEnvsRunPayload(code_action.RunActionPayload): + envs: list[EnvInfo] = dataclasses.field(default_factory=list) + """Explicit list of environments to create. Empty means handlers discover envs.""" + recreate: bool = False + """Remove and recreate existing environments from scratch even if they are already valid.""" + + +class CreateEnvsRunContext(code_action.RunActionContext[CreateEnvsRunPayload]): + def __init__( + self, + run_id: int, + initial_payload: CreateEnvsRunPayload, + meta: code_action.RunActionMeta, + info_provider: code_action.RunContextInfoProvider, + ) -> None: + super().__init__( + run_id=run_id, + initial_payload=initial_payload, + meta=meta, + info_provider=info_provider, + ) + + self.envs: list[EnvInfo] | None = None + + async def init(self) -> None: + if self.initial_payload.envs: + self.envs = list(self.initial_payload.envs) + + +@dataclasses.dataclass +class CreateEnvsRunResult(code_action.RunActionResult): + errors: list[str] + + @override + def update(self, other: code_action.RunActionResult) -> None: + if not isinstance(other, CreateEnvsRunResult): + return + self.errors += other.errors + + def to_text(self) -> str | textstyler.StyledText: + return "\n".join(self.errors) + + @property + def return_code(self) -> code_action.RunReturnCode: + if len(self.errors) == 0: + return code_action.RunReturnCode.SUCCESS + else: + return code_action.RunReturnCode.ERROR + + +class CreateEnvsAction( + code_action.Action[ + CreateEnvsRunPayload, CreateEnvsRunContext, CreateEnvsRunResult + ] +): + """Create environments for the workspace(without installing dependencies, only environment).""" + + PAYLOAD_TYPE = CreateEnvsRunPayload + RUN_CONTEXT_TYPE = 
CreateEnvsRunContext + RESULT_TYPE = CreateEnvsRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/install_deps_in_env.py b/finecode_extension_api/src/finecode_extension_api/actions/environments/install_deps_in_env_action.py similarity index 89% rename from finecode_extension_api/src/finecode_extension_api/actions/install_deps_in_env.py rename to finecode_extension_api/src/finecode_extension_api/actions/environments/install_deps_in_env_action.py index 02627d13..9a2586cd 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/install_deps_in_env.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/environments/install_deps_in_env_action.py @@ -1,5 +1,5 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib import sys if sys.version_info >= (3, 12): @@ -8,6 +8,7 @@ from typing_extensions import override from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass @@ -20,8 +21,8 @@ class Dependency: @dataclasses.dataclass class InstallDepsInEnvRunPayload(code_action.RunActionPayload): env_name: str - venv_dir_path: pathlib.Path - project_dir_path: pathlib.Path + venv_dir_path: ResourceUri + project_dir_path: ResourceUri dependencies: list[Dependency] @@ -65,6 +66,8 @@ def return_code(self) -> code_action.RunReturnCode: class InstallDepsInEnvAction(code_action.Action): + """Install dependencies into an environment.""" + PAYLOAD_TYPE = InstallDepsInEnvRunPayload RUN_CONTEXT_TYPE = InstallDepsInEnvRunContext RESULT_TYPE = InstallDepsInEnvRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/environments/install_env_action.py b/finecode_extension_api/src/finecode_extension_api/actions/environments/install_env_action.py new file mode 100644 index 00000000..b597af9b --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/environments/install_env_action.py @@ -0,0 +1,47 
@@ +import dataclasses +import typing + +from finecode_extension_api import code_action +from finecode_extension_api.actions.environments.create_envs_action import EnvInfo +from finecode_extension_api.actions.environments.install_envs_action import ( + InstallEnvsRunResult, +) + + +@dataclasses.dataclass +class InstallEnvRunPayload(code_action.RunActionPayload): + env: EnvInfo + + +class InstallEnvRunContext( + code_action.RunActionContext[InstallEnvRunPayload] +): + def __init__( + self, + run_id: int, + initial_payload: InstallEnvRunPayload, + meta: code_action.RunActionMeta, + info_provider: code_action.RunContextInfoProvider, + ) -> None: + super().__init__( + run_id=run_id, + initial_payload=initial_payload, + meta=meta, + info_provider=info_provider, + ) + + self.project_def: dict[str, typing.Any] | None = None + + +class InstallEnvAction( + code_action.Action[ + InstallEnvRunPayload, + InstallEnvRunContext, + InstallEnvsRunResult, + ] +): + """Install dependencies into environment.""" + + PAYLOAD_TYPE = InstallEnvRunPayload + RUN_CONTEXT_TYPE = InstallEnvRunContext + RESULT_TYPE = InstallEnvsRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/environments/install_envs_action.py b/finecode_extension_api/src/finecode_extension_api/actions/environments/install_envs_action.py new file mode 100644 index 00000000..cde9db98 --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/actions/environments/install_envs_action.py @@ -0,0 +1,87 @@ +import dataclasses +import sys +import typing + +if sys.version_info >= (3, 12): + from typing import override +else: + from typing_extensions import override + +from finecode_extension_api import code_action, textstyler +from finecode_extension_api.actions.environments.create_envs_action import EnvInfo +from finecode_extension_api.resource_uri import ResourceUri + + +@dataclasses.dataclass +class InstallEnvsRunPayload(code_action.RunActionPayload): + envs: list[EnvInfo] = 
dataclasses.field(default_factory=list) + """Explicit list of environments to install dependencies in. Empty means handlers discover envs at run time.""" + env_names: list[str] | None = None + """Filter: when set, only in environments whose name is in this list dependencies will be installed. Applied during discovery only.""" + + + +class InstallEnvsRunContext( + code_action.RunActionContext[InstallEnvsRunPayload] +): + def __init__( + self, + run_id: int, + initial_payload: InstallEnvsRunPayload, + meta: code_action.RunActionMeta, + info_provider: code_action.RunContextInfoProvider, + ) -> None: + super().__init__( + run_id=run_id, + initial_payload=initial_payload, + meta=meta, + info_provider=info_provider, + ) + + self.envs: list[EnvInfo] | None = None + self.project_def_path_by_venv_dir_path: dict[ResourceUri, ResourceUri] = {} + self.project_def_by_venv_dir_path: dict[ + ResourceUri, dict[str, typing.Any] + ] = {} + + async def init(self) -> None: + self.envs = list(self.initial_payload.envs) + for env_info in self.initial_payload.envs: + self.project_def_path_by_venv_dir_path[env_info.venv_dir_path] = ( + env_info.project_def_path + ) + + +@dataclasses.dataclass +class InstallEnvsRunResult(code_action.RunActionResult): + errors: list[str] + + @override + def update(self, other: code_action.RunActionResult) -> None: + if not isinstance(other, InstallEnvsRunResult): + return + self.errors += other.errors + + def to_text(self) -> str | textstyler.StyledText: + return "\n".join(self.errors) + + @property + def return_code(self) -> code_action.RunReturnCode: + if len(self.errors) == 0: + return code_action.RunReturnCode.SUCCESS + else: + return code_action.RunReturnCode.ERROR + + +class InstallEnvsAction( + code_action.Action[ + InstallEnvsRunPayload, + InstallEnvsRunContext, + InstallEnvsRunResult, + ] +): + """Install dependencies into all environments.""" + + PAYLOAD_TYPE = InstallEnvsRunPayload + RUN_CONTEXT_TYPE = InstallEnvsRunContext + RESULT_TYPE = 
InstallEnvsRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/lint.py b/finecode_extension_api/src/finecode_extension_api/actions/lint.py deleted file mode 100644 index 9b52a993..00000000 --- a/finecode_extension_api/src/finecode_extension_api/actions/lint.py +++ /dev/null @@ -1,37 +0,0 @@ -import dataclasses -import enum -from pathlib import Path - -from finecode_extension_api import code_action -from finecode_extension_api.actions import lint_files - - -class LintTarget(enum.StrEnum): - PROJECT = "project" - FILES = "files" - - -@dataclasses.dataclass -class LintRunPayload(code_action.RunActionPayload): - target: LintTarget = LintTarget.PROJECT - # optional, expected only with `target == LintTarget.FILES` - file_paths: list[Path] = dataclasses.field(default_factory=list) - - -@dataclasses.dataclass -class LintRunResult(lint_files.LintFilesRunResult): ... - - -class LintRunContext( - code_action.RunActionWithPartialResultsContext[LintRunPayload] -): ... - - -class LintAction(code_action.Action[LintRunPayload, LintRunContext, LintRunResult]): - PAYLOAD_TYPE = LintRunPayload - RUN_CONTEXT_TYPE = LintRunContext - RESULT_TYPE = LintRunResult - - -# reexport -LintMessage = lint_files.LintMessage diff --git a/finecode_extension_api/src/finecode_extension_api/actions/prepare_envs.py b/finecode_extension_api/src/finecode_extension_api/actions/prepare_envs.py deleted file mode 100644 index 6c2452e0..00000000 --- a/finecode_extension_api/src/finecode_extension_api/actions/prepare_envs.py +++ /dev/null @@ -1,97 +0,0 @@ -import dataclasses -import pathlib -import sys -import typing - -if sys.version_info >= (3, 12): - from typing import override -else: - from typing_extensions import override - -from finecode_extension_api import code_action, textstyler - - -@dataclasses.dataclass -class EnvInfo: - name: str - venv_dir_path: pathlib.Path - project_def_path: pathlib.Path - - -@dataclasses.dataclass -class 
PrepareEnvsRunPayload(code_action.RunActionPayload): - envs: list[EnvInfo] - # remove old env and create a new one from scratch even if the current one is valid. - # Useful for example if you changed something in venv manually and want to revert - # changes (just by running prepare it would be not solved because version of the - # packages are the same and they are already installed) - recreate: bool = False - - -class PrepareEnvsRunContext(code_action.RunActionContext[PrepareEnvsRunPayload]): - def __init__( - self, - run_id: int, - initial_payload: PrepareEnvsRunPayload, - meta: code_action.RunActionMeta, - info_provider: code_action.RunContextInfoProvider, - ) -> None: - super().__init__( - run_id=run_id, - initial_payload=initial_payload, - meta=meta, - info_provider=info_provider, - ) - - # project def pathes are stored also in context, because prepare envs can run - # tools like pip which expected 'normalized' project definition(=without - # additional features which finecode provides). So the usual workflow looks like - # normalizing(dumping) configuration first and then use dumped config for - # further handlers. - self.project_def_path_by_venv_dir_path: dict[pathlib.Path, pathlib.Path] = {} - # to avoid multiple writing and reading files in each action handler, save - # modified project definition here. 
It also can be used as extension point if - # for example additional dependencies should be installed by adding handler - # which inserts them into project definition instead of modying `install_deps` - # handler - self.project_def_by_venv_dir_path: dict[ - pathlib.Path, dict[str, typing.Any] - ] = {} - - async def init(self) -> None: - for env_info in self.initial_payload.envs: - self.project_def_path_by_venv_dir_path[env_info.venv_dir_path] = ( - env_info.project_def_path - ) - - -@dataclasses.dataclass -class PrepareEnvsRunResult(code_action.RunActionResult): - # `PrepareEnvs` action is general, so make result general as well - errors: list[str] - - @override - def update(self, other: code_action.RunActionResult) -> None: - if not isinstance(other, PrepareEnvsRunResult): - return - self.errors += other.errors - - def to_text(self) -> str | textstyler.StyledText: - return "\n".join(self.errors) - - @property - def return_code(self) -> code_action.RunReturnCode: - if len(self.errors) == 0: - return code_action.RunReturnCode.SUCCESS - else: - return code_action.RunReturnCode.ERROR - - -class PrepareEnvsAction( - code_action.Action[ - PrepareEnvsRunPayload, PrepareEnvsRunContext, PrepareEnvsRunResult - ] -): - PAYLOAD_TYPE = PrepareEnvsRunPayload - RUN_CONTEXT_TYPE = PrepareEnvsRunContext - RESULT_TYPE = PrepareEnvsRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/prepare_runners.py b/finecode_extension_api/src/finecode_extension_api/actions/prepare_runners.py deleted file mode 100644 index 6b575ada..00000000 --- a/finecode_extension_api/src/finecode_extension_api/actions/prepare_runners.py +++ /dev/null @@ -1,97 +0,0 @@ -import dataclasses -import pathlib -import sys -import typing - -if sys.version_info >= (3, 12): - from typing import override -else: - from typing_extensions import override - -from finecode_extension_api import code_action, textstyler - - -@dataclasses.dataclass -class EnvInfo: - name: str - venv_dir_path: 
pathlib.Path - project_def_path: pathlib.Path - - -@dataclasses.dataclass -class PrepareRunnersRunPayload(code_action.RunActionPayload): - envs: list[EnvInfo] - # remove old env and create a new one from scratch even if the current one is valid. - # Useful for example if you changed something in venv manually and want to revert - # changes (just by running prepare it would be not solved because version of the - # packages are the same and they are already installed) - recreate: bool = False - - -class PrepareRunnersRunContext(code_action.RunActionContext[PrepareRunnersRunPayload]): - def __init__( - self, - run_id: int, - initial_payload: PrepareRunnersRunPayload, - meta: code_action.RunActionMeta, - info_provider: code_action.RunContextInfoProvider, - ) -> None: - super().__init__( - run_id=run_id, - initial_payload=initial_payload, - meta=meta, - info_provider=info_provider, - ) - - # project def pathes are stored also in context, because prepare envs can run - # tools like pip which expected 'normalized' project definition(=without - # additional features which finecode provides). So the usual workflow looks like - # normalizing(dumping) configuration first and then use dumped config for - # further handlers. - self.project_def_path_by_venv_dir_path: dict[pathlib.Path, pathlib.Path] = {} - # to avoid multiple writing and reading files in each action handler, save - # modified project definition here. 
It also can be used as extension point if - # for example additional dependencies should be installed by adding handler - # which inserts them into project definition instead of modying `install_deps` - # handler - self.project_def_by_venv_dir_path: dict[ - pathlib.Path, dict[str, typing.Any] - ] = {} - - async def init(self) -> None: - for env_info in self.initial_payload.envs: - self.project_def_path_by_venv_dir_path[env_info.venv_dir_path] = ( - env_info.project_def_path - ) - - -@dataclasses.dataclass -class PrepareRunnersRunResult(code_action.RunActionResult): - # `PrepareRunners` action is general, so make result general as well - errors: list[str] - - @override - def update(self, other: code_action.RunActionResult) -> None: - if not isinstance(other, PrepareRunnersRunResult): - return - self.errors += other.errors - - def to_text(self) -> str | textstyler.StyledText: - return "\n".join(self.errors) - - @property - def return_code(self) -> code_action.RunReturnCode: - if len(self.errors) == 0: - return code_action.RunReturnCode.SUCCESS - else: - return code_action.RunReturnCode.ERROR - - -class PrepareRunnersAction( - code_action.Action[ - PrepareRunnersRunPayload, PrepareRunnersRunContext, PrepareRunnersRunResult - ] -): - PAYLOAD_TYPE = PrepareRunnersRunPayload - RUN_CONTEXT_TYPE = PrepareRunnersRunContext - RESULT_TYPE = PrepareRunnersRunResult diff --git a/src/finecode/utils/__init__.py b/finecode_extension_api/src/finecode_extension_api/actions/publishing/__init__.py similarity index 100% rename from src/finecode/utils/__init__.py rename to finecode_extension_api/src/finecode_extension_api/actions/publishing/__init__.py diff --git a/finecode_extension_api/src/finecode_extension_api/actions/get_dist_artifact_version.py b/finecode_extension_api/src/finecode_extension_api/actions/publishing/get_dist_artifact_version_action.py similarity index 85% rename from finecode_extension_api/src/finecode_extension_api/actions/get_dist_artifact_version.py rename to 
finecode_extension_api/src/finecode_extension_api/actions/publishing/get_dist_artifact_version_action.py index c08d321f..82d9a83e 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/get_dist_artifact_version.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/publishing/get_dist_artifact_version_action.py @@ -1,12 +1,13 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class GetDistArtifactVersionRunPayload(code_action.RunActionPayload): - dist_artifact_path: pathlib.Path + dist_artifact_path: ResourceUri class GetDistArtifactVersionRunContext( @@ -39,6 +40,8 @@ class GetDistArtifactVersionAction( GetDistArtifactVersionRunResult, ] ): + """Read the version from a distribution artifact.""" + PAYLOAD_TYPE = GetDistArtifactVersionRunPayload RUN_CONTEXT_TYPE = GetDistArtifactVersionRunContext RESULT_TYPE = GetDistArtifactVersionRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/init_repository_provider.py b/finecode_extension_api/src/finecode_extension_api/actions/publishing/init_repository_provider_action.py similarity index 86% rename from finecode_extension_api/src/finecode_extension_api/actions/init_repository_provider.py rename to finecode_extension_api/src/finecode_extension_api/actions/publishing/init_repository_provider_action.py index e65216bf..6e22efd4 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/init_repository_provider.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/publishing/init_repository_provider_action.py @@ -1,3 +1,4 @@ +# docs: docs/reference/actions.md import dataclasses from finecode_extension_api import code_action, textstyler @@ -29,7 +30,9 @@ def update(self, other: code_action.RunActionResult) -> None: def to_text(self) -> str | textstyler.StyledText: if 
self.initialized_repositories: - return f"Initialized repositories: {', '.join(self.initialized_repositories)}" + return ( + f"Initialized repositories: {', '.join(self.initialized_repositories)}" + ) return "No repositories initialized" @property @@ -44,6 +47,8 @@ class InitRepositoryProviderAction( InitRepositoryProviderRunResult, ] ): + """Initialize repository credentials for package registries.""" + PAYLOAD_TYPE = InitRepositoryProviderRunPayload RUN_CONTEXT_TYPE = InitRepositoryProviderRunContext RESULT_TYPE = InitRepositoryProviderRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/is_artifact_published_to_registry.py b/finecode_extension_api/src/finecode_extension_api/actions/publishing/is_artifact_published_to_registry_action.py similarity index 85% rename from finecode_extension_api/src/finecode_extension_api/actions/is_artifact_published_to_registry.py rename to finecode_extension_api/src/finecode_extension_api/actions/publishing/is_artifact_published_to_registry_action.py index f07faa17..f612b0fe 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/is_artifact_published_to_registry.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/publishing/is_artifact_published_to_registry_action.py @@ -1,13 +1,14 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class IsArtifactPublishedToRegistryRunPayload(code_action.RunActionPayload): - src_artifact_def_path: pathlib.Path - dist_artifact_paths: list[pathlib.Path] + src_artifact_def_path: ResourceUri + dist_artifact_paths: list[ResourceUri] version: str registry_name: str @@ -19,7 +20,7 @@ class IsArtifactPublishedToRegistryRunContext( @dataclasses.dataclass class IsArtifactPublishedToRegistryRunResult(code_action.RunActionResult): - is_published_by_dist_path: dict[pathlib.Path, bool] + 
is_published_by_dist_path: dict[ResourceUri, bool] def update(self, other: code_action.RunActionResult) -> None: if not isinstance(other, IsArtifactPublishedToRegistryRunResult): @@ -53,6 +54,8 @@ class IsArtifactPublishedToRegistryAction( IsArtifactPublishedToRegistryRunResult, ] ): + """Check whether artifact distributions are already published to a registry.""" + PAYLOAD_TYPE = IsArtifactPublishedToRegistryRunPayload RUN_CONTEXT_TYPE = IsArtifactPublishedToRegistryRunContext RESULT_TYPE = IsArtifactPublishedToRegistryRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/publish_artifact.py b/finecode_extension_api/src/finecode_extension_api/actions/publishing/publish_artifact_action.py similarity index 85% rename from finecode_extension_api/src/finecode_extension_api/actions/publish_artifact.py rename to finecode_extension_api/src/finecode_extension_api/actions/publishing/publish_artifact_action.py index 8aa9deee..31ebc16b 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/publish_artifact.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/publishing/publish_artifact_action.py @@ -1,13 +1,14 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class PublishArtifactRunPayload(code_action.RunActionPayload): - src_artifact_def_path: pathlib.Path - dist_artifact_paths: list[pathlib.Path] + src_artifact_def_path: ResourceUri + dist_artifact_paths: list[ResourceUri] force: bool = False @@ -47,6 +48,8 @@ class PublishArtifactAction( PublishArtifactRunResult, ] ): + """Publish a distribution artifact to all configured registries.""" + PAYLOAD_TYPE = PublishArtifactRunPayload RUN_CONTEXT_TYPE = PublishArtifactRunContext RESULT_TYPE = PublishArtifactRunResult diff --git 
a/finecode_extension_api/src/finecode_extension_api/actions/publish_artifact_to_registry.py b/finecode_extension_api/src/finecode_extension_api/actions/publishing/publish_artifact_to_registry_action.py similarity index 81% rename from finecode_extension_api/src/finecode_extension_api/actions/publish_artifact_to_registry.py rename to finecode_extension_api/src/finecode_extension_api/actions/publishing/publish_artifact_to_registry_action.py index 3f611c07..db02ad98 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/publish_artifact_to_registry.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/publishing/publish_artifact_to_registry_action.py @@ -1,13 +1,14 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class PublishArtifactToRegistryRunPayload(code_action.RunActionPayload): - src_artifact_def_path: pathlib.Path - dist_artifact_paths: list[pathlib.Path] + src_artifact_def_path: ResourceUri + dist_artifact_paths: list[ResourceUri] registry_name: str force: bool = False @@ -38,6 +39,8 @@ class PublishArtifactToRegistryAction( PublishArtifactToRegistryRunResult, ] ): + """Publish a distribution artifact to a specific registry.""" + PAYLOAD_TYPE = PublishArtifactToRegistryRunPayload RUN_CONTEXT_TYPE = PublishArtifactToRegistryRunContext RESULT_TYPE = PublishArtifactToRegistryRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/verify_artifact_published_to_registry.py b/finecode_extension_api/src/finecode_extension_api/actions/publishing/verify_artifact_published_to_registry_action.py similarity index 79% rename from finecode_extension_api/src/finecode_extension_api/actions/verify_artifact_published_to_registry.py rename to finecode_extension_api/src/finecode_extension_api/actions/publishing/verify_artifact_published_to_registry_action.py 
index 6816a4f8..c54f89e3 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/verify_artifact_published_to_registry.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/publishing/verify_artifact_published_to_registry_action.py @@ -1,12 +1,13 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass class VerifyArtifactPublishedToRegistryRunPayload(code_action.RunActionPayload): - dist_artifact_paths: list[pathlib.Path] + dist_artifact_paths: list[ResourceUri] registry_name: str version: str @@ -27,8 +28,9 @@ def update(self, other: code_action.RunActionResult) -> None: def to_text(self) -> str | textstyler.StyledText: if self.errors: - return f"Verification failed with {len(self.errors)} error(s):\n" + "\n".join( - f" - {e}" for e in self.errors + return ( + f"Verification failed with {len(self.errors)} error(s):\n" + + "\n".join(f" - {e}" for e in self.errors) ) return "Verification successful" @@ -46,6 +48,8 @@ class VerifyArtifactPublishedToRegistryAction( VerifyArtifactPublishedToRegistryRunResult, ] ): + """Verify that artifact distributions are available in a registry.""" + PAYLOAD_TYPE = VerifyArtifactPublishedToRegistryRunPayload RUN_CONTEXT_TYPE = VerifyArtifactPublishedToRegistryRunContext RESULT_TYPE = VerifyArtifactPublishedToRegistryRunResult diff --git a/tests/extension_runner/__init__.py b/finecode_extension_api/src/finecode_extension_api/actions/system/__init__.py similarity index 100% rename from tests/extension_runner/__init__.py rename to finecode_extension_api/src/finecode_extension_api/actions/system/__init__.py diff --git a/finecode_extension_api/src/finecode_extension_api/actions/clean_finecode_logs.py b/finecode_extension_api/src/finecode_extension_api/actions/system/clean_finecode_logs_action.py similarity index 94% rename from 
finecode_extension_api/src/finecode_extension_api/actions/clean_finecode_logs.py rename to finecode_extension_api/src/finecode_extension_api/actions/system/clean_finecode_logs_action.py index 297390f2..ae34d4df 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/clean_finecode_logs.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/system/clean_finecode_logs_action.py @@ -1,3 +1,4 @@ +# docs: docs/reference/actions.md import dataclasses from finecode_extension_api import code_action, textstyler @@ -39,6 +40,8 @@ class CleanFinecodeLogsAction( CleanFinecodeLogsRunResult, ] ): + """Remove FineCode log files.""" + PAYLOAD_TYPE = CleanFinecodeLogsRunPayload RUN_CONTEXT_TYPE = CleanFinecodeLogsRunContext RESULT_TYPE = CleanFinecodeLogsRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/actions/dump_config.py b/finecode_extension_api/src/finecode_extension_api/actions/system/dump_config_action.py similarity index 89% rename from finecode_extension_api/src/finecode_extension_api/actions/dump_config.py rename to finecode_extension_api/src/finecode_extension_api/actions/system/dump_config_action.py index 519eb6f3..3bd47455 100644 --- a/finecode_extension_api/src/finecode_extension_api/actions/dump_config.py +++ b/finecode_extension_api/src/finecode_extension_api/actions/system/dump_config_action.py @@ -1,5 +1,5 @@ +# docs: docs/reference/actions.md import dataclasses -import pathlib import pprint import sys import typing @@ -10,6 +10,7 @@ from typing_extensions import override from finecode_extension_api import code_action, textstyler +from finecode_extension_api.resource_uri import ResourceUri @dataclasses.dataclass @@ -17,9 +18,9 @@ class DumpConfigRunPayload(code_action.RunActionPayload): # `source_file_path` is not for reading, config is already read and its content is # in `project_raw_config`, but for providing config path to allow for example to # resolve relative pathes in project config - source_file_path: 
pathlib.Path + source_file_path: ResourceUri project_raw_config: dict[str, typing.Any] - target_file_path: pathlib.Path + target_file_path: ResourceUri class DumpConfigRunContext(code_action.RunActionContext[DumpConfigRunPayload]): @@ -62,6 +63,8 @@ def to_text(self) -> str | textstyler.StyledText: class DumpConfigAction( code_action.Action[DumpConfigRunPayload, DumpConfigRunContext, DumpConfigRunResult] ): + """Resolve and dump the merged project configuration.""" + PAYLOAD_TYPE = DumpConfigRunPayload RUN_CONTEXT_TYPE = DumpConfigRunContext RESULT_TYPE = DumpConfigRunResult diff --git a/finecode_extension_api/src/finecode_extension_api/code_action.py b/finecode_extension_api/src/finecode_extension_api/code_action.py index cc2b1789..ba6cc61a 100644 --- a/finecode_extension_api/src/finecode_extension_api/code_action.py +++ b/finecode_extension_api/src/finecode_extension_api/code_action.py @@ -1,3 +1,4 @@ +# docs: docs/concepts.md, docs/guides/creating-extension.md from __future__ import annotations import collections.abc @@ -5,7 +6,7 @@ import dataclasses import enum import typing -from typing import Generic, Protocol, TypeVar +from typing import ClassVar, Generic, Protocol, TypeVar from finecode_extension_api import partialresultscheduler, textstyler @@ -29,7 +30,7 @@ class DevEnv(enum.StrEnum): CLI = "cli" AI = "ai" PRECOMMIT = "precommit" - CI_CD = "cicd" + CI = "ci" @dataclasses.dataclass @@ -87,6 +88,20 @@ def update(self, result: RunActionResult) -> None: self._current_result.update(result) +class PartialResultSender(typing.Protocol): + """Handler-facing interface for sending partial results to the client.""" + + async def send(self, result: RunActionResult) -> None: ... 
+ + +class _NoOpPartialResultSender: + async def send(self, result: RunActionResult) -> None: + pass + + +_NOOP_SENDER = _NoOpPartialResultSender() + + class RunActionContext(typing.Generic[RunPayloadType]): # data object to save data between action steps(only during one run, after run data # is removed). Keep it simple, without business logic, just data storage, but you @@ -100,12 +115,14 @@ def __init__( initial_payload: RunPayloadType, meta: RunActionMeta, info_provider: RunContextInfoProvider, + partial_result_sender: PartialResultSender = _NOOP_SENDER, ) -> None: self.run_id = run_id self.initial_payload = initial_payload self.meta = meta self.exit_stack = contextlib.AsyncExitStack() self._info_provider = info_provider + self.partial_result_sender = partial_result_sender @property def current_result(self) -> RunActionResult | None: @@ -147,12 +164,14 @@ def __init__( initial_payload: RunPayloadType, meta: RunActionMeta, info_provider: RunContextInfoProvider, + partial_result_sender: PartialResultSender = _NOOP_SENDER, ) -> None: super().__init__( run_id=run_id, initial_payload=initial_payload, meta=meta, info_provider=info_provider, + partial_result_sender=partial_result_sender, ) self.partial_result_scheduler = partialresultscheduler.PartialResultScheduler() @@ -167,6 +186,8 @@ class Action(Generic[RunPayloadType, RunContextType, RunResultType]): RUN_CONTEXT_TYPE: type[RunActionContext[RunPayloadType]] = RunActionContext RESULT_TYPE: type[RunActionResult] = RunActionResult CONFIG_TYPE: type[ActionConfig] = ActionConfig + LANGUAGE: ClassVar[str | None] = None + PARENT_ACTION: ClassVar[type[Action] | None] = None class StopActionRunWithResult(Exception): diff --git a/finecode_extension_api/src/finecode_extension_api/contrib/lsp_service.py b/finecode_extension_api/src/finecode_extension_api/contrib/lsp_service.py index 8fda6d40..d5cd5bc0 100644 --- a/finecode_extension_api/src/finecode_extension_api/contrib/lsp_service.py +++ 
b/finecode_extension_api/src/finecode_extension_api/contrib/lsp_service.py @@ -6,8 +6,8 @@ from typing import Any, override from finecode_extension_api import service -from finecode_extension_api.actions import lint_files as lint_files_action -from finecode_extension_api.interfaces import ifileeditor, ilspclient, ilogger +from finecode_extension_api.actions.code_quality import lint_files_action +from finecode_extension_api.interfaces import ifileeditor, ilogger, ilspclient class LspService(service.DisposableService): @@ -116,7 +116,9 @@ async def start( cmd=self._cmd, root_uri=root_uri, workspace_folders=[{"uri": root_uri, "name": root_uri}], - initialization_options={"settings": self._settings} if self._settings else None, + initialization_options={"settings": self._settings} + if self._settings + else None, readable_id=self._readable_id, ) await session.__aenter__() @@ -214,9 +216,7 @@ async def check_file( was_set = await asyncio.to_thread(event.wait, timeout) if not was_set: - self._logger.warning( - f"Timeout waiting for LSP diagnostics for {file_path}" - ) + self._logger.warning(f"Timeout waiting for LSP diagnostics for {file_path}") elif not self._diagnostics_data.get(uri): # Got empty initial diagnostics; some servers (e.g. pyrefly) send # an empty ack first, then the real diagnostics after analysis. @@ -235,6 +235,65 @@ async def check_file( return self._diagnostics_data.get(uri, []) + async def format_file( + self, + file_path: Path, + content: str, + options: dict[str, Any] | None = None, + timeout: float = 30.0, + ) -> list[dict[str, Any]]: + """Format a file and return raw LSP TextEdits. + + ``content`` is the file text to format — callers provide it explicitly + so that the LSP server sees the same content the caller is working with + (e.g. from the run context after a previous handler already modified it). 
+ """ + assert self._session is not None, "LspService not started" + + uri = file_path.as_uri() + + lsp_version = self._next_version(uri) + if uri not in self._open_documents: + await self._session.send_notification( + "textDocument/didOpen", + { + "textDocument": { + "uri": uri, + "languageId": self._language_id, + "version": lsp_version, + "text": content, + }, + }, + ) + self._open_documents.add(uri) + else: + await self._session.send_notification( + "textDocument/didChange", + { + "textDocument": {"uri": uri, "version": lsp_version}, + "contentChanges": [{"text": content}], + }, + ) + + formatting_options = options or {"tabSize": 4, "insertSpaces": True} + result = await self._session.send_request( + "textDocument/formatting", + { + "textDocument": {"uri": uri}, + "options": formatting_options, + }, + timeout=timeout, + ) + + if file_path not in self._file_editor.get_opened_files(): + await self._session.send_notification( + "textDocument/didClose", + {"textDocument": {"uri": uri}}, + ) + self._open_documents.discard(uri) + + return result or [] + async def _run_event_loop(self, ready: asyncio.Event) -> None: async with self._file_editor.session( author=self._file_operation_author @@ -407,3 +466,37 @@ def map_diagnostics_to_lint_messages( ) ) return messages + + +def apply_text_edits(content: str, edits: list[dict[str, Any]]) -> str: + """Apply LSP TextEdits to content and return the new text. + + Edits are applied in reverse order (bottom-to-top) so that earlier + offsets remain valid after each replacement. 
+ """ + lines = content.split("\n") + + def offset_of(pos: dict[str, int]) -> int: + line = pos.get("line", 0) + char = pos.get("character", 0) + o = sum(len(lines[i]) + 1 for i in range(min(line, len(lines)))) + if line < len(lines): + o += min(char, len(lines[line])) + return o + + sorted_edits = sorted( + edits, + key=lambda e: ( + e["range"]["start"]["line"], + e["range"]["start"]["character"], + ), + reverse=True, + ) + + result = content + for edit in sorted_edits: + start = offset_of(edit["range"]["start"]) + end = offset_of(edit["range"]["end"]) + result = result[:start] + edit["newText"] + result[end:] + + return result diff --git a/finecode_extension_api/src/finecode_extension_api/interfaces/iactionrunner.py b/finecode_extension_api/src/finecode_extension_api/interfaces/iactionrunner.py index e5eb7977..2b699882 100644 --- a/finecode_extension_api/src/finecode_extension_api/interfaces/iactionrunner.py +++ b/finecode_extension_api/src/finecode_extension_api/interfaces/iactionrunner.py @@ -1,3 +1,4 @@ +import collections.abc import typing from finecode_extension_api import code_action, service @@ -15,19 +16,18 @@ class ActionDeclaration(typing.Generic[ActionT]): ... class IActionRunner(service.Service, typing.Protocol): - def get_actions_by_source( - self, source: str, expected_type: type[ActionT] - ) -> list[ActionDeclaration[ActionT]]: ... + def get_action_by_source( + self, action_type: type[ActionT] + ) -> ActionDeclaration[ActionT]: ... - def get_actions_for_language( - self, source: str, language: str, expected_type: type[ActionT] - ) -> list[ActionDeclaration[ActionT]]: ... + def get_actions_for_parent( + self, parent_action_type: type[ActionT] + ) -> dict[str, ActionDeclaration[ActionT]]: ... def get_action_by_name( - self, name: str, expected_type: type[ActionT] + self, name: str, action_type: type[ActionT] ) -> ActionDeclaration[ActionT]: - # use it only if you are sure you need it. 
In most cases get_actions_by_source - # should be preferred + """Prefer `get_action_by_source`""" ... async def run_action( @@ -37,6 +37,13 @@ async def run_action( meta: code_action.RunActionMeta, ) -> ResultT: ... + def run_action_iter( + self, + action: ActionDeclaration[code_action.Action[PayloadT, typing.Any, ResultT]], + payload: PayloadT, + meta: code_action.RunActionMeta, + ) -> collections.abc.AsyncIterator[ResultT]: ... + def get_actions_names(self) -> list[str]: ... diff --git a/finecode_extension_api/src/finecode_extension_api/resource_uri.py b/finecode_extension_api/src/finecode_extension_api/resource_uri.py new file mode 100644 index 00000000..021c9bde --- /dev/null +++ b/finecode_extension_api/src/finecode_extension_api/resource_uri.py @@ -0,0 +1,63 @@ +""" +ResourceUri — a semantic type for resource locations in action payloads and results. + +See ADR-0005 for the full rationale. In short: action boundary DTOs must not +use ``pathlib.Path``; they carry ``ResourceUri`` values instead. Local files +use ``file://`` URIs (RFC 8089). Future non-local resources may use other +schemes. + +Typical usage in a handler:: + + from finecode_extension_api.resource_uri import ( + ResourceUri, + path_to_resource_uri, + resource_uri_to_path, + ) + + # Path → ResourceUri (when populating a payload or result field) + uri = path_to_resource_uri(some_absolute_path) + + # ResourceUri → Path (when you need a local filesystem path) + local_path = resource_uri_to_path(uri) +""" +from __future__ import annotations + +import pathlib +import sys +from typing import NewType +from urllib.parse import unquote, urlparse + +ResourceUri = NewType("ResourceUri", str) +"""A URI string identifying a resource. Local files use the ``file://`` scheme.""" + + +def path_to_resource_uri(path: pathlib.Path) -> ResourceUri: + """Convert an absolute *path* to a ``file://`` :class:`ResourceUri`. + + The path **must** be absolute; call ``path.resolve()`` first if needed. 
+ + >>> path_to_resource_uri(pathlib.Path("/home/user/foo.py")) + 'file:///home/user/foo.py' + """ + return ResourceUri(path.as_uri()) + + +def resource_uri_to_path(uri: ResourceUri) -> pathlib.Path: + """Convert a ``file://`` :class:`ResourceUri` back to a local :class:`~pathlib.Path`. + + Raises :class:`ValueError` if the URI scheme is not ``file``. + """ + parsed = urlparse(uri) + if parsed.scheme != "file": + raise ValueError(f"Cannot convert non-file URI to Path: {uri}") + decoded_path = unquote(parsed.path) + # On Windows, file:///C:/foo is parsed as path="/C:/foo" — strip the + # leading slash so pathlib recognises the drive letter. + if ( + sys.platform == "win32" + and len(decoded_path) >= 3 + and decoded_path[0] == "/" + and decoded_path[2] == ":" + ): + decoded_path = decoded_path[1:] + return pathlib.Path(decoded_path) diff --git a/finecode_extension_api/src/finecode_extension_api/service.py b/finecode_extension_api/src/finecode_extension_api/service.py index a8afaf18..4d761a32 100644 --- a/finecode_extension_api/src/finecode_extension_api/service.py +++ b/finecode_extension_api/src/finecode_extension_api/service.py @@ -1,3 +1,4 @@ +# docs: docs/concepts.md, docs/guides/creating-extension.md import sys import typing diff --git a/finecode_extension_runner/pyproject.toml b/finecode_extension_runner/pyproject.toml index 571ccd18..da1db613 100644 --- a/finecode_extension_runner/pyproject.toml +++ b/finecode_extension_runner/pyproject.toml @@ -36,6 +36,9 @@ finecode_builtin_handlers = { path = "../finecode_builtin_handlers", editable = fine_python_recommended = { path = "../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../presets/fine_python_lint", editable = true } fine_python_format = { path = "../presets/fine_python_format", editable = true } +fine_python_test = { path = "../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../extensions/fine_python_pip", editable = true } +fine_python_virtualenv 
= { path = "../extensions/fine_python_virtualenv", editable = true } [build-system] requires = ["setuptools>=64", "setuptools-scm>=8"] @@ -63,7 +66,7 @@ version_file = "src/finecode_extension_runner/_version.py" root = ".." [tool.finecode.action.get_src_artifact_version] -source = "finecode_extension_api.actions.get_src_artifact_version.GetSrcArtifactVersionAction" +source = "finecode_extension_api.actions.GetSrcArtifactVersionAction" handlers_mode = "replace" handlers = [ { name = 'get_src_artifact_version_setuptools_scm', source = 'fine_python_setuptools_scm.GetSrcArtifactVersionSetuptoolsScmHandler', env = "dev_no_runtime", dependencies = [ diff --git a/finecode_extension_runner/src/finecode_extension_runner/_services/merge_results.py b/finecode_extension_runner/src/finecode_extension_runner/_services/merge_results.py new file mode 100644 index 00000000..1454dcd8 --- /dev/null +++ b/finecode_extension_runner/src/finecode_extension_runner/_services/merge_results.py @@ -0,0 +1,52 @@ +import dataclasses + +from loguru import logger +from pydantic.dataclasses import dataclass as pydantic_dataclass + +from finecode_extension_api import code_action +from finecode_extension_runner import global_state, run_utils + + +async def merge_results(action_name: str, results: list[dict]) -> dict: + """Merge multiple serialized action results into one using the action's result type. + + Each entry in ``results`` must be a dict produced by ``dataclasses.asdict()`` + of the action's ``RESULT_TYPE``. Merging is delegated to + ``RunActionResult.update()``, the same mechanism the runner uses when + combining results from multiple handlers within a single run. + """ + if global_state.runner_context is None: + raise ValueError("Extension runner is not initialized yet") + + # Prefer cached result_type to avoid re-importing the action module. 
+ action_cache = global_state.runner_context.action_cache_by_name.get(action_name) + if action_cache is not None and action_cache.exec_info is not None: + result_type = action_cache.exec_info.result_type + else: + # Cold cache: action hasn't been run yet in this runner; import the type. + try: + action = global_state.runner_context.project.actions[action_name] + except KeyError: + raise ValueError(f"Action '{action_name}' not found") + action_type = run_utils.import_module_member_by_source_str(action.source) + result_type = action_type.RESULT_TYPE + + non_empty = [r for r in results if r] + if result_type is None or not non_empty: + return {} + + result_type_pydantic = pydantic_dataclass(result_type) + + merged: code_action.RunActionResult | None = None + for result_dict in non_empty: + typed = result_type_pydantic(**result_dict) + if merged is None: + merged = typed + else: + merged.update(typed) + + if merged is None: + return {} + + logger.trace(f"merge_results: merged {len(non_empty)} results for action '{action_name}'") + return dataclasses.asdict(merged) diff --git a/finecode_extension_runner/src/finecode_extension_runner/_services/run_action.py b/finecode_extension_runner/src/finecode_extension_runner/_services/run_action.py index 90b114bf..17e77e94 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/_services/run_action.py +++ b/finecode_extension_runner/src/finecode_extension_runner/_services/run_action.py @@ -40,6 +40,25 @@ def __init__(self, response: schemas.RunActionResponse) -> None: self.response = response +class _TrackingPartialResultSender: + """Wraps partial_result_sender.schedule_sending with state tracking.""" + + def __init__( + self, + token: int | str, + send_func: collections.abc.Callable[ + [int | str, code_action.RunActionResult], collections.abc.Awaitable[None] + ], + ) -> None: + self._token = token + self._send_func = send_func + self.has_sent = False + + async def send(self, result: code_action.RunActionResult) -> 
None: + self.has_sent = True + await self._send_func(self._token, result) + + def set_partial_result_sender(send_func: typing.Callable) -> None: global partial_result_sender partial_result_sender = partial_result_sender_module.PartialResultSender( @@ -60,6 +79,7 @@ async def run_action( meta: code_action.RunActionMeta, partial_result_token: int | str | None = None, run_id: int | None = None, + partial_result_queue: asyncio.Queue | None = None, ) -> code_action.RunActionResult | None: # design decisions: # - keep payload unchanged between all subaction runs. @@ -76,7 +96,7 @@ async def run_action( last_run_id += 1 logger.trace( - f"Run action '{action_def.name}', run id: {run_id}, partial result token: {partial_result_token}" + f"run_action: action='{action_def.name}', run_id={run_id}, partial_result_token={partial_result_token}" ) # TODO: check whether config is set: this will be solved by passing initial @@ -105,6 +125,13 @@ async def run_action( run_context: code_action.RunActionContext | AsyncPlaceholderContext run_context_info = code_action.RunContextInfoProvider(is_concurrent_execution=execute_handlers_concurrently) + if partial_result_token is not None: + tracking_sender = _TrackingPartialResultSender( + token=partial_result_token, + send_func=partial_result_sender.schedule_sending, + ) + else: + tracking_sender = None if action_exec_info.run_context_type is not None: constructor_args = await resolve_func_args_with_di( action_exec_info.run_context_type.__init__, @@ -112,7 +139,8 @@ async def run_action( "run_id": lambda _: run_id, "initial_payload": lambda _: payload, "meta": lambda _: meta, - "info_provider": lambda _: run_context_info + "info_provider": lambda _: run_context_info, + "partial_result_sender": lambda _: tracking_sender or code_action._NOOP_SENDER, }, params_to_ignore=["self"], ) @@ -144,6 +172,7 @@ async def run_action( try: send_partial_results = partial_result_token is not None + logger.trace(f"R{run_id} | 
send_partial_results={send_partial_results}, partial_result_token={partial_result_token}, payload_type={type(payload).__name__}, is_iterable={isinstance(payload, collections.abc.AsyncIterable)}") with action_exec_info.process_executor.activate(): # action payload can be iterable or not if isinstance(payload, collections.abc.AsyncIterable): @@ -162,6 +191,8 @@ async def run_action( action_cache=action_cache, action_exec_info=action_exec_info, runner_context=runner_context, + partial_result_token=partial_result_token, + tracking_sender=tracking_sender, ) parts = [part async for part in payload] @@ -180,6 +211,12 @@ async def run_action( try: async with asyncio.TaskGroup() as tg: for part in parts: + if part not in run_context.partial_result_scheduler.coroutines_by_key: + logger.warning( + f"R{run_id} | No coroutines scheduled for part {part} " + f"of action '{action_def.name}', skipping" + ) + continue part_coros = ( run_context.partial_result_scheduler.coroutines_by_key[part] ) @@ -192,6 +229,7 @@ async def run_action( partial_result_sender, action_def.name, run_id, + partial_result_queue=partial_result_queue, ) else: coro = run_subresult_coros_sequentially( @@ -201,6 +239,7 @@ async def run_action( partial_result_sender, action_def.name, run_id, + partial_result_queue=partial_result_queue, ) subresult_task = tg.create_task(coro) subresults_tasks.append(subresult_task) @@ -245,6 +284,8 @@ async def run_action( action_cache=action_cache, action_exec_info=action_exec_info, runner_context=runner_context, + partial_result_token=partial_result_token, + tracking_sender=tracking_sender, ) ) handlers_tasks.append(handler_task) @@ -275,6 +316,8 @@ async def run_action( action_cache=action_cache, action_exec_info=action_exec_info, runner_context=runner_context, + partial_result_token=partial_result_token, + tracking_sender=tracking_sender, ) except ActionFailedException as exception: raise exception @@ -313,6 +356,10 @@ async def run_action( f"Unexpected result type: 
{type(action_result).__name__}" ) + if partial_result_queue is not None and action_result is not None: + await partial_result_queue.put(action_result) + return None + return action_result @@ -417,11 +464,12 @@ def create_action_exec_info(action: domain.ActionDeclaration) -> domain.ActionEx payload_type = action_type_def.PAYLOAD_TYPE run_context_type = action_type_def.RUN_CONTEXT_TYPE + result_type = action_type_def.RESULT_TYPE # TODO: validate that classes and correct subclasses? action_exec_info = domain.ActionExecInfo( - payload_type=payload_type, run_context_type=run_context_type + payload_type=payload_type, run_context_type=run_context_type, result_type=result_type ) return action_exec_info @@ -590,6 +638,8 @@ async def execute_action_handler( action_exec_info: domain.ActionExecInfo, action_cache: domain.ActionCache, runner_context: context.RunnerContext, + partial_result_token: int | str | None = None, + tracking_sender: _TrackingPartialResultSender | None = None, ) -> code_action.RunActionResult: logger.trace(f"R{run_id} | Run {handler.name} on {str(payload)[:100]}...") if handler.name in action_cache.handler_cache_by_name: @@ -645,8 +695,30 @@ def get_run_context(param_type): # there is also `inspect.iscoroutinefunction` but it cannot recognize coroutine # functions which are class methods. Use `isawaitable` on result instead. 
call_result = handler_run_func(**args) - if inspect.isawaitable(call_result): - execution_result = await call_result + if inspect.isasyncgen(call_result): + execution_result = None + async for partial_result in call_result: + if partial_result_token is not None: + await partial_result_sender.schedule_sending( + partial_result_token, partial_result + ) + if execution_result is None: + result_type_pydantic = pydantic_dataclass(type(partial_result)) + execution_result = result_type_pydantic( + **dataclasses.asdict(partial_result) + ) + else: + execution_result.update(partial_result) + if partial_result_token is not None: + await partial_result_sender.send_all_immediately() + execution_result = None # partials already sent + elif inspect.isawaitable(call_result): + handler_result = await call_result + if tracking_sender is not None and tracking_sender.has_sent: + await partial_result_sender.send_all_immediately() + execution_result = None + else: + execution_result = handler_result else: execution_result = call_result except Exception as exception: @@ -682,6 +754,7 @@ async def run_subresult_coros_concurrently( partial_result_sender: partial_result_sender_module.PartialResultSender, action_name: str, run_id: int, + partial_result_queue: asyncio.Queue | None = None, ) -> code_action.RunActionResult | None: coros_tasks: list[asyncio.Task] = [] try: @@ -723,7 +796,10 @@ async def run_subresult_coros_concurrently( else: action_subresult.update(coro_result) - if send_partial_results: + if partial_result_queue is not None: + await partial_result_queue.put(action_subresult) + return None + elif send_partial_results: await partial_result_sender.schedule_sending( partial_result_token, action_subresult ) @@ -739,6 +815,7 @@ async def run_subresult_coros_sequentially( partial_result_sender: partial_result_sender_module.PartialResultSender, action_name: str, run_id: int, + partial_result_queue: asyncio.Queue | None = None, ) -> code_action.RunActionResult | None: action_subresult: 
code_action.RunActionResult | None = None for coro in coros: @@ -759,7 +836,10 @@ async def run_subresult_coros_sequentially( else: action_subresult.update(coro_result) - if send_partial_results: + if partial_result_queue is not None: + await partial_result_queue.put(action_subresult) + return None + elif send_partial_results: await partial_result_sender.schedule_sending( partial_result_token, action_subresult ) diff --git a/finecode_extension_runner/src/finecode_extension_runner/cli.py b/finecode_extension_runner/src/finecode_extension_runner/cli.py index 0cbedc58..0d9f13be 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/cli.py +++ b/finecode_extension_runner/src/finecode_extension_runner/cli.py @@ -60,6 +60,7 @@ def start( / ".venvs" / env_name / "logs" + / "runner" / "runner.log") logs.setup_logging(log_level="INFO" if trace is False else "TRACE", log_file_path=log_file_path) diff --git a/finecode_extension_runner/src/finecode_extension_runner/di/bootstrap.py b/finecode_extension_runner/src/finecode_extension_runner/di/bootstrap.py index af41af9f..4316dc89 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/di/bootstrap.py +++ b/finecode_extension_runner/src/finecode_extension_runner/di/bootstrap.py @@ -45,8 +45,7 @@ def bootstrap( ], current_project_raw_config_version_getter: Callable[[], int], cache_dir_path_getter: Callable[[], pathlib.Path], - actions_names_getter: Callable[[], list[str]], - action_by_name_getter: Callable[[str], domain.ActionDeclaration], + actions_getter: Callable[[], dict[str, domain.ActionDeclaration]], current_env_name_getter: Callable[[], str], handler_packages: set[str], service_declarations: list, @@ -67,8 +66,7 @@ def bootstrap( ) action_runner_instance = action_runner.ActionRunner( run_action_func=run_action.run_action, - actions_names_getter=actions_names_getter, - action_by_name_getter=action_by_name_getter, + actions_getter=actions_getter, ) _state.container[ilogger.ILogger] = 
logger_instance _state.container[icommandrunner.ICommandRunner] = command_runner_instance diff --git a/finecode_extension_runner/src/finecode_extension_runner/domain.py b/finecode_extension_runner/src/finecode_extension_runner/domain.py index 834ede24..3b4848e0 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/domain.py +++ b/finecode_extension_runner/src/finecode_extension_runner/domain.py @@ -54,11 +54,13 @@ def __init__( self, payload_type: type[code_action.RunActionPayload] | None, run_context_type: type[code_action.RunActionContext] | None, + result_type: type[code_action.RunActionResult] | None = None, ) -> None: self.payload_type: type[code_action.RunActionPayload] | None = payload_type self.run_context_type: type[code_action.RunActionContext] | None = ( run_context_type ) + self.result_type: type[code_action.RunActionResult] | None = result_type # instantiation of process executor impl is cheap. To avoid analyzing all # action handlers and checking whether they need process executor, just # instantiate here. It will be started only if handlers need it. 
diff --git a/finecode_extension_runner/src/finecode_extension_runner/impls/action_runner.py b/finecode_extension_runner/src/finecode_extension_runner/impls/action_runner.py index 4e439618..cb6c9fe3 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/impls/action_runner.py +++ b/finecode_extension_runner/src/finecode_extension_runner/impls/action_runner.py @@ -1,18 +1,28 @@ +import asyncio import collections.abc import typing from finecode_extension_api import code_action from finecode_extension_api.interfaces import iactionrunner -from finecode_extension_runner import domain +from finecode_extension_runner import domain, run_utils + +_SENTINEL = object() class ActionRunner(iactionrunner.IActionRunner): def __init__(self, run_action_func: typing.Callable[[domain.ActionDeclaration, code_action.RunActionPayload, code_action.RunActionMeta], collections.abc.Coroutine[None, None, code_action.RunActionResult]], - actions_names_getter: typing.Callable[[], list[str]], - action_by_name_getter: typing.Callable[[str], domain.ActionDeclaration]): + actions_getter: typing.Callable[[], dict[str, domain.ActionDeclaration]]): self._run_action_func = run_action_func - self._actions_names_getter = actions_names_getter - self._action_by_name_getter = action_by_name_getter + self._actions_getter = actions_getter + self._source_cls_cache: dict[str, type] = {} + + def _get_cls(self, source: str) -> type: + # TODO: reset cache on ER config update? 
+ cls = self._source_cls_cache.get(source) + if cls is None: + cls = run_utils.import_module_member_by_source_str(source) + self._source_cls_cache[source] = cls + return cls @typing.override async def run_action( @@ -23,27 +33,70 @@ async def run_action( except Exception as exception: raise iactionrunner.ActionRunFailed(str(exception)) from exception + @typing.override + async def run_action_iter( + self, + action: iactionrunner.ActionDeclaration[iactionrunner.ActionT], + payload: code_action.RunActionPayload, + meta: code_action.RunActionMeta, + ) -> collections.abc.AsyncIterator[code_action.RunActionResult]: + queue: asyncio.Queue = asyncio.Queue() + + async def producer(): + try: + await self._run_action_func(action, payload, meta, partial_result_queue=queue) + finally: + await queue.put(_SENTINEL) + + task = asyncio.create_task(producer()) + try: + while True: + item = await queue.get() + if item is _SENTINEL: + break + yield item + finally: + if not task.done(): + task.cancel() + try: + await task + except (asyncio.CancelledError, Exception): + pass + @typing.override def get_actions_names(self) -> list[str]: - return self._actions_names_getter() - + return list(self._actions_getter().keys()) + @typing.override - def get_actions_by_source(self, source: str, expected_type: type[iactionrunner.ActionT]) -> list[iactionrunner.ActionDeclaration[iactionrunner.ActionT]]: - return [ - action - for name in self._actions_names_getter() - if (action := self._action_by_name_getter(name)).source == source - ] + def get_action_by_source(self, action_type: type[iactionrunner.ActionT]) -> iactionrunner.ActionDeclaration[iactionrunner.ActionT]: + for action in self._actions_getter().values(): + try: + cls = self._get_cls(action.source) + except Exception: + continue + if cls is action_type: + return action + raise iactionrunner.ActionNotFound(f"Action '{action_type.__name__}' not found") @typing.override - def get_action_by_name(self, name: str, expected_type: 
type[iactionrunner.ActionT]) -> iactionrunner.ActionDeclaration[iactionrunner.ActionT]: - try: - return self._action_by_name_getter(name) - except KeyError as exception: - raise iactionrunner.ActionNotFound(f"Action '{name}' not found") from exception + def get_action_by_name(self, name: str, action_type: type[iactionrunner.ActionT]) -> iactionrunner.ActionDeclaration[iactionrunner.ActionT]: + actions = self._actions_getter() + if name not in actions: + raise iactionrunner.ActionNotFound(f"Action '{name}' not found") + return actions[name] @typing.override - def get_actions_for_language(self, source: str, language: str, expected_type: type[iactionrunner.ActionT]) -> list[iactionrunner.ActionDeclaration[iactionrunner.ActionT]]: - return [ - action for action in self.get_actions_by_source(source=source, expected_type=expected_type) if action.name.endswith('_' + language) - ] + def get_actions_for_parent( + self, parent_action_type: type[iactionrunner.ActionT] + ) -> dict[str, iactionrunner.ActionDeclaration[iactionrunner.ActionT]]: + result: dict[str, iactionrunner.ActionDeclaration[iactionrunner.ActionT]] = {} + for action in self._actions_getter().values(): + try: + cls = self._get_cls(action.source) + except Exception: + continue + if getattr(cls, "PARENT_ACTION", None) is parent_action_type: + lang = getattr(cls, "LANGUAGE", None) + if lang is not None: + result[lang] = action + return result diff --git a/finecode_extension_runner/src/finecode_extension_runner/impls/file_editor.py b/finecode_extension_runner/src/finecode_extension_runner/impls/file_editor.py index b160695d..80a9b81c 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/impls/file_editor.py +++ b/finecode_extension_runner/src/finecode_extension_runner/impls/file_editor.py @@ -231,8 +231,6 @@ async def change_file( new_file_content = FileEditorSession.apply_change_to_file_content( change=change, file_content=file_content ) - self.logger.info(str(change)) - 
self.logger.info(f"||{file_content}||{new_file_content}||") self._update_opened_file_info( file_path=file_path, new_file_content=new_file_content ) @@ -368,7 +366,7 @@ def _notify_subscribers_about_file_change( file_change_event = ifileeditor.FileChangeEvent( file_path=file_path, author=self.author, change=change ) - for subscription in self._file_change_subscriptions[file_path].values(): + for subscription in self._file_change_subscriptions.get(file_path, {}).values(): subscription.event_queue.put_nowait(file_change_event) for subscription in self._all_events_subscriptions.values(): diff --git a/finecode_extension_runner/src/finecode_extension_runner/logs.py b/finecode_extension_runner/src/finecode_extension_runner/logs.py index c9d4d301..b9710888 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/logs.py +++ b/finecode_extension_runner/src/finecode_extension_runner/logs.py @@ -43,7 +43,7 @@ def save_logs_to_file( rotation: str = "10 MB", retention: int = 3, stdout: bool = True, -): +) -> Path: if stdout is True: if isinstance(sys.stdout, io.TextIOWrapper): # reconfigure to be able to handle special symbols @@ -54,21 +54,24 @@ def save_logs_to_file( # Find the file with the largest ID in the log directory log_dir_path = file_path.parent max_id = 0 + base_stem = file_path.stem # e.g., "my_logfile" log_files_with_ids: list[tuple[int, Path]] = [] if log_dir_path.exists(): for log_file in log_dir_path.iterdir(): - if log_file.is_file() and log_file.suffix == '.log': - # Extract numeric ID from the end of the filename (before extension) - # first split by dot because loguru adds datetime after dot: - # ..log , we need stem without datetime - stem = log_file.stem.split('.')[0] - parts = stem.split('_') - last_part = parts[-1] - if last_part.isdigit(): - file_id = int(last_part) - max_id = max(max_id, file_id) - log_files_with_ids.append((file_id, log_file)) + if log_file.is_file(): + stem = log_file.stem + # Extract numeric ID from the pattern: 
base_stem_ + # stem might be something like "my_logfile_1.2025-03-04_12-00-00" + if stem.startswith(base_stem + '_'): + # Get the part after "base_stem_" + id_part = stem[len(base_stem) + 1:] + # Split by '.' to handle datetime added by loguru + potential_id = id_part.split('.')[0] + if potential_id.isdigit(): + file_id = int(potential_id) + max_id = max(max_id, file_id) + log_files_with_ids.append((file_id, log_file)) # Remove the oldest files if there are more than 10 if len(log_files_with_ids) >= 10: @@ -98,7 +101,8 @@ def save_logs_to_file( encoding="utf8", filter=filter_logs, ) - logger.trace(f"Log file: {file_path}") + logger.trace(f"Log file: {file_path_with_id}") + return file_path_with_id def set_log_level_for_group(group: str, level: LogLevel | None): diff --git a/finecode_extension_runner/src/finecode_extension_runner/lsp_server.py b/finecode_extension_runner/src/finecode_extension_runner/lsp_server.py index e0c8c429..50fe70ad 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/lsp_server.py +++ b/finecode_extension_runner/src/finecode_extension_runner/lsp_server.py @@ -25,6 +25,7 @@ from finecode_extension_runner import schemas, services from finecode_extension_runner._services import run_action as run_action_service +from finecode_extension_runner._services import merge_results as merge_results_service from finecode_extension_runner.di import resolver import sys @@ -191,6 +192,38 @@ async def tcp_server(h: str, p: int): except asyncio.CancelledError: logger.debug("Server was cancelled") + async def start_tcp_async(self, host: str, port: int) -> None: + """Starts TCP server from within an existing event loop.""" + logger.info("Starting TCP server on %s:%s", host, port) + + self._stop_event = stop_event = threading.Event() + + async def lsp_connection( + reader: asyncio.StreamReader, writer: asyncio.StreamWriter + ): + logger.debug("Connected to client") + self.protocol.set_writer(writer) # type: ignore + await run_async( + 
stop_event=stop_event, + reader=reader, + protocol=self.protocol, + logger=logger, + error_handler=self.report_server_error, + ) + logger.debug("Main loop finished") + self.shutdown() + + self._server = await asyncio.start_server(lsp_connection, host, port) + + addrs = ", ".join(str(sock.getsockname()) for sock in self._server.sockets) + logger.info(f"Serving on {addrs}") + + try: + async with self._server: + await self._server.serve_forever() + finally: + await self._finecode_exit_stack.aclose() + def file_editor_file_change_to_lsp_text_edit(file_change: ifileeditor.FileChange) -> types.TextEdit: @@ -258,6 +291,12 @@ def create_lsp_server() -> lsp_server.LanguageServer: register_resolve_package_path_cmd = server.command("packages/resolvePath") register_resolve_package_path_cmd(resolve_package_path) + register_merge_results_cmd = server.command("actions/mergeResults") + register_merge_results_cmd(merge_results_cmd) + + register_get_payload_schemas_cmd = server.command("actions/getPayloadSchemas") + register_get_payload_schemas_cmd(get_payload_schemas_cmd) + def on_process_exit(): logger.info("Exit extension runner") services.shutdown_all_action_handlers() @@ -270,8 +309,8 @@ def send_partial_result( ) -> None: partial_result_dict = dataclasses.asdict(partial_result) partial_result_json = json.dumps(partial_result_dict) - logger.debug( - f"Send partial result for {token}, length {len(partial_result_json)}" + logger.trace( + f"send_partial_result: token={token}, length={len(partial_result_json)}, preview={partial_result_json[:200]}" ) server.progress(types.ProgressParams(token=token, value=partial_result_json)) @@ -509,8 +548,8 @@ async def run_action( result_str = json.dumps(converted_result_by_format, cls=CustomJSONEncoder) return { "status": status, - "result_by_format": result_str, - "return_code": response.return_code, + "resultByFormat": result_str, + "returnCode": response.return_code, } @@ -526,3 +565,18 @@ async def resolve_package_path(ls: 
lsp_server.LanguageServer, package_name: str) result = services.resolve_package_path(package_name) logger.trace(f"Resolved {package_name} to {result}") return {"packagePath": result} + + +async def get_payload_schemas_cmd(ls: lsp_server.LanguageServer): + logger.trace("Get payload schemas") + return services.get_payload_schemas() + + +async def merge_results_cmd(ls: lsp_server.LanguageServer, action_name: str, results: list): + logger.trace(f"Merge results: action={action_name}, count={len(results)}") + try: + merged = await merge_results_service.merge_results(action_name=action_name, results=results) + return {"merged": merged} + except Exception as exception: + logger.exception(f"Merge results error: {exception}") + return {"error": str(exception)} diff --git a/finecode_extension_runner/src/finecode_extension_runner/partial_result_sender.py b/finecode_extension_runner/src/finecode_extension_runner/partial_result_sender.py index fd9b8c77..487f68e2 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/partial_result_sender.py +++ b/finecode_extension_runner/src/finecode_extension_runner/partial_result_sender.py @@ -1,6 +1,7 @@ import asyncio import collections.abc +from loguru import logger from finecode_extension_api import code_action @@ -19,6 +20,7 @@ def __init__(self, sender: collections.abc.Callable, wait_time_ms: int) -> None: async def schedule_sending( self, token: int | str, value: code_action.RunActionResult ) -> None: + logger.trace(f"PartialResultSender: schedule_sending for token={token}, value_type={type(value).__name__}") if token not in self.results_scheduled_to_send_by_token: self.results_scheduled_to_send_by_token[token] = value else: @@ -28,6 +30,7 @@ async def schedule_sending( self.scheduled_task = asyncio.create_task(self._wait_and_send()) async def send_all_immediately(self) -> None: + logger.trace(f"PartialResultSender: send_all_immediately, pending_tokens={list(self.results_scheduled_to_send_by_token.keys())}") if 
self.scheduled_task is not None: self.scheduled_task.cancel() self.scheduled_task = None @@ -40,10 +43,14 @@ async def _wait_and_send(self) -> None: self.scheduled_task = None def _send_all(self) -> None: + count = 0 while True: try: token, value = self.results_scheduled_to_send_by_token.popitem() except KeyError: break + count += 1 + logger.trace(f"PartialResultSender: _send_all sending token={token}") self.sender(token, value) + logger.trace(f"PartialResultSender: _send_all done, sent {count} results") diff --git a/finecode_extension_runner/src/finecode_extension_runner/schema_utils.py b/finecode_extension_runner/src/finecode_extension_runner/schema_utils.py new file mode 100644 index 00000000..e133aa12 --- /dev/null +++ b/finecode_extension_runner/src/finecode_extension_runner/schema_utils.py @@ -0,0 +1,154 @@ +"""Utilities for extracting JSON Schema descriptions from RunActionPayload dataclasses. + +Used by the ``actions/getPayloadSchemas`` ER command to report parameter schemas +to the WM so that MCP clients can present real tool parameters. +""" +from __future__ import annotations + +import ast +import dataclasses +import enum +import inspect +import pathlib +import textwrap +import typing + + +def extract_payload_schema(payload_cls: type) -> dict: + """Return a JSON Schema fragment describing the fields of a RunActionPayload subclass. + + The result has two keys: + + - ``properties``: mapping of field name → JSON Schema type object. + - ``required``: list of field names that have no default value (both + ``field.default`` and ``field.default_factory`` are ``dataclasses.MISSING``). 
+ + Type mapping: + + ======================== ===================================================== + Python type JSON Schema + ======================== ===================================================== + ``bool`` ``{"type": "boolean"}`` + ``str`` ``{"type": "string"}`` + ``int`` ``{"type": "integer"}`` + ``float`` ``{"type": "number"}`` + ``pathlib.Path`` ``{"type": "string"}`` + ``enum.Enum`` subclass ``{"type": "string", "enum": []}`` + ``list[T]`` ``{"type": "array", "items": }`` + ``T | None`` same schema as ``T`` (optionality via ``required``) + unknown ``{}`` + ======================== ===================================================== + + Args: + payload_cls: A ``RunActionPayload`` subclass decorated with + ``@dataclasses.dataclass``. + + Returns: + A dict with ``"properties"`` and ``"required"`` keys, suitable for + embedding directly into an MCP ``Tool.inputSchema``. + """ + try: + hints = typing.get_type_hints(payload_cls) + except Exception: + hints = {} + + field_descriptions = _extract_field_descriptions(payload_cls) + properties: dict[str, dict] = {} + required: list[str] = [] + + for field in dataclasses.fields(payload_cls): + prop = _type_to_schema(hints.get(field.name, type(None))) + desc = field_descriptions.get(field.name) + if desc: + prop["description"] = desc + properties[field.name] = prop + + if ( + field.default is dataclasses.MISSING + and field.default_factory is dataclasses.MISSING # type: ignore[misc] + ): + required.append(field.name) + + return {"properties": properties, "required": required} + + +def _extract_field_descriptions(cls: type) -> dict[str, str]: + """Extract attribute docstrings from a dataclass class body via AST. + + An attribute docstring is a bare string literal on the line immediately + after an annotated assignment (``ast.AnnAssign``). This is the pattern + recognised by Sphinx autodoc and used throughout the FineCode action API. + + Returns an empty dict if source inspection fails (e.g. 
built-ins, .pyc-only + installs) so callers always get a safe result. + """ + try: + source = inspect.getsource(cls) + source = textwrap.dedent(source) + tree = ast.parse(source) + except Exception: + return {} + + class_def = next( + (node for node in ast.walk(tree) if isinstance(node, ast.ClassDef)), + None, + ) + if class_def is None: + return {} + + descriptions: dict[str, str] = {} + body = class_def.body + for i, stmt in enumerate(body): + if not isinstance(stmt, ast.AnnAssign): + continue + if not isinstance(stmt.target, ast.Name): + continue + field_name = stmt.target.id + if i + 1 < len(body): + next_stmt = body[i + 1] + if ( + isinstance(next_stmt, ast.Expr) + and isinstance(next_stmt.value, ast.Constant) + and isinstance(next_stmt.value.value, str) + ): + descriptions[field_name] = next_stmt.value.value.strip() + + return descriptions + + +def _type_to_schema(t: type) -> dict: + """Convert a single Python type annotation to a JSON Schema type object.""" + args = typing.get_args(t) + + # Union / Optional: T | None or typing.Optional[T] + # Both forms produce args that include NoneType. 
+ if args and type(None) in args: + non_none = [a for a in args if a is not type(None)] + if len(non_none) == 1: + return _type_to_schema(non_none[0]) + return {} + + origin = typing.get_origin(t) + + # list[T] + if origin is list: + item_schema = _type_to_schema(args[0]) if args else {} + return {"type": "array", "items": item_schema} + + # Enum subclasses (check before str — StrEnum is also a str subclass) + if isinstance(t, type) and issubclass(t, enum.Enum): + return {"type": "string", "enum": [e.value for e in t]} + + # Primitives — bool before int (bool is a subclass of int) + if t is bool: + return {"type": "boolean"} + if t is int: + return {"type": "integer"} + if t is float: + return {"type": "number"} + if t is str: + return {"type": "string"} + if t is pathlib.Path: + return {"type": "string"} + + return {} diff --git a/finecode_extension_runner/src/finecode_extension_runner/services.py b/finecode_extension_runner/src/finecode_extension_runner/services.py index aa75fa8f..fe1ec842 100644 --- a/finecode_extension_runner/src/finecode_extension_runner/services.py +++ b/finecode_extension_runner/src/finecode_extension_runner/services.py @@ -10,7 +10,7 @@ from loguru import logger from finecode_extension_api import service -from finecode_extension_runner import context, domain, global_state, schemas +from finecode_extension_runner import context, domain, global_state, schemas, run_utils, schema_utils from finecode_extension_runner._services.run_action import ( ActionFailedException, StopWithResponse, @@ -60,6 +60,17 @@ async def update_config( handlers=handlers, source=action_schema_obj.source, ) + if action_schema_obj.source is not None: + duplicate = next( + (a for a in actions.values() if a.source == action.source), + None, + ) + if duplicate is not None: + raise ValueError( + f"Action source '{action.source}' is already registered as " + f"'{duplicate.name}'. Each action class may only be registered " + f"once (ADR-0007)." 
+ ) actions[action_name] = action global_state.runner_context = context.RunnerContext( @@ -93,13 +104,9 @@ def cache_dir_path_getter() -> Path: def current_project_raw_config_version_getter() -> int: return global_state.runner_context.project_config_version - def actions_names_getter() -> list[str]: - assert global_state.runner_context is not None - return list(global_state.runner_context.project.actions.keys()) - - def action_by_name_getter(action_name: str) -> domain.ActionDeclaration: + def actions_getter() -> dict[str, domain.ActionDeclaration]: assert global_state.runner_context is not None - return global_state.runner_context.project.actions[action_name] + return global_state.runner_context.project.actions def current_env_name_getter() -> str: return global_state.env_name @@ -117,8 +124,7 @@ def current_env_name_getter() -> str: project_raw_config_getter=project_raw_config_getter, cache_dir_path_getter=cache_dir_path_getter, current_project_raw_config_version_getter=current_project_raw_config_version_getter, - actions_names_getter=actions_names_getter, - action_by_name_getter=action_by_name_getter, + actions_getter=actions_getter, current_env_name_getter=current_env_name_getter, handler_packages=handler_packages, service_declarations=request.services, @@ -354,3 +360,35 @@ def exit_all_action_handlers() -> None: action_handler_name=handler_name, exec_info=exec_info ) action_cache.handler_cache_by_name = {} + + +def get_payload_schemas() -> dict[str, dict | None]: + """Return a payload schema for every action currently known to the runner. + + Called by the WM via the ``actions/getPayloadSchemas`` command to populate + the schema cache used when building MCP tool descriptions. + + Returns a mapping of action name → JSON Schema fragment (or ``None`` if the + action class could not be imported or has no ``PAYLOAD_TYPE``). 
+ """ + if global_state.runner_context is None: + return {} + + result: dict[str, dict | None] = {} + for action_name, action in global_state.runner_context.project.actions.items(): + try: + action_cls = run_utils.import_module_member_by_source_str(action.source) + payload_cls = getattr(action_cls, "PAYLOAD_TYPE", None) + if payload_cls is None: + result[action_name] = None + else: + schema = schema_utils.extract_payload_schema(payload_cls) + doc = getattr(action_cls, "__doc__", None) + if doc: + schema["description"] = doc.strip() + result[action_name] = schema + except Exception as exception: + logger.debug(f"Could not extract payload schema for action '{action_name}': {exception}") + result[action_name] = None + + return result diff --git a/finecode_httpclient/pyproject.toml b/finecode_httpclient/pyproject.toml index 38776e4b..58425a3e 100644 --- a/finecode_httpclient/pyproject.toml +++ b/finecode_httpclient/pyproject.toml @@ -24,3 +24,6 @@ finecode_builtin_handlers = { path = "../finecode_builtin_handlers", editable = fine_python_recommended = { path = "../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../presets/fine_python_lint", editable = true } fine_python_format = { path = "../presets/fine_python_format", editable = true } +fine_python_test = { path = "../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../extensions/fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../extensions/fine_python_virtualenv", editable = true } diff --git a/finecode_jsonrpc/pyproject.toml b/finecode_jsonrpc/pyproject.toml index c0230dff..71158dd4 100644 --- a/finecode_jsonrpc/pyproject.toml +++ b/finecode_jsonrpc/pyproject.toml @@ -5,7 +5,12 @@ description = "JSON-RPC client implementation for FineCode" authors = [{ name = "Vladyslav Hnatiuk", email = "aders1234@gmail.com" }] readme = "README.md" requires-python = ">=3.11, <= 3.14" -dependencies = ["loguru==0.7.*", "culsans==0.11.*", 
"apischema==0.19.*", "finecode_extension_api~=0.4.0a0"] +dependencies = [ + "loguru==0.7.*", + "culsans==0.11.*", + "apischema==0.19.*", + "finecode_extension_api~=0.4.0a0", +] [dependency-groups] dev_workspace = ["finecode~=0.4.0a0", "finecode_dev_common_preset~=0.3.0a0"] @@ -23,3 +28,6 @@ finecode_builtin_handlers = { path = "../finecode_builtin_handlers", editable = fine_python_recommended = { path = "../presets/fine_python_recommended", editable = true } fine_python_lint = { path = "../presets/fine_python_lint", editable = true } fine_python_format = { path = "../presets/fine_python_format", editable = true } +fine_python_test = { path = "../presets/fine_python_test", editable = true } +fine_python_pip = { path = "../extensions/fine_python_pip", editable = true } +fine_python_virtualenv = { path = "../extensions/fine_python_virtualenv", editable = true } diff --git a/finecode_jsonrpc/src/finecode_jsonrpc/client.py b/finecode_jsonrpc/src/finecode_jsonrpc/client.py index 0aef7c86..65b29946 100644 --- a/finecode_jsonrpc/src/finecode_jsonrpc/client.py +++ b/finecode_jsonrpc/src/finecode_jsonrpc/client.py @@ -160,6 +160,7 @@ def __init__( self._stop_event: typing.Final = threading.Event() self._sync_request_futures: dict[str, concurrent.futures.Future] = {} self._async_request_futures: dict[str, asyncio.Future] = {} + self._stderr_buffer: list[str] = [] self._expected_result_type_by_msg_id: dict[str, typing.Any] = {} self.feature_impls: dict[str, collections.abc.Callable] = {} @@ -192,6 +193,7 @@ async def start( full_cmd=server_cmd, io_thread=io_thread, debug_port_future=debug_port_future, + stderr_buffer=self._stderr_buffer, ) if connect: await self.connect_to_server(io_thread=io_thread) @@ -206,6 +208,7 @@ async def _start_server( full_cmd: str, io_thread: _io_thread.AsyncIOThread, debug_port_future: concurrent.futures.Future[int] | None, + stderr_buffer: list[str] | None = None, ) -> None: server_future = io_thread.run_coroutine( start_server( @@ -217,6 +220,7 
@@ async def _start_server( server_id=self.readable_id, async_tasks=self._async_tasks_in_io_thread, debug_port_future=debug_port_future, + stderr_buffer=stderr_buffer, ) ) @@ -782,8 +786,12 @@ async def _connect_to_server_io(self, timeout: float | None) -> None: for task in self._async_tasks_in_io_thread: task.cancel() + stderr_hint = "" + if self._stderr_buffer: + recent = "\n".join(self._stderr_buffer[-30:]) + stderr_hint = f"\nRunner stderr output:\n{recent}" raise RunnerFailedToStart( - "Didn't get port in 30 seconds" + f"Didn't get port in 30 seconds{stderr_hint}" ) from exception port = self._tcp_port_future.result() @@ -846,6 +854,7 @@ async def start_server( server_id: str, async_tasks: list[asyncio.Task[typing.Any]], debug_port_future: concurrent.futures.Future[int] | None, + stderr_buffer: list[str] | None = None, ) -> tuple[ asyncio.StreamReader | None, asyncio.StreamWriter | None, asyncio.Future[int] | None ]: @@ -890,7 +899,7 @@ async def start_server( logger.debug(f"{server_id} - process id: {server.pid}") - task = asyncio.create_task(log_stderr(server.stderr, stop_event)) + task = asyncio.create_task(log_stderr(server.stderr, stop_event, stderr_buffer)) task.add_done_callback( functools.partial(task_done_log_callback, task_id=f"log_stderr|{server_id}") ) @@ -983,7 +992,11 @@ async def wait_for_stop_event_and_clean( logger.debug("Cleaned resources of client") -async def log_stderr(stderr: asyncio.StreamReader, stop_event: threading.Event) -> None: +async def log_stderr( + stderr: asyncio.StreamReader, + stop_event: threading.Event, + stderr_buffer: list[str] | None = None, +) -> None: """Read and log stderr output from the subprocess.""" logger.debug("Start reading logs from stderr") try: @@ -991,9 +1004,9 @@ async def log_stderr(stderr: asyncio.StreamReader, stop_event: threading.Event) line = await stderr.readline() if not line: break - logger.debug( - f"Server stderr: {line.decode('utf-8', errors='replace').rstrip()}" - ) + decoded = 
line.decode("utf-8", errors="replace").rstrip() + if stderr_buffer is not None: + stderr_buffer.append(decoded) except asyncio.CancelledError: pass diff --git a/mkdocs.yml b/mkdocs.yml index 6addf0a3..71ae3412 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -25,6 +25,9 @@ theme: - content.code.copy - content.code.annotate +exclude_docs: | + adr/template.md + plugins: - search - mkdocstrings: @@ -63,16 +66,27 @@ markdown_extensions: permalink: true nav: - - Home: index.md - - Getting Started: getting-started.md + - Getting Started: + - Welcome to FineCode: index.md + - Setup: getting-started.md + - IDE and MCP Setup: getting-started-ide-mcp.md - Concepts: concepts.md - Configuration: configuration.md - CLI Reference: cli.md - - IDE Integration: ide-integration.md - Guides: - Creating an Extension: guides/creating-extension.md - Creating a Preset: guides/creating-preset.md - Multi-Project Workspace: guides/workspace.md - Reference: - Built-in Actions: reference/actions.md + - Services: reference/services.md - Extensions: reference/extensions.md + - LSP and MCP Architecture: reference/lsp-mcp-architecture.md + - LSP Client Protocol: reference/lsp-protocol.md + - Glossary: glossary.md + - Development: + - Overview: development.md + - WM Protocol: wm-protocol.md + - WM-ER Protocol: wm-er-protocol.md + - Developing FineCode: guides/developing-finecode.md + - Architecture Decisions: adr/README.md diff --git a/presets/fine_python_format/.gitignore b/presets/fine_python_format/.gitignore new file mode 100644 index 00000000..cbb2a25b --- /dev/null +++ b/presets/fine_python_format/.gitignore @@ -0,0 +1,5 @@ +.venvs +build/ +*.egg-info/ +__pycache__ +finecode_config_dump/ diff --git a/presets/fine_python_format/fine_python_format/preset.toml b/presets/fine_python_format/fine_python_format/preset.toml index 4a560286..1dd07883 100644 --- a/presets/fine_python_format/fine_python_format/preset.toml +++ b/presets/fine_python_format/fine_python_format/preset.toml @@ -1,6 +1,5 @@ 
-[tool.finecode.action.format_files_python] -source = "finecode_extension_api.actions.format_files.FormatFilesAction" -languages = ["python"] +[tool.finecode.action.format_python_files] +source = "finecode_extension_api.actions.FormatPythonFilesAction" handlers = [ { name = "ruff", source = "fine_python_ruff.RuffFormatFilesHandler", env = "dev_no_runtime", dependencies = [ "fine_python_ruff~=0.2.0a0", diff --git a/presets/fine_python_lint/.gitignore b/presets/fine_python_lint/.gitignore new file mode 100644 index 00000000..cbb2a25b --- /dev/null +++ b/presets/fine_python_lint/.gitignore @@ -0,0 +1,5 @@ +.venvs +build/ +*.egg-info/ +__pycache__ +finecode_config_dump/ diff --git a/presets/fine_python_lint/fine_python_lint/preset.toml b/presets/fine_python_lint/fine_python_lint/preset.toml index 01fd2d24..e7967698 100644 --- a/presets/fine_python_lint/fine_python_lint/preset.toml +++ b/presets/fine_python_lint/fine_python_lint/preset.toml @@ -1,6 +1,5 @@ -[tool.finecode.action.lint_files_python] -source = "finecode_extension_api.actions.lint_files.LintFilesAction" -languages = ["python"] +[tool.finecode.action.lint_python_files] +source = "finecode_extension_api.actions.LintPythonFilesAction" handlers = [ { name = "ruff", source = "fine_python_ruff.RuffLintFilesHandler", env = "dev_no_runtime", dependencies = [ "fine_python_ruff[jsonrpc]~=0.2.0a0", diff --git a/presets/fine_python_recommended/.gitignore b/presets/fine_python_recommended/.gitignore new file mode 100644 index 00000000..cbb2a25b --- /dev/null +++ b/presets/fine_python_recommended/.gitignore @@ -0,0 +1,5 @@ +.venvs +build/ +*.egg-info/ +__pycache__ +finecode_config_dump/ diff --git a/presets/fine_python_recommended/fine_python_recommended/preset.toml b/presets/fine_python_recommended/fine_python_recommended/preset.toml index f43e74de..2eed31ec 100644 --- a/presets/fine_python_recommended/fine_python_recommended/preset.toml +++ b/presets/fine_python_recommended/fine_python_recommended/preset.toml @@ -1,2 
+1,6 @@ [tool.finecode] -presets = [{ source = "fine_python_format" }, { source = "fine_python_lint" }] +presets = [ + { source = "fine_python_format" }, + { source = "fine_python_lint" }, + { source = "fine_python_test" }, +] diff --git a/presets/fine_python_recommended/pyproject.toml b/presets/fine_python_recommended/pyproject.toml index 1dd40a3b..91701180 100644 --- a/presets/fine_python_recommended/pyproject.toml +++ b/presets/fine_python_recommended/pyproject.toml @@ -5,7 +5,11 @@ description = "" authors = [{ name = "Vladyslav Hnatiuk", email = "aders1234@gmail.com" }] readme = "README.md" requires-python = ">=3.11, <= 3.14" -dependencies = ["fine_python_format~=0.4.0a0", "fine_python_lint~=0.5.0a0"] +dependencies = [ + "fine_python_format~=0.4.0a0", + "fine_python_lint~=0.5.0a0", + "fine_python_test~=0.1.0a0", +] [build-system] requires = ["setuptools>=64"] diff --git a/presets/fine_python_test/.gitignore b/presets/fine_python_test/.gitignore new file mode 100644 index 00000000..cbb2a25b --- /dev/null +++ b/presets/fine_python_test/.gitignore @@ -0,0 +1,5 @@ +.venvs +build/ +*.egg-info/ +__pycache__ +finecode_config_dump/ diff --git a/tests/extension_runner/client/finecode/__init__.py b/presets/fine_python_test/fine_python_test/__init__.py similarity index 100% rename from tests/extension_runner/client/finecode/__init__.py rename to presets/fine_python_test/fine_python_test/__init__.py diff --git a/presets/fine_python_test/fine_python_test/preset.toml b/presets/fine_python_test/fine_python_test/preset.toml new file mode 100644 index 00000000..843ca475 --- /dev/null +++ b/presets/fine_python_test/fine_python_test/preset.toml @@ -0,0 +1,15 @@ +[tool.finecode.action.run_tests] +source = "finecode_extension_api.actions.RunTestsAction" +handlers = [ + { name = "pytest", source = "fine_python_pytest.PytestRunTestsHandler", env = "dev", dependencies = [ + "fine_python_pytest~=0.1.0a0", + ] }, +] + +[tool.finecode.action.list_tests] +source = 
"finecode_extension_api.actions.ListTestsAction" +handlers = [ + { name = "pytest", source = "fine_python_pytest.PytestListTestsHandler", env = "dev", dependencies = [ + "fine_python_pytest~=0.1.0a0", + ] }, +] diff --git a/presets/fine_python_test/pyproject.toml b/presets/fine_python_test/pyproject.toml new file mode 100644 index 00000000..78d060ab --- /dev/null +++ b/presets/fine_python_test/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "fine_python_test" +version = "0.1.0a0" +description = "" +authors = [{ name = "Vladyslav Hnatiuk", email = "aders1234@gmail.com" }] +readme = "README.md" +requires-python = ">=3.11, <= 3.14" +dependencies = ["finecode_extension_api~=0.4.0a0"] + +[build-system] +requires = ["setuptools>=64"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.package-data] +fine_python_test = ["preset.toml"] diff --git a/pyproject.toml b/pyproject.toml index 075ffac2..dcc4d19d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ dependencies = [ "finecode_builtin_handlers~=0.2.0a0", "finecode_jsonrpc~=0.1.0a0", "ordered-set==4.1.*", - "mcp==1.13.*", + "mcp>=1.0.0", "fine_python_virtualenv~=0.2.0a0", "fine_python_pip~=0.2.0a0", "culsans==0.11.*", @@ -48,19 +48,31 @@ build-backend = "setuptools.build_meta" presets = [{ source = "finecode_dev_common_preset" }] [tool.finecode.env.dev_workspace.dependencies] +finecode = { path = "./", editable = true } finecode_dev_common_preset = { path = "./finecode_dev_common_preset", editable = true } finecode_extension_runner = { path = "./finecode_extension_runner", editable = true } finecode_extension_api = { path = "./finecode_extension_api", editable = true } finecode_jsonrpc = { path = "./finecode_jsonrpc", editable = true } +fine_python_recommended = { path = "./presets/fine_python_recommended", editable = true } fine_python_lint = { path = "./presets/fine_python_lint", editable = true } fine_python_format = { path = "./presets/fine_python_format", editable = true } 
-finecode_builtin_handlers = { path = "../finecode_builtin_handlers", editable = true } +fine_python_test = { path = "./presets/fine_python_test", editable = true } +finecode_builtin_handlers = { path = "./finecode_builtin_handlers", editable = true } +fine_python_pip = { path = "./extensions/fine_python_pip", editable = true } +fine_python_virtualenv = { path = "./extensions/fine_python_virtualenv", editable = true } [tool.finecode.env.runtime.dependencies] finecode_extension_api = { path = "./finecode_extension_api", editable = true } finecode_jsonrpc = { path = "./finecode_jsonrpc", editable = true } finecode_builtin_handlers = { path = "./finecode_builtin_handlers", editable = true } +[tool.finecode.env.docs.dependencies] +finecode_extension_runner = { path = "./finecode_extension_runner", editable = true } +finecode_extension_api = { path = "./finecode_extension_api", editable = true } +finecode_jsonrpc = { path = "./finecode_jsonrpc", editable = true } +finecode_builtin_handlers = { path = "./finecode_builtin_handlers", editable = true } + + [tool.importlinter] root_package = "finecode" include_external_packages = true @@ -100,7 +112,7 @@ finecode = ["base_config.toml"] reportUnusedCallResult = false [tool.finecode.action.get_src_artifact_version] -source = "finecode_extension_api.actions.get_src_artifact_version.GetSrcArtifactVersionAction" +source = "finecode_extension_api.actions.GetSrcArtifactVersionAction" handlers_mode = "replace" handlers = [ { name = 'get_src_artifact_version_setuptools_scm', source = 'fine_python_setuptools_scm.GetSrcArtifactVersionSetuptoolsScmHandler', env = "dev_no_runtime", dependencies = [ diff --git a/src/finecode/base_config.toml b/src/finecode/base_config.toml index 46b151bf..9d7bba8d 100644 --- a/src/finecode/base_config.toml +++ b/src/finecode/base_config.toml @@ -1,73 +1,63 @@ -[tool.finecode.action.prepare_envs] -source = "finecode_extension_api.actions.prepare_envs.PrepareEnvsAction" +[tool.finecode.action.create_env] 
+source = "finecode_extension_api.actions.CreateEnvAction" -[[tool.finecode.action.prepare_envs.handlers]] -name = "prepare_envs_dump_configs" -source = "finecode_builtin_handlers.PrepareEnvsReadConfigsHandler" +[[tool.finecode.action.create_env.handlers]] +name = "create_env_create_venv" +source = "fine_python_virtualenv.VirtualenvCreateEnvHandler" env = "dev_workspace" -dependencies = ["finecode_builtin_handlers~=0.2.0a0"] +dependencies = ["fine_python_virtualenv~=0.2.0a0"] + +[tool.finecode.action.create_envs] +source = "finecode_extension_api.actions.CreateEnvsAction" -[[tool.finecode.action.prepare_envs.handlers]] -name = "prepare_envs_install_deps" -source = "finecode_builtin_handlers.PrepareEnvsInstallDepsHandler" +[[tool.finecode.action.create_envs.handlers]] +name = "create_envs_discover_envs" +source = "finecode_builtin_handlers.CreateEnvsDiscoverEnvsHandler" env = "dev_workspace" dependencies = ["finecode_builtin_handlers~=0.2.0a0"] +[[tool.finecode.action.create_envs.handlers]] +name = "create_envs_dispatch" +source = "finecode_builtin_handlers.CreateEnvsDispatchHandler" +env = "dev_workspace" +dependencies = ["finecode_builtin_handlers~=0.2.0a0"] -# preparing dev workspaces doesn't need dumping config for two reasons: -# - dependencies in `dev_workspace` are expected to be simple and installable -# without dump -# - dumping is modifiable as action, so it can be correctly done only in -# dev_workspace env of the project and we just create it here, it doesn't -# exist yet -[tool.finecode.action.prepare_dev_workspaces_envs] -source = "finecode_extension_api.actions.prepare_envs.PrepareEnvsAction" -[[tool.finecode.action.prepare_dev_workspaces_envs.handlers]] -name = "prepare_venvs" -source = "fine_python_virtualenv.VirtualenvPrepareEnvHandler" -env = "dev_workspace" -dependencies = ["fine_python_virtualenv~=0.2.0a0"] +[tool.finecode.action.install_env] +source = "finecode_extension_api.actions.InstallEnvAction" 
-[[tool.finecode.action.prepare_dev_workspaces_envs.handlers]] -name = "prepare_envs_read_configs" -source = "finecode_builtin_handlers.PrepareEnvsReadConfigsHandler" +[[tool.finecode.action.install_env.handlers]] +name = "install_env_read_config" +source = "finecode_builtin_handlers.InstallEnvReadConfigHandler" env = "dev_workspace" dependencies = ["finecode_builtin_handlers~=0.2.0a0"] - -[[tool.finecode.action.prepare_dev_workspaces_envs.handlers]] -name = "prepare_envs_install_deps" -source = "finecode_builtin_handlers.PrepareEnvsInstallDepsHandler" +[[tool.finecode.action.install_env.handlers]] +name = "install_env_install_deps" +source = "finecode_builtin_handlers.InstallEnvInstallDepsHandler" env = "dev_workspace" dependencies = ["finecode_builtin_handlers~=0.2.0a0"] -[tool.finecode.action.prepare_runners] -source = "finecode_extension_api.actions.prepare_runners.PrepareRunnersAction" -[[tool.finecode.action.prepare_runners.handlers]] -name = "prepare_runners_venvs" -source = "fine_python_virtualenv.VirtualenvPrepareRunnersHandler" -env = "dev_workspace" -dependencies = ["fine_python_virtualenv~=0.2.0a0"] +[tool.finecode.action.install_envs] +source = "finecode_extension_api.actions.InstallEnvsAction" -[[tool.finecode.action.prepare_runners.handlers]] -name = "prepare_runners_read_configs" -source = "finecode_builtin_handlers.PrepareRunnersReadConfigsHandler" +[[tool.finecode.action.install_envs.handlers]] +name = "install_envs_discover_envs" +source = "finecode_builtin_handlers.InstallEnvsDiscoverEnvsHandler" env = "dev_workspace" dependencies = ["finecode_builtin_handlers~=0.2.0a0"] - -[[tool.finecode.action.prepare_runners.handlers]] -name = "prepare_runners_install_runner_and_presets" -source = "finecode_builtin_handlers.PrepareRunnersInstallRunnerAndPresetsHandler" +[[tool.finecode.action.install_envs.handlers]] +name = "install_envs_dispatch" +source = "finecode_builtin_handlers.InstallEnvsDispatchHandler" env = "dev_workspace" dependencies = 
["finecode_builtin_handlers~=0.2.0a0"] [tool.finecode.action.dump_config] -source = "finecode_extension_api.actions.dump_config.DumpConfigAction" +source = "finecode_extension_api.actions.DumpConfigAction" [[tool.finecode.action.dump_config.handlers]] name = "dump_config" @@ -84,7 +74,7 @@ dependencies = ["finecode_builtin_handlers~=0.2.0a0"] [tool.finecode.action.install_deps_in_env] -source = "finecode_extension_api.actions.install_deps_in_env.InstallDepsInEnvAction" +source = "finecode_extension_api.actions.InstallDepsInEnvAction" [[tool.finecode.action.install_deps_in_env.handlers]] name = "install_deps_with_pip" @@ -94,7 +84,7 @@ dependencies = ["fine_python_pip~=0.2.0a0"] [tool.finecode.action.list_src_artifact_files_by_lang] -source = "finecode_extension_api.actions.list_src_artifact_files_by_lang.ListSrcArtifactFilesByLangAction" +source = "finecode_extension_api.actions.ListSrcArtifactFilesByLangAction" [[tool.finecode.action.list_src_artifact_files_by_lang.handlers]] name = "list_src_artifact_files_by_lang_python" @@ -103,7 +93,7 @@ env = "dev_no_runtime" dependencies = ["fine_python_package_info~=0.2.0a0"] [tool.finecode.action.group_src_artifact_files_by_lang] -source = "finecode_extension_api.actions.group_src_artifact_files_by_lang.GroupSrcArtifactFilesByLangAction" +source = "finecode_extension_api.actions.GroupSrcArtifactFilesByLangAction" [[tool.finecode.action.group_src_artifact_files_by_lang.handlers]] name = "group_src_artifact_files_by_lang_python" @@ -113,7 +103,7 @@ dependencies = ["fine_python_package_info~=0.2.0a0"] [tool.finecode.action.lint] -source = "finecode_extension_api.actions.lint.LintAction" +source = "finecode_extension_api.actions.LintAction" [[tool.finecode.action.lint.handlers]] name = "lint" @@ -121,8 +111,17 @@ source = "finecode_builtin_handlers.LintHandler" env = "dev_no_runtime" dependencies = ["finecode_builtin_handlers~=0.2.0a0"] +[tool.finecode.action.lint_files] +source = 
"finecode_extension_api.actions.LintFilesAction" + +[[tool.finecode.action.lint_files.handlers]] +name = "lint_files_dispatch" +source = "finecode_builtin_handlers.LintFilesDispatchHandler" +env = "dev_no_runtime" +dependencies = ["finecode_builtin_handlers~=0.2.0a0"] + [tool.finecode.action.format] -source = "finecode_extension_api.actions.format.FormatAction" +source = "finecode_extension_api.actions.FormatAction" [[tool.finecode.action.format.handlers]] name = "format" @@ -130,8 +129,17 @@ source = "finecode_builtin_handlers.FormatHandler" env = "dev_no_runtime" dependencies = ["finecode_builtin_handlers~=0.2.0a0"] +[tool.finecode.action.format_files] +source = "finecode_extension_api.actions.FormatFilesAction" + +[[tool.finecode.action.format_files.handlers]] +name = "format_files_dispatch" +source = "finecode_builtin_handlers.FormatFilesDispatchHandler" +env = "dev_no_runtime" +dependencies = ["finecode_builtin_handlers~=0.2.0a0"] + [tool.finecode.action.clean_finecode_logs] -source = "finecode_extension_api.actions.clean_finecode_logs.CleanFinecodeLogsAction" +source = "finecode_extension_api.actions.CleanFinecodeLogsAction" [[tool.finecode.action.clean_finecode_logs.handlers]] name = "clean_finecode_logs_dev_workspace" diff --git a/src/finecode/cli.py b/src/finecode/cli.py index 5178072c..033913f4 100644 --- a/src/finecode/cli.py +++ b/src/finecode/cli.py @@ -1,3 +1,4 @@ +# docs: docs/cli.md import asyncio import json import os @@ -8,13 +9,19 @@ import click from loguru import logger -import finecode.lsp_server.main as wm_lsp_server -from finecode import communication_utils, logger_utils, user_messages -from finecode.cli_app.commands import dump_config_cmd, prepare_envs_cmd, run_cmd -from finecode.config.config_models import ConfigurationError +from finecode import logger_utils, user_messages +from finecode.wm_server.config.config_models import ConfigurationError FINECODE_CONFIG_ENV_PREFIX = "FINECODE_CONFIG_" +_VALID_DEV_ENVS = {"ide", "cli", "ai", 
"precommit", "ci"} + + +def detect_dev_env() -> str: + """Detect dev environment from context. CI env var overrides the default 'cli'.""" + if os.environ.get("CI"): + return "ci" + return "cli" # TODO: unify possibilities of CLI options and env vars def parse_handler_config_from_env() -> dict[str, dict[str, dict[str, str]]]: @@ -171,7 +178,7 @@ def cli(): ... @cli.command() -@click.option("--trace", "trace", is_flag=True, default=False) +@click.option("--log-level", "log_level", default="INFO", type=click.Choice(["TRACE", "DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False), show_default=True) @click.option("--debug", "debug", is_flag=True, default=False) @click.option( "--socket", "tcp", default=None, type=int, help="start a TCP server" @@ -180,25 +187,27 @@ def cli(): ... @click.option( "--stdio", "stdio", is_flag=True, default=False, help="Use stdio communication" ) -@click.option("--host", "host", default=None, help="Host for TCP and WS server") @click.option( - "--port", "port", default=None, type=int, help="Port for TCP and WS server" + "--tcp", "tcp_auto", is_flag=True, default=False, + help="Start TCP server on a random free port; prints 'port:' to stdout for client discovery" ) -@click.option("--mcp", "mcp", is_flag=True, default=False) +@click.option("--host", "host", default=None, help="Host for TCP and WS server") @click.option( - "--mcp-port", "mcp_port", default=None, type=int, help="Port for MCP server" + "--port", "port", default=None, type=int, help="Port for TCP and WS server" ) -def start_api( - trace: bool, +def start_lsp( + log_level: str, debug: bool, tcp: int | None, ws: bool, stdio: bool, + tcp_auto: bool, host: str | None, port: int | None, - mcp: bool, - mcp_port: int | None, ): + import finecode.lsp_server.main as wm_lsp_server + from finecode.lsp_server import communication_utils + if debug is True: import debugpy @@ -208,7 +217,11 @@ def start_api( except Exception as e: logger.info(e) - if tcp is not None: + if tcp_auto: + 
comm_type = communication_utils.CommunicationType.TCP + host = "127.0.0.1" + port = None # main.start() will pick a free port and print it + elif tcp is not None: comm_type = communication_utils.CommunicationType.TCP port = tcp host = "127.0.0.1" @@ -217,10 +230,10 @@ def start_api( elif stdio is True: comm_type = communication_utils.CommunicationType.STDIO else: - raise ValueError("Specify either --tcp, --ws or --stdio") + raise ValueError("Specify either --tcp, --socket, --ws or --stdio") asyncio.run( - wm_lsp_server.start(comm_type=comm_type, host=host, port=port, trace=trace) + wm_lsp_server.start(comm_type=comm_type, host=host, port=port, log_level=log_level) ) @@ -246,16 +259,20 @@ def deserialize_action_payload(raw_payload: dict[str, str]) -> dict[str, typing. @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True)) @click.pass_context def run(ctx) -> None: + from finecode.cli_app.commands import run_cmd + args: list[str] = ctx.args actions_to_run: list[str] = [] projects: list[str] | None = None workdir_path: pathlib.Path = pathlib.Path(os.getcwd()) processed_args_count: int = 0 concurrently: bool = False - trace: bool = False + log_level: str = "INFO" no_env_config: bool = False save_results: bool = True map_payload_fields: set[str] = set() + shared_server: bool = False + dev_env: str = detect_dev_env() # finecode run parameters for arg in args: @@ -276,8 +293,8 @@ def run(ctx) -> None: projects.append(project) elif arg == "--concurrently": concurrently = True - elif arg == "--trace": - trace = True + elif arg.startswith("--log-level"): + log_level = arg.removeprefix("--log-level=").upper() elif arg == "--no-env-config": no_env_config = True elif arg == "--no-save-results": @@ -285,11 +302,21 @@ def run(ctx) -> None: elif arg.startswith("--map-payload-fields"): fields = arg.removeprefix("--map-payload-fields=") map_payload_fields = {f.replace("-", "_") for f in fields.split(",")} + elif arg == "--shared-server": + 
shared_server = True + elif arg.startswith("--dev-env"): + dev_env = arg.removeprefix("--dev-env=") + if dev_env not in _VALID_DEV_ENVS: + click.echo( + f"Invalid --dev-env value '{dev_env}'. Valid values: {', '.join(sorted(_VALID_DEV_ENVS))}", + err=True, + ) + sys.exit(1) elif not arg.startswith("--"): break processed_args_count += 1 - logger_utils.init_logger(trace=trace, stdout=True) + logger_utils.init_logger(log_name="cli", log_level=log_level, stdout=True) # Parse handler config from env vars handler_config_overrides: dict[str, dict[str, dict[str, str]]] = {} @@ -360,6 +387,9 @@ def run(ctx) -> None: handler_config_overrides, save_results, map_payload_fields, + own_server=not shared_server, + log_level=log_level, + dev_env=dev_env, ) ) click.echo(result.output) @@ -385,14 +415,19 @@ def run(ctx) -> None: @cli.command() -@click.option("--trace", "trace", is_flag=True, default=False) +@click.option("--log-level", "log_level", default="INFO", type=click.Choice(["TRACE", "DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False), show_default=True) @click.option("--debug", "debug", is_flag=True, default=False) @click.option("--recreate", "recreate", is_flag=True, default=False) -def prepare_envs(trace: bool, debug: bool, recreate: bool) -> None: +@click.option("--shared-server", "shared_server", is_flag=True, default=False) +@click.option("--dev-env", "dev_env", default=None, type=click.Choice(sorted(_VALID_DEV_ENVS)), help="Override detected dev environment") +@click.option("--env", "env_names", multiple=True, metavar="ENV_NAME", help="Limit to specific environment(s). Can be specified multiple times.") +@click.option("--project", "project_names", multiple=True, metavar="PROJECT_NAME", help="Limit to specific project(s). 
Can be specified multiple times.") +def prepare_envs(log_level: str, debug: bool, recreate: bool, shared_server: bool, dev_env: str | None, env_names: tuple[str, ...], project_names: tuple[str, ...]) -> None: """ `prepare-envs` should be called from workspace/project root directory. """ - # idea: project parameter to allow to run from other directories? + from finecode.cli_app.commands import prepare_envs_cmd + if debug is True: import debugpy @@ -402,17 +437,23 @@ def prepare_envs(trace: bool, debug: bool, recreate: bool) -> None: except Exception as e: logger.info(e) - logger_utils.init_logger(trace=trace, stdout=True) + logger_utils.init_logger(log_name="cli", log_level=log_level, stdout=True) user_messages._notification_sender = show_user_message try: asyncio.run( prepare_envs_cmd.prepare_envs( - workdir_path=pathlib.Path(os.getcwd()), recreate=recreate + workdir_path=pathlib.Path(os.getcwd()), + recreate=recreate, + own_server=not shared_server, + log_level=log_level, + dev_env=dev_env or detect_dev_env(), + env_names=list(env_names) if env_names else None, + project_names=list(project_names) if project_names else None, ) ) except prepare_envs_cmd.PrepareEnvsFailed as exception: - click.echo(exception.args[0], err=True) + click.echo(exception.message, err=True) sys.exit(1) except Exception as exception: logger.exception(exception) @@ -421,10 +462,14 @@ def prepare_envs(trace: bool, debug: bool, recreate: bool) -> None: @cli.command() -@click.option("--trace", "trace", is_flag=True, default=False) +@click.option("--log-level", "log_level", default="INFO", type=click.Choice(["TRACE", "DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False), show_default=True) @click.option("--debug", "debug", is_flag=True, default=False) @click.option("--project", "project", type=str) -def dump_config(trace: bool, debug: bool, project: str | None): +@click.option("--shared-server", "shared_server", is_flag=True, default=False) +@click.option("--dev-env", "dev_env", 
default=None, type=click.Choice(sorted(_VALID_DEV_ENVS)), help="Override detected dev environment") +def dump_config(log_level: str, debug: bool, project: str | None, shared_server: bool, dev_env: str | None): + from finecode.cli_app.commands import dump_config_cmd + if debug is True: import debugpy @@ -438,13 +483,17 @@ def dump_config(trace: bool, debug: bool, project: str | None): click.echo("--project parameter is required", err=True) return - logger_utils.init_logger(trace=trace, stdout=True) + logger_utils.init_logger(log_name="cli", log_level=log_level, stdout=True) user_messages._notification_sender = show_user_message try: asyncio.run( dump_config_cmd.dump_config( - workdir_path=pathlib.Path(os.getcwd()), project_name=project + workdir_path=pathlib.Path(os.getcwd()), + project_name=project, + own_server=not shared_server, + log_level=log_level, + dev_env=dev_env or detect_dev_env(), ) ) except dump_config_cmd.DumpFailed as exception: @@ -452,5 +501,53 @@ def dump_config(trace: bool, debug: bool, project: str | None): sys.exit(1) +@cli.command() +@click.option("--workdir", "workdir", default=None, type=str, help="Workspace root directory") +@click.option("--log-level", "log_level", default="INFO", type=click.Choice(["TRACE", "DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False), show_default=True) +@click.option( + "--wm-port-file", + "wm_port_file", + default=None, + type=str, + help="Start a dedicated WM server and write its port to this file. ", +) +def start_mcp(workdir: str | None, log_level: str, wm_port_file: str | None): + """Start the FineCode MCP server (stdio). 
Connects to a running FineCode WM Server.""" + from finecode import mcp_server + + logger_utils.init_logger(log_name="mcp_server", log_level=log_level, stdout=False) + workdir_path = pathlib.Path(workdir) if workdir else pathlib.Path(os.getcwd()) + port_file_path = pathlib.Path(wm_port_file) if wm_port_file else None + mcp_server.start(workdir_path, port_file=port_file_path) + + +@cli.command() +@click.option("--log-level", "log_level", default="INFO", type=click.Choice(["TRACE", "DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False), show_default=True) +@click.option( + "--port-file", + "port_file", + default=None, + type=str, + help="Write the listening port to this file instead of the shared discovery file. " + "Used by dedicated instances started without --shared-server.", +) +@click.option( + "--disconnect-timeout", + "disconnect_timeout", + default=30, + type=int, + show_default=True, + help="Seconds to wait after the last client disconnects before shutting down.", +) +def start_wm_server(log_level: str, port_file: str | None, disconnect_timeout: int): + """Start the FineCode WM Server standalone (TCP JSON-RPC). 
Auto-stops when all clients disconnect.""" + from finecode.wm_server import wm_server + + log_file_path = logger_utils.init_logger(log_name="wm_server", log_level=log_level, stdout=False) + wm_server._log_file_path = log_file_path + port_file_path = pathlib.Path(port_file) if port_file else None + asyncio.run(wm_server.start_standalone(port_file=port_file_path, disconnect_timeout=disconnect_timeout)) + + if __name__ == "__main__": cli() diff --git a/src/finecode/cli_app/commands/dump_config_cmd.py b/src/finecode/cli_app/commands/dump_config_cmd.py index c14d2182..801ac943 100644 --- a/src/finecode/cli_app/commands/dump_config_cmd.py +++ b/src/finecode/cli_app/commands/dump_config_cmd.py @@ -1,11 +1,8 @@ +# docs: docs/cli.md import pathlib -from loguru import logger - -from finecode import context -from finecode.services import run_service, shutdown_service -from finecode.config import config_models, read_configs -from finecode.runner import runner_manager +from finecode.wm_client import ApiClient, ApiError +from finecode.wm_server import wm_lifecycle class DumpFailed(Exception): @@ -13,79 +10,60 @@ def __init__(self, message: str) -> None: self.message = message -async def dump_config(workdir_path: pathlib.Path, project_name: str): - ws_context = context.WorkspaceContext([workdir_path]) - # it could be optimized by looking for concrete project instead of all - await read_configs.read_projects_in_dir( - dir_path=workdir_path, ws_context=ws_context - ) - - # project is provided. 
Filter out other projects if there are more, they would - # not be used (run can be started in a workspace with also other projects) - ws_context.ws_projects = { - project_dir_path: project - for project_dir_path, project in ws_context.ws_projects.items() - if project.name == project_name - } - - # read configs without presets, this is required to be able to start runners in - # the next step - for project in ws_context.ws_projects.values(): - try: - await read_configs.read_project_config( - project=project, ws_context=ws_context, resolve_presets=False - ) - except config_models.ConfigurationError as exception: - raise DumpFailed( - f"Reading project configs(without presets) in {project.dir_path} failed: {exception.message}" - ) from exception - - # Some tools like IDE extensions for syntax highlighting rely on - # file name. Keep file name of config the same and save in subdirectory - project_dir_path = list(ws_context.ws_projects.keys())[0] - dump_dir_path = project_dir_path / "finecode_config_dump" - dump_file_path = dump_dir_path / "pyproject.toml" - project_def = ws_context.ws_projects[project_dir_path] - actions_by_projects = {project_dir_path: ["dump_config"]} - - # start runner to init project config +async def dump_config( + workdir_path: pathlib.Path, project_name: str, own_server: bool = True, log_level: str = "INFO", dev_env: str = "cli" +): + port_file = None try: - # reread projects configs, now with resolved presets - # to be able to resolve presets, start runners with presets first - try: - await runner_manager.start_runners_with_presets( - projects=[project_def], ws_context=ws_context - ) - except runner_manager.RunnerFailedToStart as exception: - raise DumpFailed( - f"Starting runners with presets failed: {exception.message}" - ) from exception + if own_server: + port_file = wm_lifecycle.start_own_server(workdir_path, log_level=log_level) + try: + port = await wm_lifecycle.wait_until_ready_from_file(port_file) + except TimeoutError as exc: + raise 
DumpFailed(str(exc)) from exc + else: + wm_lifecycle.ensure_running(workdir_path) + try: + port = await wm_lifecycle.wait_until_ready() + except TimeoutError as exc: + raise DumpFailed(str(exc)) from exc + client = ApiClient() + await client.connect("127.0.0.1", port) try: - await run_service.start_required_environments( - actions_by_projects, ws_context + result = await client.add_dir(workdir_path) + projects = result.get("projects", []) + project = next( + (p for p in projects if p["name"] == project_name), None ) - except run_service.StartingEnvironmentsFailed as exception: - raise DumpFailed( - f"Failed to start environments for running 'dump_config': {exception.message}" - ) from exception + if project is None: + raise DumpFailed(f"Project '{project_name}' not found") - project_raw_config = ws_context.ws_projects_raw_configs[project_dir_path] + project_path = project["path"] + project_dir_path = pathlib.Path(project_path) + source_file_path = project_dir_path / "pyproject.toml" + target_file_path = project_dir_path / "finecode_config_dump" / "pyproject.toml" - await run_service.run_action( - action_name="dump_config", - params={ - "source_file_path": project_def.def_path, - "project_raw_config": project_raw_config, - "target_file_path": dump_file_path, - }, - project_def=project_def, - ws_context=ws_context, - result_formats=[run_service.RunResultFormat.STRING], - preprocess_payload=False, - run_trigger=run_service.RunActionTrigger.USER, - dev_env=run_service.DevEnv.CLI, - ) - logger.info(f"Dumped config into {dump_file_path}") + try: + project_raw_config = await client.get_project_raw_config(project_path) + await client.run_action( + action="dump_config", + project=project_path, + params={ + "source_file_path": str(source_file_path), + "project_raw_config": project_raw_config, + "target_file_path": str(target_file_path), + }, + options={ + "resultFormats": ["string"], + "trigger": "user", + "devEnv": dev_env, + }, + ) + except ApiError as exc: + raise 
DumpFailed(str(exc)) from exc + finally: + await client.close() finally: - shutdown_service.on_shutdown(ws_context) + if port_file is not None and port_file.exists(): + port_file.unlink(missing_ok=True) diff --git a/src/finecode/cli_app/commands/prepare_envs_cmd.py b/src/finecode/cli_app/commands/prepare_envs_cmd.py index e8d5fbbe..2033532f 100644 --- a/src/finecode/cli_app/commands/prepare_envs_cmd.py +++ b/src/finecode/cli_app/commands/prepare_envs_cmd.py @@ -1,277 +1,254 @@ +# docs: docs/cli.md +import asyncio import pathlib -import shutil +from finecode.wm_client import ApiClient, ApiError +from finecode.wm_server import wm_lifecycle from loguru import logger -from finecode import context, domain -from finecode.services import run_service, shutdown_service -from finecode.cli_app import utils -from finecode.config import collect_actions, config_models, read_configs -from finecode.runner import runner_manager +class PrepareEnvsFailed(Exception): + def __init__(self, message: str) -> None: + self.message = message -class PrepareEnvsFailed(Exception): ... - - -async def prepare_envs(workdir_path: pathlib.Path, recreate: bool) -> None: - # similar to `run_actions`, but with certain differences: - # - prepare_envs doesn't support presets because `dev_workspace` env most - # probably doesn't exist yet - # - we don't need to check missing actions, because prepare_envs is a builtin action - # and it exists always - ws_context = context.WorkspaceContext([workdir_path]) - await read_configs.read_projects_in_dir( - dir_path=workdir_path, ws_context=ws_context - ) - - # `prepare_envs` can be run only from workspace/project root. 
Validate this - if workdir_path not in ws_context.ws_projects: - raise PrepareEnvsFailed( - "prepare_env can be run only from workspace/project root" - ) - - invalid_projects = [ - project - for project in ws_context.ws_projects.values() - if project.status == domain.ProjectStatus.CONFIG_INVALID - ] - if len(invalid_projects) > 0: - raise PrepareEnvsFailed( - f"Projects have invalid configuration: {invalid_projects}" - ) - - # prepare envs only in projects with valid configurations and which use finecode - projects = [ - project - for project in ws_context.ws_projects.values() - if project.status == domain.ProjectStatus.CONFIG_VALID - ] - - # Collect actions in relevant projects - for project in projects: - try: - await read_configs.read_project_config( - project=project, ws_context=ws_context, resolve_presets=False - ) - collect_actions.collect_actions( - project_path=project.dir_path, ws_context=ws_context - ) - except config_models.ConfigurationError as exception: - raise PrepareEnvsFailed( - f"Reading project config and collecting actions in {project.dir_path} failed: {exception.message}" - ) from exception +async def prepare_envs( + workdir_path: pathlib.Path, + recreate: bool, + own_server: bool = True, + log_level: str = "INFO", + dev_env: str = "cli", + env_names: list[str] | None = None, + project_names: list[str] | None = None, +) -> None: + """Prepare all virtual environments for a workspace. + + Orchestration steps: + 1. Discover projects (without starting runners — envs may not exist yet). + 2. Check / remove dev_workspace environments as needed. + 3. Run ``create_envs`` + ``install_envs`` to create / update dev_workspace envs. + 4. Start dev_workspace runners (resolves preset actions). + 5. Run ``create_envs`` to create all virtualenvs. + 6. Run ``install_envs`` to install all dependencies. + + When ``env_names`` is given only those named environments are prepared in + step 6 (step 5 still runs for all envs). 
+ When ``project_names`` is given only those projects are prepared in steps 3, 5, and 6. + """ + port_file = None try: - # try to start runner in 'dev_workspace' env of each project. If venv doesn't - # exist or doesn't work, recreate it by running actions in the current env. - if recreate: - remove_dev_workspace_envs(projects=projects, workdir_path=workdir_path) - - await check_or_recreate_all_dev_workspace_envs( - projects=projects, - workdir_path=workdir_path, - recreate=recreate, - ws_context=ws_context, - ) - - # reread projects configs, now with resolved presets - # to be able to resolve presets, start runners with presets first - try: - await runner_manager.start_runners_with_presets( - projects=projects, ws_context=ws_context - ) - except runner_manager.RunnerFailedToStart as exception: - raise PrepareEnvsFailed( - f"Starting runners with presets failed: {exception.message}" - ) from exception - - # now all 'dev_workspace' envs are valid, run 'prepare_runners' in them to create - # venvs and install runners and presets in them - actions_by_projects: dict[pathlib.Path, list[str]] = { - project.dir_path: ["prepare_runners"] for project in projects - } - # action payload can be kept empty because it will be filled in payload preprocessor - action_payload: dict[str, str | bool] = {"recreate": recreate} - - try: - await run_service.start_required_environments( - actions_by_projects, ws_context - ) - except run_service.StartingEnvironmentsFailed as exception: - raise PrepareEnvsFailed( - f"Failed to start environments for running 'prepare_runners': {exception.message}" - ) - - try: - ( - result_output, - result_return_code, - _ - ) = await utils.run_actions_in_projects_and_concat_results( - actions_by_projects, - action_payload, - ws_context, - concurrently=True, - run_trigger=run_service.RunActionTrigger.USER, - dev_env=run_service.DevEnv.CLI, - ) - except run_service.ActionRunFailed as error: - logger.error(error.message) - result_output = error.message - 
result_return_code = 1 - - if result_return_code != 0: - raise PrepareEnvsFailed(result_output) - - actions_by_projects: dict[pathlib.Path, list[str]] = { - project.dir_path: ["prepare_envs"] for project in projects - } - # action payload can be kept empty because it will be filled in payload preprocessor - action_payload: dict[str, str | bool] = {"recreate": recreate} - + if own_server: + port_file = wm_lifecycle.start_own_server(workdir_path, log_level=log_level) + try: + port = await wm_lifecycle.wait_until_ready_from_file(port_file) + except TimeoutError as exc: + raise PrepareEnvsFailed(str(exc)) from exc + else: + wm_lifecycle.ensure_running(workdir_path) + try: + port = await wm_lifecycle.wait_until_ready() + except TimeoutError as exc: + raise PrepareEnvsFailed(str(exc)) from exc + + client = ApiClient() + await client.connect("127.0.0.1", port) try: - ( - result_output, - result_return_code, - _ - ) = await utils.run_actions_in_projects_and_concat_results( - actions_by_projects, - action_payload, - ws_context, - concurrently=True, - run_trigger=run_service.RunActionTrigger.USER, - dev_env=run_service.DevEnv.CLI, + await _run( + client, workdir_path, recreate, dev_env, env_names, project_names ) - except run_service.ActionRunFailed as error: - logger.error(error.message) - result_output = error.message - result_return_code = 1 - - if result_return_code != 0: - raise PrepareEnvsFailed(result_output) + finally: + await client.close() finally: - shutdown_service.on_shutdown(ws_context) + if port_file is not None and port_file.exists(): + port_file.unlink(missing_ok=True) -def remove_dev_workspace_envs( - projects: list[domain.Project], workdir_path: pathlib.Path -) -> None: - for project in projects: - if project.dir_path == workdir_path: - # skip removing `dev_workspace` env of the current project, because user - # is responsible for keeping it correct - continue +def _check_batch_result(batch_result: dict, error_prefix: str) -> None: + if 
batch_result.get("returnCode", 0) != 0: + output_parts = [] + for actions_result in batch_result.get("results", {}).values(): + for response in actions_result.values(): + text = (response.get("resultByFormat") or {}).get("string", "") + if text: + output_parts.append(text) + raise PrepareEnvsFailed(error_prefix + ":\n" + "\n".join(output_parts)) - runner_manager.remove_runner_venv( - runner_dir=project.dir_path, env_name="dev_workspace" - ) - -async def check_or_recreate_all_dev_workspace_envs( - projects: list[domain.Project], +async def _run( + client: ApiClient, workdir_path: pathlib.Path, recreate: bool, - ws_context: context.WorkspaceContext, + dev_env: str = "cli", + env_names: list[str] | None = None, + project_names: list[str] | None = None, ) -> None: - # NOTE: this function can start new extensions runner, don't forget to call - # on_shutdown if you use it - projects_dirs_with_valid_envs: list[pathlib.Path] = [] - projects_dirs_with_invalid_envs: list[pathlib.Path] = [] - - for project in projects: - if project.dir_path == workdir_path: - # skip checking `dev_workspace` env of the current project, because user - # is responsible for keeping it correct - continue - - runner_is_valid = await runner_manager.check_runner( - runner_dir=project.dir_path, env_name="dev_workspace" - ) - if runner_is_valid: - projects_dirs_with_valid_envs.append(project.dir_path) - else: - if recreate: - logger.trace( - f"Recreate runner for env 'dev_workspace' in project '{project.name}'" - ) - else: - logger.warning( - f"Runner for env 'dev_workspace' in project '{project.name}' is invalid, recreate it" - ) - projects_dirs_with_invalid_envs.append(project.dir_path) - - # to recreate dev_workspace env, run `prepare_envs` in runner of current project - current_project_dir_path = ws_context.ws_dirs_paths[0] - current_project = ws_context.ws_projects[current_project_dir_path] - try: - await runner_manager._start_dev_workspace_runner(project_def=current_project, 
ws_context=ws_context) - except runner_manager.RunnerFailedToStart as exception: + # Step 1 — discover projects without starting runners (envs may not exist). + logger.info("Discovering projects...") + result = await client.add_dir(workdir_path, start_runners=False) + projects: list[dict] = result.get("projects", []) + + workdir_str = str(workdir_path) + current_project = next((p for p in projects if p["path"] == workdir_str), None) + if current_project is None: raise PrepareEnvsFailed( - f"Failed to start `dev_workspace` runner in {current_project.name}: {exception.message}" - ) from exception + "prepare-envs can be run only from workspace/project root" + ) - envs = [] + invalid_status_projects = [p for p in projects if p["status"] == "CONFIG_INVALID"] + if invalid_status_projects: + names = [p["name"] for p in invalid_status_projects] + raise PrepareEnvsFailed(f"Projects have invalid configuration: {names}") - # run pip install in dev_workspace even if env exists to make sure that correct - # dependencies are installed - for project_dir_path in projects_dirs_with_valid_envs: - if project_dir_path == workdir_path: - # skip installation of dependencies in `dev_workspace` env of the - # current project, because user is responsible for keeping them - # up-to-date - continue + other_projects = [ + p + for p in projects + if p["path"] != workdir_str and p["status"] == "CONFIG_VALID" + ] - # dependencies in `dev_workspace` should be simple and installable without - # dumping - envs.append( - { - "name": "dev_workspace", - "venv_dir_path": project_dir_path / ".venvs" / "dev_workspace", - "project_def_path": project_dir_path / "pyproject.toml", - } - ) + project_paths: list[str] | None = None + if project_names is not None: + unknown = [ + n for n in project_names if not any(p["name"] == n for p in projects) + ] + if unknown: + raise PrepareEnvsFailed(f"Unknown project(s): {unknown}") + other_projects = [p for p in other_projects if p["name"] in project_names] + # Resolve 
names to paths for all subsequent API calls (canonical identifier) + project_paths = [p["path"] for p in projects if p["name"] in project_names] - if len(projects_dirs_with_invalid_envs) > 0: - invalid_envs = [] + logger.info(f"Found {len(projects)} project(s): {[p['name'] for p in projects]}") - for project_dir_path in projects_dirs_with_invalid_envs: - # dependencies in `dev_workspace` should be simple and installable without - # dumping - invalid_envs.append( - { - "name": "dev_workspace", - "venv_dir_path": project_dir_path / ".venvs" / "dev_workspace", - "project_def_path": project_dir_path / "pyproject.toml", - } - ) + # Step 2 — check / remove dev_workspace environments (parallelized). + logger.info("Checking dev workspace environments...") - # remove existing invalid envs - for env_info in invalid_envs: - if env_info["venv_dir_path"].exists(): - logger.trace(f"{env_info['venv_dir_path']} was invalid, remove it") - shutil.rmtree(env_info["venv_dir_path"]) + async def _check_or_remove_dw(project: dict) -> None: + if recreate: + logger.trace(f"Recreate env 'dev_workspace' in project '{project['name']}'") + try: + await client.remove_env(project["path"], "dev_workspace") + except ApiError as exc: + raise PrepareEnvsFailed( + f"Failed to remove env for '{project['name']}': {exc}" + ) from exc + else: + try: + valid = await client.check_env(project["path"], "dev_workspace") + except ApiError as exc: + raise PrepareEnvsFailed( + f"Failed to check env for '{project['name']}': {exc}" + ) from exc + if not valid: + logger.warning( + f"Env 'dev_workspace' in project '{project['name']}' is " + f"invalid, recreating it" + ) + try: + await client.remove_env(project["path"], "dev_workspace") + except ApiError as exc: + raise PrepareEnvsFailed( + f"Failed to remove invalid env for '{project['name']}': {exc}" + ) from exc - envs += invalid_envs + try: + async with asyncio.TaskGroup() as tg: + for project in other_projects: + tg.create_task(_check_or_remove_dw(project)) + 
except* PrepareEnvsFailed as eg: + raise eg.exceptions[0] + + # Step 3 — create / update dev_workspace environments. + logger.info("Creating/updating dev workspace environments...") + dw_envs = [ + { + "name": "dev_workspace", + "venv_dir_path": str(pathlib.Path(p["path"]) / ".venvs" / "dev_workspace"), + "project_def_path": str(pathlib.Path(p["path"]) / "pyproject.toml"), + } + for p in other_projects + ] + dw_options = { + "resultFormats": ["string"], + "trigger": "user", + "devEnv": dev_env, + } + # Step 3a — create the dev_workspace virtualenvs. try: - action_result = await run_service.run_action( - action_name="prepare_dev_workspaces_envs", - params={ - "envs": envs, - }, - project_def=current_project, - ws_context=ws_context, - result_formats=[run_service.RunResultFormat.STRING], - preprocess_payload=False, - run_trigger=run_service.RunActionTrigger.USER, - dev_env=run_service.DevEnv.CLI, + create_dw_result = await client.run_action( + action="create_envs", + project=current_project["path"], + # 'recreate' is handled for dev_workspace envs above, no need to pass here + params={"envs": dw_envs}, + options=dw_options, ) - except run_service.ActionRunFailed as exception: + except ApiError as exc: + raise PrepareEnvsFailed(f"'create_envs' (dev_workspace) failed: {exc}") from exc + if create_dw_result.get("returnCode", 0) != 0: + output = (create_dw_result.get("resultByFormat") or {}).get("string", "") raise PrepareEnvsFailed( - f"'prepare_dev_workspaces_env' failed in {current_project.name}: {exception.message}" - ) from exception + f"'create_envs' (dev_workspace) failed with return code " + f"{create_dw_result['returnCode']}: {output}" + ) - if action_result.return_code != 0: + # Step 3b — install dev_workspace dependencies. 
+ try: + prepare_dw_result = await client.run_action( + action="install_envs", + project=current_project["path"], + params={"envs": dw_envs}, + options=dw_options, + ) + except ApiError as exc: + raise PrepareEnvsFailed( + f"'install_envs' (dev_workspace) failed: {exc}" + ) from exc + if prepare_dw_result.get("returnCode", 0) != 0: + output = (prepare_dw_result.get("resultByFormat") or {}).get("string", "") raise PrepareEnvsFailed( - f"'prepare_dev_workspaces_env' ended in {current_project.name} with return code {action_result.return_code}: {action_result.result_by_format['string']}" + f"'install_envs' (dev_workspace) failed with return code " + f"{prepare_dw_result['returnCode']}: {output}" ) + + # Step 4 — start dev_workspace runners (resolves preset-defined actions). + logger.info("Starting dev_workspace runners...") + try: + await client.start_runners() + except ApiError as exc: + raise PrepareEnvsFailed(f"Starting runners failed: {exc}") from exc + + # Steps 5 & 6 — create envs and install dependencies. + logger.info("Creating envs and installing dependencies...") + # Each step runs across all projects concurrently. + common_options = { + "concurrently": False, + "resultFormats": ["string"], + "trigger": "user", + "devEnv": dev_env, + } + + # Step 5 — create all virtualenvs (no env filter). + try: + create_result = await client.run_batch( + actions=["create_envs"], + projects=project_paths, + options=common_options, + ) + except ApiError as exc: + raise PrepareEnvsFailed(f"'create_envs' failed: {exc}") from exc + _check_batch_result(create_result, "'create_envs' failed") + + # Step 6 — install dependencies (with optional env_names filter). 
+ handler_params = {"env_names": env_names} if env_names is not None else {} + try: + batch_result = await client.run_batch( + actions=["install_envs"], + projects=project_paths, + params=handler_params, + options=common_options, + ) + except ApiError as exc: + raise PrepareEnvsFailed(f"'install_envs' failed: {exc}") from exc + _check_batch_result(batch_result, "'install_envs' failed") + + +__all__ = ["prepare_envs", "PrepareEnvsFailed"] diff --git a/src/finecode/cli_app/commands/run_cmd.py b/src/finecode/cli_app/commands/run_cmd.py index 88ea1acd..acb35599 100644 --- a/src/finecode/cli_app/commands/run_cmd.py +++ b/src/finecode/cli_app/commands/run_cmd.py @@ -1,16 +1,14 @@ +# docs: docs/cli.md import json import pathlib import sys import typing -import ordered_set -from loguru import logger - -from finecode import context, domain -from finecode.services import run_service, shutdown_service -from finecode.config import collect_actions, config_models, read_configs -from finecode.runner import runner_manager +import click +from finecode.wm_client import ApiClient, ApiError +from finecode.wm_server import wm_lifecycle +from finecode.wm_server.runner import runner_client from finecode.cli_app import utils @@ -23,207 +21,151 @@ async def run_actions( workdir_path: pathlib.Path, projects_names: list[str] | None, actions: list[str], - action_payload: dict[str, str], + action_payload: dict[str, typing.Any], concurrently: bool, handler_config_overrides: dict[str, dict[str, dict[str, str]]] | None = None, save_results: bool = True, map_payload_fields: set[str] | None = None, + own_server: bool = False, + log_level: str = "INFO", + dev_env: str = "cli", ) -> utils.RunActionsResult: - ws_context = context.WorkspaceContext([workdir_path]) - if handler_config_overrides: - ws_context.handler_config_overrides = handler_config_overrides - await read_configs.read_projects_in_dir( - dir_path=workdir_path, ws_context=ws_context - ) - - if projects_names is not None: - # projects are 
provided. Filter out other projects if there are more, they would - # not be used (run can be started in a workspace with also other projects) - ws_context.ws_projects = { - project_dir_path: project - for project_dir_path, project in ws_context.ws_projects.items() - if project.name in projects_names - } - - # make sure all projects use finecode - config_problem_found = False - for project in ws_context.ws_projects.values(): - if project.status != domain.ProjectStatus.CONFIG_VALID: - if project.status == domain.ProjectStatus.NO_FINECODE: - logger.error( - f"You asked to run action in project '{project.name}', but finecode is not used in it(=there is no 'dev_workspace' environment with 'finecode' package in it)" - ) - config_problem_found = True - elif project.status == domain.ProjectStatus.CONFIG_INVALID: - logger.error( - f"You asked to run action in project '{project.name}', but its configuration is invalid(see logs above for more details)" - ) - config_problem_found = True + port_file = None + try: + if own_server: + port_file = wm_lifecycle.start_own_server(workdir_path, log_level=log_level) + try: + port = await wm_lifecycle.wait_until_ready_from_file(port_file) + except TimeoutError as exc: + raise RunFailed(str(exc)) from exc + else: + wm_lifecycle.ensure_running(workdir_path) + try: + port = await wm_lifecycle.wait_until_ready() + except TimeoutError as exc: + raise RunFailed(str(exc)) from exc + + client = ApiClient() + await client.connect("127.0.0.1", port) + try: + if handler_config_overrides: + if own_server: + await client.set_config_overrides(handler_config_overrides) else: - logger.error( - f"You asked to run action in project '{project.name}', but it has unexpected status: {project.status}" + click.echo( + "Warning: --config overrides are ignored in --shared-server mode. ", + err=True, ) - config_problem_found = True - - if config_problem_found: - raise RunFailed( - "There is a problem with configuration. 
See previous messages for more details" - ) - else: - # filter out packages that don't use finecode - ws_context.ws_projects = { - project_dir_path: project - for project_dir_path, project in ws_context.ws_projects.items() - if project.status != domain.ProjectStatus.NO_FINECODE - } - - # check that configuration of packages that use finecode is valid - config_problem_found = False - for project in ws_context.ws_projects.values(): - if project.status == domain.ProjectStatus.CONFIG_VALID: - continue - elif project.status == domain.ProjectStatus.CONFIG_INVALID: - logger.error( - f"Project '{project.name}' has invalid config, see messages above for more details" + await client.add_dir(workdir_path) + + # Resolve project names (CLI option) to paths (canonical API identifier). + project_paths: list[str] | None = None + if projects_names is not None: + all_projects = await client.list_projects() + unknown = [ + n for n in projects_names + if not any(p["name"] == n for p in all_projects) + ] + if unknown: + raise RunFailed(f"Unknown project(s): {unknown}") + project_paths = [ + p["path"] for p in all_projects if p["name"] in projects_names + ] + + params_by_project: dict[str, dict[str, typing.Any]] = {} + if map_payload_fields: + params_by_project = _resolve_mapped_payload_fields( + map_payload_fields=map_payload_fields, + action_payload=action_payload, ) - config_problem_found = True - else: - logger.error( - f"Project '{project.name}' has unexpected status: {project.status}" + + result_formats = ["string", "json"] if save_results else ["string"] + + try: + batch_result = await client.run_batch( + actions=actions, + projects=project_paths, + params=action_payload, + params_by_project=params_by_project or None, + options={ + "concurrently": concurrently, + "resultFormats": result_formats, + "trigger": "user", + "devEnv": dev_env, + }, ) - config_problem_found = True + except ApiError as exc: + raise RunFailed(str(exc)) from exc - if config_problem_found: - raise RunFailed( 
- "There is a problem with configuration. See previous messages for more details" - ) + return _build_run_result(batch_result) + finally: + await client.close() + finally: + if port_file is not None and port_file.exists(): + port_file.unlink(missing_ok=True) - projects: list[domain.Project] = [] - if projects_names is not None: - projects = get_projects_by_names(projects_names, ws_context, workdir_path) - else: - projects = list(ws_context.ws_projects.values()) - # first read configs without presets to be able to start runners with presets - for project in projects: - try: - await read_configs.read_project_config( - project=project, ws_context=ws_context, resolve_presets=False - ) - collect_actions.collect_actions( - project_path=project.dir_path, ws_context=ws_context - ) - except config_models.ConfigurationError as exception: - raise RunFailed( - f"Reading project config and collecting actions in {project.dir_path} failed: {exception.message}" - ) from exception +def _build_run_result(batch_result: dict) -> utils.RunActionsResult: + """Convert the actions/runBatch API response to RunActionsResult.""" + raw_results: dict[str, dict] = batch_result.get("results", {}) + overall_return_code: int = batch_result.get("returnCode", 0) - try: - # 1. Start runners with presets to be able to resolve presets. Presets are - # required to be able to collect all actions, actions handlers and configs. - try: - await runner_manager.start_runners_with_presets(projects, ws_context) - except runner_manager.RunnerFailedToStart as exception: - raise RunFailed( - "One or more projects are misconfigured, runners for them didn't" - + f" start: {exception.message}. Check logs for details." 
- ) from exception - except Exception as exception: - logger.error("Unexpected exception:") - logger.exception(exception) - - actions_by_projects: dict[pathlib.Path, list[str]] = {} - if projects_names is not None: - # check that all projects have all actions to detect problem and provide - # feedback as early as possible - actions_set: ordered_set.OrderedSet[str] = ordered_set.OrderedSet(actions) - for project in projects: - project_actions_set: ordered_set.OrderedSet[str] = ( - ordered_set.OrderedSet([action.name for action in project.actions]) - ) - missing_actions = actions_set - project_actions_set - if len(missing_actions) > 0: - raise RunFailed( - f"Actions {', '.join(missing_actions)} not found in project '{project.name}'" - ) - actions_by_projects[project.dir_path] = actions - else: - # no explicit project, run in `workdir`, it's expected to be a ws dir and - # actions will be run in all projects inside - actions_by_projects = run_service.find_projects_with_actions( - ws_context, actions - ) + result_by_project: dict[pathlib.Path, dict[str, runner_client.RunActionResponse]] = {} + output_parts: list[str] = [] - try: - await run_service.start_required_environments( - actions_by_projects, - ws_context, - update_config_in_running_runners=True, - ) - except run_service.StartingEnvironmentsFailed as exception: - raise RunFailed( - f"Failed to start environments for running actions: {exception.message}" - ) from exception - - payload_overrides_by_project: dict[str, dict[str, typing.Any]] = {} - if map_payload_fields: - payload_overrides_by_project = resolve_mapped_payload_fields( - map_payload_fields=map_payload_fields, - action_payload=action_payload, - ) + run_in_many_projects = len(raw_results) > 1 - try: - return await utils.run_actions_in_projects_and_concat_results( - actions_by_projects, - action_payload, - ws_context, - concurrently, - run_trigger=run_service.RunActionTrigger.USER, - dev_env=run_service.DevEnv.CLI, - output_json=save_results, - 
payload_overrides_by_project=payload_overrides_by_project, + for project_path_str, actions_results in raw_results.items(): + project_path = pathlib.Path(project_path_str) + project_responses: dict[str, runner_client.RunActionResponse] = {} + + project_output_parts: list[str] = [] + run_many_actions = len(actions_results) > 1 + + for action_name, action_data in actions_results.items(): + result_by_format = action_data.get("resultByFormat", {}) + return_code = action_data.get("returnCode", 0) + + response = runner_client.RunActionResponse( + result_by_format=result_by_format, + return_code=return_code, ) - except run_service.ActionRunFailed as exception: - raise RunFailed( - f"Failed to run actions: {exception.message}" - ) from exception - finally: - shutdown_service.on_shutdown(ws_context) + project_responses[action_name] = response + action_output = "" + if run_many_actions: + action_output += f"{click.style(action_name, bold=True)}:" + action_output += utils.run_result_to_str(response.text(), action_name) + project_output_parts.append(action_output) -def get_projects_by_names( - projects_names: list[str], - ws_context: context.WorkspaceContext, - workdir_path: pathlib.Path, -) -> list[domain.Project]: - projects: list[domain.Project] = [] - for project_name in projects_names: - try: - project = next( - project - for project in ws_context.ws_projects.values() - if project.name == project_name + result_by_project[project_path] = project_responses + + project_block = "".join(project_output_parts) + if run_in_many_projects: + project_block = ( + f"{click.style(project_path_str, bold=True, underline=True)}\n" + + project_block ) - except StopIteration as exception: - raise RunFailed( - f"Project '{projects_names}' not found in working directory '{workdir_path}'" - ) from exception + output_parts.append(project_block) - projects.append(project) - return projects + return utils.RunActionsResult( + output="\n".join(output_parts), + return_code=overall_return_code, + 
result_by_project=result_by_project, + ) -def resolve_mapped_payload_fields( +def _resolve_mapped_payload_fields( map_payload_fields: set[str], action_payload: dict[str, typing.Any], ) -> dict[str, dict[str, typing.Any]]: - """Resolve mapped payload fields from saved results. + """Resolve mapped payload fields from saved action results. Returns a dict keyed by project path string, where each value is a dict of field overrides for that project. """ results_dir = pathlib.Path(sys.executable).parent.parent / "cache" / "finecode" / "results" - payload_overrides_by_project: dict[str, dict[str, typing.Any]] = {} + params_by_project: dict[str, dict[str, typing.Any]] = {} for field_name in map_payload_fields: raw_value = action_payload.get(field_name) @@ -245,15 +187,16 @@ def resolve_mapped_payload_fields( for key in field_path.split("."): if not isinstance(resolved_value, dict): raise RunFailed( - f"Cannot resolve '{field_path}' in results of '{action_name}' for project '{project_path}'" + f"Cannot resolve '{field_path}' in results of '{action_name}'" + f" for project '{project_path}'" ) resolved_value = resolved_value.get(key) - if project_path not in payload_overrides_by_project: - payload_overrides_by_project[project_path] = {} - payload_overrides_by_project[project_path][field_name] = resolved_value + if project_path not in params_by_project: + params_by_project[project_path] = {} + params_by_project[project_path][field_name] = resolved_value - return payload_overrides_by_project + return params_by_project __all__ = ["run_actions"] diff --git a/src/finecode/cli_app/utils.py b/src/finecode/cli_app/utils.py index ae61e08d..55e01b7e 100644 --- a/src/finecode/cli_app/utils.py +++ b/src/finecode/cli_app/utils.py @@ -3,9 +3,9 @@ import click -from finecode import context -from finecode.runner import runner_client -from finecode.services import run_service +from finecode.wm_server import context +from finecode.wm_server.runner import runner_client +from 
finecode.wm_server.services import run_service class RunActionsResult(typing.NamedTuple): diff --git a/src/finecode/logger_utils.py b/src/finecode/logger_utils.py index 66fa883c..50f6bf01 100644 --- a/src/finecode/logger_utils.py +++ b/src/finecode/logger_utils.py @@ -1,3 +1,4 @@ +# docs: docs/guides/developing-finecode.md import inspect import logging import sys @@ -8,7 +9,7 @@ from finecode_extension_runner import logs -def init_logger(trace: bool, stdout: bool = False): +def init_logger(log_name: str, log_level: str = "INFO", stdout: bool = False) -> Path: venv_dir_path = Path(sys.executable).parent.parent logs_dir_path = venv_dir_path / "logs" @@ -23,9 +24,9 @@ def init_logger(trace: bool, stdout: bool = False): ] ) logs.set_log_level_for_group(group="finecode_jsonrpc.client", level=logs.LogLevel.INFO) - logs.save_logs_to_file( - file_path=logs_dir_path / "workspace_manager.log", - log_level="TRACE" if trace else "INFO", + log_file_path = logs.save_logs_to_file( + file_path=logs_dir_path / log_name / f"{log_name}.log", + log_level=log_level, stdout=stdout, ) @@ -52,3 +53,5 @@ def emit(self, record: logging.LogRecord) -> None: ) logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True) + + return log_file_path diff --git a/src/finecode/lsp_server/__init__.py b/src/finecode/lsp_server/__init__.py index e69de29b..7494b231 100644 --- a/src/finecode/lsp_server/__init__.py +++ b/src/finecode/lsp_server/__init__.py @@ -0,0 +1,3 @@ +""" +- all LSP handlers must wait on WM server start (e.g. 
`await global_state.server_initialized.wait()`) +""" \ No newline at end of file diff --git a/src/finecode/communication_utils.py b/src/finecode/lsp_server/communication_utils.py similarity index 100% rename from src/finecode/communication_utils.py rename to src/finecode/lsp_server/communication_utils.py diff --git a/src/finecode/lsp_server/endpoints/action_tree.py b/src/finecode/lsp_server/endpoints/action_tree.py index 0cbde886..406d1692 100644 --- a/src/finecode/lsp_server/endpoints/action_tree.py +++ b/src/finecode/lsp_server/endpoints/action_tree.py @@ -1,16 +1,8 @@ -import asyncio -from pathlib import Path - -import ordered_set -from loguru import logger -from pygls.lsp.server import LanguageServer - -from finecode import context, domain -from finecode import services as wm_services from finecode import user_messages from finecode.lsp_server import global_state, schemas from finecode.lsp_server.services import ActionNotFound, InternalError -from finecode.runner import runner_client +from loguru import logger +from pygls.lsp.server import LanguageServer async def list_actions(ls: LanguageServer, params): @@ -19,366 +11,509 @@ async def list_actions(ls: LanguageServer, params): # params is expected to be a list, but pygls seems to pass the first element of list # if the list contains only one element. 
Test after migration from pygls - parent_node_id = params # params[0] - request = schemas.ListActionsRequest(parent_node_id=parent_node_id) - result = await _list_actions(request=request) - return result.model_dump(by_alias=True) + parent_node_id = params # params[0] + + if global_state.wm_client is None: + raise Exception() + + response = await global_state.wm_client.get_tree(parent_node_id) + return response async def list_actions_for_position(ls: LanguageServer, params): - logger.info(f"list_actions for position {params}") + logger.info(f"list_actions_for_position {params}") await global_state.server_initialized.wait() - # position = params[0] - # TODO - request = schemas.ListActionsRequest(parent_node_id="") - result = await _list_actions(request=request) - return result.model_dump(by_alias=True) - - -def get_project_action_tree( - project: domain.Project, ws_context: context.WorkspaceContext -) -> list[schemas.ActionTreeNode]: - actions_nodes: list[schemas.ActionTreeNode] = [] - if project.status == domain.ProjectStatus.CONFIG_VALID: - assert project.actions is not None - action_nodes: list[schemas.ActionTreeNode] = [] - for action in project.actions: - node_id = f"{project.dir_path.as_posix()}::{action.name}" - handlers_nodes = [ - schemas.ActionTreeNode( - node_id=f"{project.dir_path.as_posix()}::{action.name}::{handler.name}", - name=handler.name, - node_type=schemas.ActionTreeNode.NodeType.ACTION, - subnodes=[], - status="", - ) - for handler in action.handlers - ] - action_nodes.append( - schemas.ActionTreeNode( - node_id=node_id, - name=action.name, - node_type=schemas.ActionTreeNode.NodeType.ACTION, - subnodes=handlers_nodes, - status="", - ) - ) - ws_context.cached_actions_by_id[node_id] = context.CachedAction( - action_id=node_id, - project_path=project.dir_path, - action_name=action.name, - ) - - node_id = f"{project.dir_path.as_posix()}::actions" - actions_nodes.append( - schemas.ActionTreeNode( - node_id=node_id, - name="Actions", - 
node_type=schemas.ActionTreeNode.NodeType.ACTION_GROUP, - subnodes=action_nodes, - status="", - ) - ) - - envs_nodes: list[schemas.ActionTreeNode] = [] - for env in project.envs: - node_id = f"{project.dir_path.as_posix()}::envs::{env}" - envs_nodes.append( - schemas.ActionTreeNode( - node_id=node_id, - name=env, - node_type=schemas.ActionTreeNode.NodeType.ENV, - subnodes=[], - status="", - ) - ) - - node_id = f"{project.dir_path.as_posix()}::envs" - actions_nodes.append( - schemas.ActionTreeNode( - node_id=node_id, - name="Environments", - node_type=schemas.ActionTreeNode.NodeType.ENV_GROUP, - subnodes=envs_nodes, - status="", - ) - ) - else: - logger.info( - f"Project has no valid config and finecode: {project.dir_path}, no actions will be shown" - ) + if global_state.wm_client is None: + raise Exception() - return actions_nodes - - -def create_node_list_for_ws( - ws_context: context.WorkspaceContext, -) -> list[schemas.ActionTreeNode]: - nodes: list[schemas.ActionTreeNode] = [] - projects_by_ws_dir: dict[Path, list[Path]] = {} - - all_ws_dirs = list(ws_context.ws_dirs_paths) - all_ws_dirs.sort() - - all_projects_paths = list(ws_context.ws_projects.keys()) - all_projects_paths.sort() - # use sets to assign each project path to a single workspace directory - all_projects_paths_set = ordered_set.OrderedSet(all_projects_paths) - - for ws_dir in all_ws_dirs: - ws_dir_project_paths = [project_path for project_path in all_projects_paths_set if project_path.is_relative_to(ws_dir)] - projects_by_ws_dir[ws_dir] = ws_dir_project_paths - all_projects_paths_set -= ordered_set.OrderedSet(ws_dir_project_paths) - - if len(all_projects_paths_set) > 0: - logger.warning(f"Unexpected setup: these projects {all_projects_paths_set} don't belong to any of workspace dirs: {all_ws_dirs}") - - # build node tree so that: - # - all ws dirs are in tree either as project or directory - # - all projects are shown with subprojects and actions and handlers - for ws_dir in 
ws_context.ws_dirs_paths: - ws_dir_projects = projects_by_ws_dir[ws_dir] - ws_dir_nodes_by_path: dict[Path, schemas.ActionTreeNode] = {} - - # process ws_dir separately, because only it can be directory - if ws_dir in ws_dir_projects: - dir_node_type = schemas.ActionTreeNode.NodeType.PROJECT - try: - project = ws_context.ws_projects[ws_dir] - except KeyError: - logger.trace(f"Project exists in {ws_dir}, but no config found") - project = None - - if project is not None: - status = project.status.name - else: - status = "" - else: - dir_node_type = schemas.ActionTreeNode.NodeType.DIRECTORY - status = "" - - actions_nodes = get_project_action_tree(project=project, ws_context=ws_context) - node = schemas.ActionTreeNode( - node_id=ws_dir.as_posix(), - name=ws_dir.name, - subnodes=actions_nodes, - node_type=dir_node_type, - status=status, - ) - nodes.append(node) - ws_dir_nodes_by_path[ws_dir] = node - - for project_path in ws_dir_projects: - try: - project = ws_context.ws_projects[project_path] - except KeyError: - logger.trace(f"Project exists in {project_path}, but no config found") - project = None - - status = "" - if project is not None: - status = project.status.name - - actions_nodes = get_project_action_tree( - project=project, ws_context=ws_context - ) - node = schemas.ActionTreeNode( - node_id=project_path.as_posix(), - name=project_path.name, - subnodes=actions_nodes, - node_type=schemas.ActionTreeNode.NodeType.PROJECT, - status=status, - ) - - # check from back(=from the deepest node) to find the nearest parent node - for ws_dir_node_path in list(ws_dir_nodes_by_path.keys())[::-1]: - if project_path.is_relative_to(ws_dir_node_path): - ws_dir_nodes_by_path[ws_dir_node_path].subnodes.append(node) - break - - ws_dir_nodes_by_path[project_path] = node - - return nodes - - -async def __list_actions( - ws_context: context.WorkspaceContext, parent_node_id: str | None = None -) -> list[schemas.ActionTreeNode]: - # currently it always returns full tree - # - # if 
parent_node_id is None: - # list ws dirs and first level - - # wait for start of all runners, this is required to be able to resolve presets - all_started_coros = [] - for envs in ws_context.ws_projects_extension_runners.values(): - # all presets are expected to be in `dev_workspace` env - dev_workspace_runner = envs["dev_workspace"] - all_started_coros.append(dev_workspace_runner.initialized_event.wait()) - await asyncio.gather(*all_started_coros) - - nodes: list[schemas.ActionTreeNode] = create_node_list_for_ws(ws_context) - return nodes - # else: - # # TODO - # return [] - - -async def _list_actions( - request: schemas.ListActionsRequest, -) -> schemas.ListActionsResponse: - if len(global_state.ws_context.ws_dirs_paths) == 0: - return schemas.ListActionsResponse(nodes=[]) - - return schemas.ListActionsResponse( - nodes=await __list_actions( - global_state.ws_context, - request.parent_node_id if request.parent_node_id != "" else None, - ) - ) + response = await global_state.wm_client.get_tree(None) + return response async def run_action_on_file(ls: LanguageServer, params): logger.info(f"run action on file {params}") await global_state.server_initialized.wait() + if global_state.wm_client is None: + raise Exception() + params_dict = params[0] action_node_id = params_dict["projectPath"] + action_node_id_parts = action_node_id.split("::") + project_path_str = action_node_id_parts[0] + action_name = action_node_id_parts[1] document_meta = await ls.protocol.send_request_async( - method="editor/documentMeta", params={}, msg_id=None + method="editor/documentMeta", params={} ) if document_meta is None: return None - action_node_id_parts = action_node_id.split("::") - action_name = action_node_id_parts[1] - params = {"file_paths": [document_meta.uri.path]} + run_params: dict = {"file_paths": [document_meta.uri], "target": "files"} if action_name == "format": - params["save"] = False + run_params["save"] = False - run_action_request = schemas.RunActionRequest( - 
action_node_id=action_node_id, - params=params, + response = await global_state.wm_client.run_action( + action=action_name, + project=project_path_str, + params=run_params, + options={"trigger": "user", "devEnv": "ide"}, ) - response = await run_action(run_action_request) - logger.debug(f"Response: {response}") - - return response.model_dump(by_alias=True) + return response async def run_action_on_project(ls: LanguageServer, params): logger.info(f"run action on project {params}") await global_state.server_initialized.wait() - # file_paths_by_projects = project_analyzer.get_files_by_projects( - # projects_dirs_paths=[project_dir_path] - # ) - # file_paths = file_paths_by_projects[project_dir_path] - # params = {"file_paths": file_paths} - # if action_name == "format": - # params["save"] = True + if global_state.wm_client is None: + raise Exception() - return {} - # params_dict = params[0] - # action_node_id = params_dict["projectPath"] - # apply_on = action_node_id.split("::")[0] - # run_action_request = schemas.RunActionRequest( - # action_node_id=action_node_id, apply_on=apply_on, apply_on_text="" - # ) - # response = await services.run_action(run_action_request) - # return response.model_dump(by_alias=True) + params_dict = params[0] + action_node_id = params_dict["projectPath"] + action_node_id_parts = action_node_id.split("::") + project_path_str = action_node_id_parts[0] + action_name = action_node_id_parts[1] + + response = await global_state.wm_client.run_action( + action=action_name, + project=project_path_str, + params={"target": "project"}, + options={"trigger": "user", "devEnv": "ide"}, + ) + return response -async def reload_action(ls: LanguageServer, params): - logger.info(f"reload action {params}") +async def list_projects(ls: LanguageServer): + logger.info("list_projects") await global_state.server_initialized.wait() - params_dict = params[0] - action_node_id = params_dict["projectPath"] - await __reload_action(action_node_id) + if 
global_state.wm_client is None: + raise Exception() - return {} + return await global_state.wm_client.list_projects() -async def __reload_action(action_node_id: str) -> None: - splitted_action_id = action_node_id.split("::") - project_path = Path(splitted_action_id[0]) - try: - project = global_state.ws_context.ws_projects[project_path] - except KeyError: - raise ActionNotFound() +async def run_batch(ls: LanguageServer, params): + logger.info( + f"run_batch actions={params.get('actions')} options={params.get('options')}" + ) + await global_state.server_initialized.wait() - if project.actions is None: - logger.error("Actions in project are not read yet, but expected") - raise InternalError() + if global_state.wm_client is None: + logger.error("run_batch: wm_client is None") + raise Exception("WM client not available") - action_name = splitted_action_id[1] try: - action = next( - action for action in project.actions if action.name == action_name + result = await global_state.wm_client.run_batch( + actions=params["actions"], + projects=params.get("projects"), + params=params.get("params"), + params_by_project=params.get("paramsByProject"), + options=params.get("options", {"trigger": "user", "devEnv": "ide"}), + ) + logger.info( + f"run_batch done, projects={list(result.get('results', {}).keys())}" ) - except StopIteration as error: - logger.error(f"Unexpected error, project or action not found: {error}") - raise InternalError() + return result + except Exception: + logger.exception("run_batch: WM request failed") + raise - all_handlers_envs = ordered_set.OrderedSet( - [handler.env for handler in action.handlers] - ) - for env in all_handlers_envs: - # parallel to speed up? 
- try: - runner = global_state.ws_context.ws_projects_extension_runners[ - project_path - ][env] - except KeyError: - continue - - try: - await runner_client.reload_action(runner, action_name) - except runner_client.BaseRunnerRequestException as error: - await user_messages.error( - f"Action {action_name} reload failed: {error.message}" - ) - - -async def run_action( - request: schemas.RunActionRequest, -) -> schemas.RunActionResponse: - # TODO: validate apply_on and apply_on_text - _action_node_id = request.action_node_id - splitted_action_id = _action_node_id.split("::") - project_path = Path(splitted_action_id[0]) - try: - project_def = global_state.ws_context.ws_projects[project_path] - except KeyError: - raise ActionNotFound() - if project_def.actions is None: - logger.error("Actions in project are not read yet, but expected") - raise InternalError() +async def run_action(ls: LanguageServer, params): + logger.info(f"run_action {params}") + await global_state.server_initialized.wait() - action_name = splitted_action_id[1] + if global_state.wm_client is None: + raise Exception() + + return await global_state.wm_client.run_action( + action=params["action"], + project=params["project"], + params=params.get("params"), + options=params.get("options", {"trigger": "user", "devEnv": "ide"}), + ) - try: - response = await wm_services.run_action( - action_name=action_name, - params=request.params, - project_def=project_def, - ws_context=global_state.ws_context, - ) - result = response.result - except wm_services.ActionRunFailed as exception: - logger.error(exception.message) - result = {} - return schemas.RunActionResponse(result=result) +async def reload_action(ls: LanguageServer, params): + logger.info(f"reload action {params}") + await global_state.server_initialized.wait() + if global_state.wm_client is None: + raise Exception() -async def notify_changed_action_node( - ls: LanguageServer, action: schemas.ActionTreeNode -) -> None: - ls.protocol.notify( - 
method="actionsNodes/changed", params=action.model_dump(by_alias=True) + params_dict = params[0] + action_node_id = params_dict["projectPath"] + + await global_state.wm_client.request( + "actions/reload", {"actionNodeId": action_node_id} ) + return {} + + +# async def list_actions_for_position(ls: LanguageServer, params): +# logger.info(f"list_actions for position {params}") +# await global_state.server_initialized.wait() + +# # position = params[0] +# # TODO +# parent_node_id = "" + +# if global_state.wm_client is not None: +# resp = await global_state.wm_client.get_tree(parent_node_id) +# return resp + +# request = schemas.ListActionsRequest(parent_node_id="") +# result = await _list_actions(request=request) +# return result.model_dump(by_alias=True) + + +# def get_project_action_tree( +# project: domain.Project, ws_context: context.WorkspaceContext +# ) -> list[schemas.ActionTreeNode]: +# actions_nodes: list[schemas.ActionTreeNode] = [] +# if project.status == domain.ProjectStatus.CONFIG_VALID: +# assert project.actions is not None +# action_nodes: list[schemas.ActionTreeNode] = [] +# for action in project.actions: +# node_id = f"{project.dir_path.as_posix()}::{action.name}" +# handlers_nodes = [ +# schemas.ActionTreeNode( +# node_id=f"{project.dir_path.as_posix()}::{action.name}::{handler.name}", +# name=handler.name, +# node_type=schemas.ActionTreeNode.NodeType.ACTION, +# subnodes=[], +# status="", +# ) +# for handler in action.handlers +# ] +# action_nodes.append( +# schemas.ActionTreeNode( +# node_id=node_id, +# name=action.name, +# node_type=schemas.ActionTreeNode.NodeType.ACTION, +# subnodes=handlers_nodes, +# status="", +# ) +# ) +# ws_context.cached_actions_by_id[node_id] = context.CachedAction( +# action_id=node_id, +# project_path=project.dir_path, +# action_name=action.name, +# ) + +# node_id = f"{project.dir_path.as_posix()}::actions" +# actions_nodes.append( +# schemas.ActionTreeNode( +# node_id=node_id, +# name="Actions", +# 
node_type=schemas.ActionTreeNode.NodeType.ACTION_GROUP, +# subnodes=action_nodes, +# status="", +# ) +# ) + +# envs_nodes: list[schemas.ActionTreeNode] = [] +# for env in project.envs: +# node_id = f"{project.dir_path.as_posix()}::envs::{env}" +# envs_nodes.append( +# schemas.ActionTreeNode( +# node_id=node_id, +# name=env, +# node_type=schemas.ActionTreeNode.NodeType.ENV, +# subnodes=[], +# status="", +# ) +# ) + +# node_id = f"{project.dir_path.as_posix()}::envs" +# actions_nodes.append( +# schemas.ActionTreeNode( +# node_id=node_id, +# name="Environments", +# node_type=schemas.ActionTreeNode.NodeType.ENV_GROUP, +# subnodes=envs_nodes, +# status="", +# ) +# ) +# else: +# logger.info( +# f"Project has no valid config and finecode: {project.dir_path}, no actions will be shown" +# ) + +# return actions_nodes + + +# def create_node_list_for_ws( +# ws_context: context.WorkspaceContext, +# ) -> list[schemas.ActionTreeNode]: +# nodes: list[schemas.ActionTreeNode] = [] +# projects_by_ws_dir: dict[Path, list[Path]] = {} + +# all_ws_dirs = list(ws_context.ws_dirs_paths) +# all_ws_dirs.sort() + +# all_projects_paths = list(ws_context.ws_projects.keys()) +# all_projects_paths.sort() +# # use sets to assign each project path to a single workspace directory +# all_projects_paths_set = ordered_set.OrderedSet(all_projects_paths) + +# for ws_dir in all_ws_dirs: +# ws_dir_project_paths = [project_path for project_path in all_projects_paths_set if project_path.is_relative_to(ws_dir)] +# projects_by_ws_dir[ws_dir] = ws_dir_project_paths +# all_projects_paths_set -= ordered_set.OrderedSet(ws_dir_project_paths) + +# if len(all_projects_paths_set) > 0: +# logger.warning(f"Unexpected setup: these projects {all_projects_paths_set} don't belong to any of workspace dirs: {all_ws_dirs}") + +# # build node tree so that: +# # - all ws dirs are in tree either as project or directory +# # - all projects are shown with subprojects and actions and handlers +# for ws_dir in 
ws_context.ws_dirs_paths: +# ws_dir_projects = projects_by_ws_dir[ws_dir] +# ws_dir_nodes_by_path: dict[Path, schemas.ActionTreeNode] = {} + +# # process ws_dir separately, because only it can be directory +# if ws_dir in ws_dir_projects: +# dir_node_type = schemas.ActionTreeNode.NodeType.PROJECT +# try: +# project = ws_context.ws_projects[ws_dir] +# except KeyError: +# logger.trace(f"Project exists in {ws_dir}, but no config found") +# project = None + +# if project is not None: +# status = project.status.name +# else: +# status = "" +# else: +# dir_node_type = schemas.ActionTreeNode.NodeType.DIRECTORY +# status = "" + +# actions_nodes = get_project_action_tree(project=project, ws_context=ws_context) +# node = schemas.ActionTreeNode( +# node_id=ws_dir.as_posix(), +# name=ws_dir.name, +# subnodes=actions_nodes, +# node_type=dir_node_type, +# status=status, +# ) +# nodes.append(node) +# ws_dir_nodes_by_path[ws_dir] = node + +# for project_path in ws_dir_projects: +# try: +# project = ws_context.ws_projects[project_path] +# except KeyError: +# logger.trace(f"Project exists in {project_path}, but no config found") +# project = None + +# status = "" +# if project is not None: +# status = project.status.name + +# actions_nodes = get_project_action_tree( +# project=project, ws_context=ws_context +# ) +# node = schemas.ActionTreeNode( +# node_id=project_path.as_posix(), +# name=project_path.name, +# subnodes=actions_nodes, +# node_type=schemas.ActionTreeNode.NodeType.PROJECT, +# status=status, +# ) + +# # check from back(=from the deepest node) to find the nearest parent node +# for ws_dir_node_path in list(ws_dir_nodes_by_path.keys())[::-1]: +# if project_path.is_relative_to(ws_dir_node_path): +# ws_dir_nodes_by_path[ws_dir_node_path].subnodes.append(node) +# break + +# ws_dir_nodes_by_path[project_path] = node + +# return nodes + + +# async def __list_actions( +# ws_context: context.WorkspaceContext, parent_node_id: str | None = None +# ) -> 
list[schemas.ActionTreeNode]: +# # currently it always returns full tree +# # +# # if parent_node_id is None: +# # list ws dirs and first level + +# # wait for start of all runners, this is required to be able to resolve presets +# # use TaskGroup instead of gather per request +# async with asyncio.TaskGroup() as tg: +# for envs in ws_context.ws_projects_extension_runners.values(): +# # all presets are expected to be in `dev_workspace` env +# dev_workspace_runner = envs.get("dev_workspace") +# if dev_workspace_runner is not None: +# tg.create_task(dev_workspace_runner.initialized_event.wait()) + +# nodes: list[schemas.ActionTreeNode] = create_node_list_for_ws(ws_context) +# return nodes +# # else: +# # # TODO +# # return [] + + +# async def _list_actions( +# request: schemas.ListActionsRequest, +# ) -> schemas.ListActionsResponse: +# if len(global_state.ws_context.ws_dirs_paths) == 0: +# return schemas.ListActionsResponse(nodes=[]) + +# return schemas.ListActionsResponse( +# nodes=await __list_actions( +# global_state.ws_context, +# request.parent_node_id if request.parent_node_id != "" else None, +# ) +# ) + + +# async def run_action_on_file(ls: LanguageServer, params): +# logger.info(f"run action on file {params}") +# await global_state.server_initialized.wait() + +# params_dict = params[0] +# action_node_id = params_dict["projectPath"] + +# document_meta = await ls.protocol.send_request_async( +# method="editor/documentMeta", params={}, msg_id=None +# ) +# if document_meta is None: +# return None + +# action_node_id_parts = action_node_id.split("::") +# action_name = action_node_id_parts[1] +# params = {"file_paths": [document_meta.uri.path]} +# if action_name == "format": +# params["save"] = False + +# run_action_request = schemas.RunActionRequest( +# action_node_id=action_node_id, +# params=params, +# ) +# response = await run_action(run_action_request) +# logger.debug(f"Response: {response}") + +# return response.model_dump(by_alias=True) + + +# async def 
run_action_on_project(ls: LanguageServer, params): +# logger.info(f"run action on project {params}") +# await global_state.server_initialized.wait() + +# # file_paths_by_projects = project_analyzer.get_files_by_projects( +# # projects_dirs_paths=[project_dir_path] +# # ) +# # file_paths = file_paths_by_projects[project_dir_path] +# # params = {"file_paths": file_paths} +# # if action_name == "format": +# # params["save"] = True + +# return {} +# # params_dict = params[0] +# # action_node_id = params_dict["projectPath"] +# # apply_on = action_node_id.split("::")[0] +# # run_action_request = schemas.RunActionRequest( +# # action_node_id=action_node_id, apply_on=apply_on, apply_on_text="" +# # ) +# # response = await services.run_action(run_action_request) +# # return response.model_dump(by_alias=True) + + +# async def reload_action(ls: LanguageServer, params): +# logger.info(f"reload action {params}") +# await global_state.server_initialized.wait() + +# params_dict = params[0] +# action_node_id = params_dict["projectPath"] +# await __reload_action(action_node_id) + +# return {} + + +# async def __reload_action(action_node_id: str) -> None: +# splitted_action_id = action_node_id.split("::") +# project_path = Path(splitted_action_id[0]) +# try: +# project = global_state.ws_context.ws_projects[project_path] +# except KeyError: +# raise ActionNotFound() + +# if project.actions is None: +# logger.error("Actions in project are not read yet, but expected") +# raise InternalError() + +# action_name = splitted_action_id[1] +# try: +# action = next( +# action for action in project.actions if action.name == action_name +# ) +# except StopIteration as error: +# logger.error(f"Unexpected error, project or action not found: {error}") +# raise InternalError() + +# all_handlers_envs = ordered_set.OrderedSet( +# [handler.env for handler in action.handlers] +# ) +# for env in all_handlers_envs: +# # parallel to speed up? 
+# try: +# runner = global_state.ws_context.ws_projects_extension_runners[ +# project_path +# ][env] +# except KeyError: +# continue + +# try: +# await runner_client.reload_action(runner, action_name) +# except runner_client.BaseRunnerRequestException as error: +# await user_messages.error( +# f"Action {action_name} reload failed: {error.message}" +# ) + + +# async def run_action( +# request: schemas.RunActionRequest, +# ) -> schemas.RunActionResponse: +# # TODO: validate apply_on and apply_on_text +# _action_node_id = request.action_node_id +# splitted_action_id = _action_node_id.split("::") +# project_path = Path(splitted_action_id[0]) +# try: +# project_def = global_state.ws_context.ws_projects[project_path] +# except KeyError: +# raise ActionNotFound() + +# if project_def.actions is None: +# logger.error("Actions in project are not read yet, but expected") +# raise InternalError() + +# action_name = splitted_action_id[1] + +# try: +# response = await wm_services.run_action( +# action_name=action_name, +# params=request.params, +# project_def=project_def, +# ws_context=global_state.ws_context, +# ) +# result = response.result +# except wm_services.ActionRunFailed as exception: +# logger.error(exception.message) +# result = {} + +# return schemas.RunActionResponse(result=result) + + +# async def notify_changed_action_node( +# ls: LanguageServer, action: schemas.ActionTreeNode +# ) -> None: +# ls.protocol.notify( +# method="actionsNodes/changed", params=action.model_dump(by_alias=True) +# ) diff --git a/src/finecode/lsp_server/endpoints/diagnostics.py b/src/finecode/lsp_server/endpoints/diagnostics.py index 2bd0e9f5..10d40c48 100644 --- a/src/finecode/lsp_server/endpoints/diagnostics.py +++ b/src/finecode/lsp_server/endpoints/diagnostics.py @@ -1,8 +1,6 @@ # TODO: handle all validation errors from __future__ import annotations -import asyncio -from dataclasses import dataclass, field from pathlib import Path from typing import TYPE_CHECKING @@ -10,13 +8,23 @@ from 
lsprotocol import types from pydantic.dataclasses import dataclass as pydantic_dataclass -from finecode import ( - context, - pygls_types_utils, -) -from finecode.services import run_service -from finecode.lsp_server import global_state -from finecode_extension_api.actions import lint as lint_action +from finecode.lsp_server import global_state, pygls_types_utils +from finecode_extension_api.actions.code_quality import lint_action + + +async def _find_project_dir_for_file(file_path: Path) -> str | None: + """Return the absolute directory path of the project containing *file_path*. + + This helper delegates the lookup to the WM server via + ``workspace/findProjectForFile``; the server applies the same logic that + would otherwise live locally. ``None`` is returned if the file does not + belong to any known project. + """ + # delegate the resolution to the WM server + assert global_state.wm_client is not None, "WM client required for project lookup" + project = await global_state.wm_client.find_project_for_file(str(file_path)) + return project + if TYPE_CHECKING: from pygls.lsp.server import LanguageServer @@ -29,11 +37,11 @@ def map_lint_message_to_diagnostic( return types.Diagnostic( range=types.Range( types.Position( - lint_message.range.start.line - 1, + lint_message.range.start.line, lint_message.range.start.character, ), types.Position( - lint_message.range.end.line - 1, + lint_message.range.end.line, lint_message.range.end.character, ), ), @@ -57,23 +65,32 @@ async def document_diagnostic_with_full_result( file_path: Path, ) -> types.DocumentDiagnosticReport | None: logger.trace(f"Document diagnostic with full result: {file_path}") + + if global_state.wm_client is None: + logger.error("Diagnostics requested but WM client not connected") + return None + + project_dir = await _find_project_dir_for_file(file_path) + if project_dir is None: + logger.error(f"Cannot determine project for diagnostics: {file_path}") + return None + + file_uri = file_path.as_uri() + 
try: - response = await run_service.find_action_project_and_run( - file_path=file_path, - action_name="lint", + response = await global_state.wm_client.run_action( + action="lint", + project=project_dir, params={ "target": "files", - "file_paths": [file_path], + "file_paths": [file_uri], }, - run_trigger=run_service.RunActionTrigger.SYSTEM, - dev_env=run_service.DevEnv.IDE, - ws_context=global_state.ws_context, - initialize_all_handlers=True, + options={"trigger": "system", "devEnv": "ide"}, ) - except run_service.ActionRunFailed as error: + except Exception as error: # catching any runtime error from client # don't throw error because vscode after a few sequential errors will stop # requesting diagnostics until restart. Show user message instead - logger.error(str(error)) # TODO: user message + logger.error(f"Diagnostics API request failed: {error}") return None if response is None: @@ -82,11 +99,14 @@ async def document_diagnostic_with_full_result( # use pydantic dataclass to convert dict to dataclass instance recursively # (default dataclass constructor doesn't handle nested items, it stores them just # as dict) + json_result = (response.get("resultByFormat") or {}).get("json") + if json_result is None: + return None result_type = pydantic_dataclass(lint_action.LintRunResult) - lint_result: lint_action.LintRunResult = result_type(**response.json()) + lint_result: lint_action.LintRunResult = result_type(**json_result) try: - requested_file_messages = lint_result.messages.pop(str(file_path)) + requested_file_messages = lint_result.messages.pop(file_uri) except KeyError: requested_file_messages = [] requested_files_diagnostic_items = [ @@ -98,17 +118,15 @@ async def document_diagnostic_with_full_result( ) related_files_diagnostics: dict[str, types.FullDocumentDiagnosticReport] = {} - for file_path_str, file_lint_messages in lint_result.messages.items(): + for related_file_uri, file_lint_messages in lint_result.messages.items(): file_report = 
types.FullDocumentDiagnosticReport( items=[ map_lint_message_to_diagnostic(lint_message) for lint_message in file_lint_messages ] ) - file_path = Path(file_path_str) - related_files_diagnostics[pygls_types_utils.path_to_uri_str(file_path)] = ( - file_report - ) + # ResourceUri is already a file:// URI string — use directly + related_files_diagnostics[related_file_uri] = file_report response.related_documents = related_files_diagnostics logger.trace(f"Document diagnostic with full result for {file_path} finished") @@ -119,85 +137,32 @@ async def document_diagnostic_with_partial_results( file_path: Path, partial_result_token: int | str ) -> None: logger.trace(f"Document diagnostic with partial results: {file_path}") - assert global_state.progress_reporter is not None, ( - "LSP Server in Workspace Manager was incorrectly initialized:" - " progress reporter not registered" - ) + + if global_state.wm_client is None: + logger.error("Diagnostics requested but WM client not connected") + return None + + project_dir = await _find_project_dir_for_file(file_path) + if project_dir is None: + logger.error(f"Cannot determine project for diagnostics: {file_path}") + return None + + # Store the expected response type for this token + global_state.partial_result_tokens[partial_result_token] = ("lint", "document_diagnostic") try: - async with run_service.find_action_project_and_run_with_partial_results( - file_path=file_path, - action_name="lint", - params={ - "file_paths": [file_path], + await global_state.wm_client.request( + "actions/runWithPartialResults", + { + "action": "lint", + "project": project_dir, + "params": {"file_paths": [file_path.as_uri()]}, + "partialResultToken": partial_result_token, + "options": {"resultFormats": ["json"], "trigger": "system", "devEnv": "ide"}, }, - partial_result_token=partial_result_token, - run_trigger=run_service.RunActionTrigger.SYSTEM, - dev_env=run_service.DevEnv.IDE, - ws_context=global_state.ws_context, - initialize_all_handlers=True, - 
) as response: - # LSP defines that the first response should be `DocumentDiagnosticReport` - # with diagnostics information for requested file and then n responses - # with diagnostics for related documents using - # `DocumentDiagnosticReportPartialResult`. - # - # We get responses for all files in random order, first wait for response - # for requested file, send it and only then all other. - related_documents: dict[str, types.FullDocumentDiagnosticReport] = {} - got_response_for_requested_file: bool = False - requested_file_path_str = str(file_path) - # use pydantic dataclass to convert dict to dataclass instance recursively - # (default dataclass constructor doesn't handle nested items, it stores them just - # as dict) - result_type = pydantic_dataclass(lint_action.LintRunResult) - async for partial_response in response: - lint_subresult: lint_action.LintRunResult = result_type( - **partial_response - ) - for file_path_str, lint_messages in lint_subresult.messages.items(): - if requested_file_path_str == file_path_str: - if got_response_for_requested_file: - raise Exception( - "Unexpected behavior: got response for requested file twice" - ) - document_items = [ - map_lint_message_to_diagnostic(lint_message) - for lint_message in lint_messages - ] - document_report = types.RelatedFullDocumentDiagnosticReport( - items=document_items, related_documents=related_documents - ) - global_state.progress_reporter( - partial_result_token, document_report - ) - got_response_for_requested_file = True - else: - document_uri = pygls_types_utils.path_to_uri_str( - Path(file_path_str) - ) - document_items = [ - map_lint_message_to_diagnostic(lint_message) - for lint_message in lint_messages - ] - related_documents[document_uri] = ( - types.FullDocumentDiagnosticReport(items=document_items) - ) - - if got_response_for_requested_file and len(related_documents) > 0: - related_doc_diagnostics = ( - types.DocumentDiagnosticReportPartialResult( - related_documents=related_documents - 
) - ) - global_state.progress_reporter( - partial_result_token, related_doc_diagnostics - ) - except run_service.ActionRunFailed as error: - # don't throw error because vscode after a few sequential errors will stop - # requesting diagnostics until restart. Show user message instead - logger.error(str(error)) # TODO: user message - + ) + except Exception as error: + logger.error(f"Diagnostics API request failed: {error}") return None @@ -235,123 +200,91 @@ async def document_diagnostic( return None -@dataclass -class LintActionExecInfo: - project_dir_path: Path - action_name: str - request_data: dict[str, str | list[str]] = field(default_factory=dict) - - async def run_workspace_diagnostic_with_partial_results( - exec_info: LintActionExecInfo, partial_result_token: str | int + partial_result_token: str | int ): - assert global_state.progress_reporter is not None + """Run lint with partial results on all projects. + + The WM server automatically runs the action in all relevant projects when + the 'project' field is empty. 
+ """ + assert global_state.wm_client is not None, "WM client must be connected" + + # Store the expected response type for this token + global_state.partial_result_tokens[partial_result_token] = ("lint", "workspace_diagnostic") try: - async with run_service.run_with_partial_results( - action_name="lint", - params=exec_info.request_data, - partial_result_token=partial_result_token, - project_dir_path=exec_info.project_dir_path, - run_trigger=run_service.RunActionTrigger.SYSTEM, - dev_env=run_service.DevEnv.IDE, - ws_context=global_state.ws_context, - initialize_all_handlers=True, - ) as response: - # use pydantic dataclass to convert dict to dataclass instance recursively - # (default dataclass constructor doesn't handle nested items, it stores them just - # as dict) - result_type = pydantic_dataclass(lint_action.LintRunResult) - async for partial_response in response: - lint_subresult: lint_action.LintRunResult = result_type( - **partial_response - ) - lsp_subresult = types.WorkspaceDiagnosticReportPartialResult( - items=[ - types.WorkspaceFullDocumentDiagnosticReport( - uri=pygls_types_utils.path_to_uri_str(Path(file_path_str)), - items=[ - map_lint_message_to_diagnostic(lint_message) - for lint_message in lint_messages - ], - ) - for ( - file_path_str, - lint_messages, - ) in lint_subresult.messages.items() - ] - ) - global_state.progress_reporter(partial_result_token, lsp_subresult) - except run_service.ActionRunFailed as error: - # don't throw error because vscode after a few sequential errors will stop - # requesting diagnostics until restart. 
Show user message instead - logger.error(str(error)) # TODO: user message + # send request to WM server; notifications will trigger progress reporter + await global_state.wm_client.request( + "actions/runWithPartialResults", + { + "action": "lint", + "project": "", # empty project = all relevant projects + "params": {"target": "project"}, + "partialResultToken": partial_result_token, + "options": {"resultFormats": ["json"], "trigger": "system", "devEnv": "ide"}, + }, + ) + except Exception as error: + logger.error(f"Workspace diagnostics API request failed: {error}") async def workspace_diagnostic_with_partial_results( - exec_infos: list[LintActionExecInfo], partial_result_token: str | int + partial_result_token: str | int ) -> types.WorkspaceDiagnosticReport: - try: - async with asyncio.TaskGroup() as tg: - for exec_info in exec_infos: - tg.create_task( - run_workspace_diagnostic_with_partial_results( - exec_info=exec_info, partial_result_token=partial_result_token - ) - ) - except ExceptionGroup as eg: - logger.error(f"Error in workspace diagnostic: {eg.exceptions}") + """Request workspace diagnostics with partial results. + Returns an empty report; the actual results arrive via notifications. + """ + await run_workspace_diagnostic_with_partial_results( + partial_result_token=partial_result_token + ) # lsprotocol allows None as return value, but then vscode throws error # 'cannot read items of null'. keep empty report instead return types.WorkspaceDiagnosticReport(items=[]) -async def workspace_diagnostic_with_full_result( - exec_infos: list[LintActionExecInfo], ws_context: context.WorkspaceContext -): - send_tasks: list[asyncio.Task] = [] +async def workspace_diagnostic_with_full_result() -> types.WorkspaceDiagnosticReport: + """Run lint action on all projects via API and aggregate results. + + The WM server automatically runs in all relevant projects when 'project' + field is empty. 
+ """ + assert global_state.wm_client is not None, "WM client must be connected" + try: - async with asyncio.TaskGroup() as tg: - for exec_info in exec_infos: - project = ws_context.ws_projects[exec_info.project_dir_path] - task = tg.create_task( - run_service.run_action( - action_name=exec_info.action_name, - params=exec_info.request_data, - project_def=project, - ws_context=ws_context, - run_trigger=run_service.RunActionTrigger.SYSTEM, - dev_env=run_service.DevEnv.IDE, - preprocess_payload=False, - initialize_all_handlers=True, - ) - ) - send_tasks.append(task) - except ExceptionGroup as eg: - logger.error(f"Error in workspace diagnostic: {eg.exceptions}") - - responses = [task.result().result for task in send_tasks] + response = await global_state.wm_client.run_action( + action="lint", + project="", # empty project = all relevant projects + params={"target": "project"}, + options={"trigger": "system", "devEnv": "ide"}, + ) + except Exception as error: + logger.error(f"Error in workspace diagnostic: {error}") + return types.WorkspaceDiagnosticReport(items=[]) + + if not response: + return types.WorkspaceDiagnosticReport(items=[]) # use pydantic dataclass to convert dict to dataclass instance recursively # (default dataclass constructor doesn't handle nested items, it stores them just # as dict) + json_result = (response.get("resultByFormat") or {}).get("json") + if not json_result: + return types.WorkspaceDiagnosticReport(items=[]) result_type = pydantic_dataclass(lint_action.LintRunResult) + lint_result: lint_action.LintRunResult = result_type(**json_result) + items: list[types.WorkspaceDocumentDiagnosticReport] = [] - for response in responses: - if response is None: - continue - else: - lint_result: lint_action.LintRunResult = result_type(**response) - for file_path_str, lint_messages in lint_result.messages.items(): - new_report = types.WorkspaceFullDocumentDiagnosticReport( - uri=pygls_types_utils.path_to_uri_str(Path(file_path_str)), - items=[ - 
map_lint_message_to_diagnostic(lint_message) - for lint_message in lint_messages - ], - ) - items.append(new_report) + for file_uri, lint_messages in lint_result.messages.items(): + new_report = types.WorkspaceFullDocumentDiagnosticReport( + uri=file_uri, # ResourceUri is already a file:// URI string + items=[ + map_lint_message_to_diagnostic(lint_message) + for lint_message in lint_messages + ], + ) + items.append(new_report) # lsprotocol allows None as return value, but then vscode throws error # 'cannot read items of null'. keep empty report instead @@ -361,47 +294,22 @@ async def workspace_diagnostic_with_full_result( async def _workspace_diagnostic( params: types.WorkspaceDiagnosticParams, ) -> types.WorkspaceDiagnosticReport | None: - relevant_projects_paths: list[Path] = run_service.find_all_projects_with_action( - # check lint_files, because 'lint' is builtin and exists in all projects by default - action_name="lint_files_python", - ws_context=global_state.ws_context, # TODO: correct check of name - ) - exec_info_by_project_dir_path: dict[Path, LintActionExecInfo] = {} - actions_by_projects: dict[Path, list[str]] = {} - - for project_dir_path in relevant_projects_paths: - exec_info_by_project_dir_path[project_dir_path] = LintActionExecInfo( - project_dir_path=project_dir_path, - action_name="lint", - request_data={"target": "project", "trigger": "system", "dev_env": "ide"}, - ) - actions_by_projects[project_dir_path] = ["lint"] + """Run workspace diagnostics for all projects via the WM server. - exec_infos = list(exec_info_by_project_dir_path.values()) - run_with_partial_results: bool = params.partial_result_token is not None + The WM server automatically selects relevant projects when the 'project' + field is empty. + """ + assert global_state.wm_client is not None, "WM client must be connected" - # linting is resource-intensive task. 
First start all runners and only then begin - # linting to avoid the case, when some of runners start first, take all available - # resources and other stay blocked. Starting of environment has timeout and the - # letter fail with timeout error. - try: - await run_service.start_required_environments( - actions_by_projects, global_state.ws_context, - initialize_all_handlers=True - ) - except run_service.StartingEnvironmentsFailed as exception: - logger.error( - f"Failed to start required environments for running workspace diagnostic: {exception.message}" + if params.partial_result_token is not None: + # fire off partial‑result request and return an empty placeholder; the + # progress reporter will handle streaming through notifications. + await workspace_diagnostic_with_partial_results( + partial_result_token=params.partial_result_token, ) + return types.WorkspaceDiagnosticReport(items=[]) - if run_with_partial_results: - return await workspace_diagnostic_with_partial_results( - exec_infos=exec_infos, partial_result_token=params.partial_result_token - ) - else: - return await workspace_diagnostic_with_full_result( - exec_infos=exec_infos, ws_context=global_state.ws_context - ) + return await workspace_diagnostic_with_full_result() async def workspace_diagnostic( diff --git a/src/finecode/lsp_server/endpoints/document_sync.py b/src/finecode/lsp_server/endpoints/document_sync.py index cec258e2..ae880885 100644 --- a/src/finecode/lsp_server/endpoints/document_sync.py +++ b/src/finecode/lsp_server/endpoints/document_sync.py @@ -1,162 +1,86 @@ -import asyncio -from pathlib import Path - from loguru import logger from lsprotocol import types from pygls.lsp.server import LanguageServer -from finecode import domain from finecode.lsp_server import global_state -from finecode.runner import runner_client async def document_did_open( ls: LanguageServer, params: types.DidOpenTextDocumentParams ): logger.trace(f"Document did open: {params.text_document.uri}") - 
global_state.ws_context.opened_documents[params.text_document.uri] = ( - domain.TextDocumentInfo( - uri=params.text_document.uri, version=str(params.text_document.version) - ) - ) + await global_state.server_initialized.wait() - file_path = Path(params.text_document.uri.replace("file://", "")) - projects_paths = [ - project_path - for project_path, project in global_state.ws_context.ws_projects.items() - if project.status == domain.ProjectStatus.CONFIG_VALID - and file_path.is_relative_to(project_path) - ] + if global_state.wm_client is None: + raise Exception("WM server not connected") - document_info = domain.TextDocumentInfo( - uri=params.text_document.uri, version=str(params.text_document.version) + await global_state.wm_client.notify_document_opened( + uri=params.text_document.uri, + version=params.text_document.version, + text=params.text_document.text, ) - try: - async with asyncio.TaskGroup() as tg: - for project_path in projects_paths: - runners_by_env = ( - global_state.ws_context.ws_projects_extension_runners.get( - project_path, {} - ) - ) - for runner in runners_by_env.values(): - if runner.status == runner_client.RunnerStatus.RUNNING: - tg.create_task( - runner_client.notify_document_did_open( - runner=runner, document_info=document_info - ) - ) - except ExceptionGroup as eg: - for exception in eg.exceptions: - logger.exception(exception) - logger.error(f"Error while sending opened document: {eg}") async def document_did_close( ls: LanguageServer, params: types.DidCloseTextDocumentParams ): logger.trace(f"Document did close: {params.text_document.uri}") - try: - del global_state.ws_context.opened_documents[params.text_document.uri] - except KeyError: - logger.error( - f"Document not found in opened documents: {params.text_document.uri}" - ) - return - - file_path = Path(params.text_document.uri.replace("file://", "")) - projects_paths = [ - project_path - for project_path, project in global_state.ws_context.ws_projects.items() - if project.status == 
domain.ProjectStatus.CONFIG_VALID - and file_path.is_relative_to(project_path) - ] - - try: - async with asyncio.TaskGroup() as tg: - for project_path in projects_paths: - runners_by_env = global_state.ws_context.ws_projects_extension_runners[ - project_path - ] - for runner in runners_by_env.values(): - if runner.status != runner_client.RunnerStatus.RUNNING: - logger.trace( - f"Runner {runner.readable_id} is not running, skip it" - ) - continue - - tg.create_task( - runner_client.notify_document_did_close( - runner=runner, document_uri=params.text_document.uri - ) - ) - except ExceptionGroup as e: - logger.error(f"Error while sending closed document: {e}") + await global_state.server_initialized.wait() + + if global_state.wm_client is None: + raise Exception("WM server not connected") + + await global_state.wm_client.notify_document_closed( + uri=params.text_document.uri + ) async def document_did_save( ls: LanguageServer, params: types.DidSaveTextDocumentParams ): logger.trace(f"Document did save: {params}") + await global_state.server_initialized.wait() async def document_did_change( ls: LanguageServer, params: types.DidChangeTextDocumentParams ): - global_state.ws_context.opened_documents[ - params.text_document.uri - ].version = params.text_document.version - logger.trace(f"Document did change: {params.text_document.uri}") - file_path = Path(params.text_document.uri.replace("file://", "")) - projects_paths = [ - project_path - for project_path, project in global_state.ws_context.ws_projects.items() - if project.status == domain.ProjectStatus.CONFIG_VALID - and file_path.is_relative_to(project_path) - ] - + await global_state.server_initialized.wait() + + if global_state.wm_client is None: + raise Exception("WM server not connected") + + # Convert content changes to API format (camelCase) content_changes = [] for change in params.content_changes: if isinstance(change, types.TextDocumentContentChangePartial): - mapped_change = 
runner_client.TextDocumentContentChangePartial( - range=runner_client.Range( - start=runner_client.Position(line=change.range.start.line, character=change.range.start.character), - end=runner_client.Position(line=change.range.end.line, character=change.range.end.character) - ), - text=change.text, - range_length=change.range_length + content_changes.append( + { + "range": { + "start": { + "line": change.range.start.line, + "character": change.range.start.character, + }, + "end": { + "line": change.range.end.line, + "character": change.range.end.character, + }, + }, + "text": change.text, + "rangeLength": change.range_length, + } ) - content_changes.append(mapped_change) elif isinstance(change, types.TextDocumentContentChangeWholeDocument): - mapped_change = runner_client.TextDocumentContentChangeWholeDocument(text=change.text) - content_changes.append(mapped_change) + content_changes.append({"text": change.text}) else: - logger.error(f"Got unsupported content change from LSP client: {type(change)}, skip it") + logger.error( + f"Got unsupported content change from LSP client: {type(change)}, skip it" + ) continue - change_params = runner_client.DidChangeTextDocumentParams( - text_document=runner_client.VersionedTextDocumentIdentifier(version=params.text_document.version, uri=params.text_document.uri), - content_changes=content_changes + await global_state.wm_client.notify_document_changed( + uri=params.text_document.uri, + version=params.text_document.version, + content_changes=content_changes, ) - - try: - async with asyncio.TaskGroup() as tg: - for project_path in projects_paths: - runners_by_env = global_state.ws_context.ws_projects_extension_runners[ - project_path - ] - for runner in runners_by_env.values(): - if runner.status != runner_client.RunnerStatus.RUNNING: - logger.trace( - f"Runner {runner.readable_id} is not running, skip it" - ) - continue - - tg.create_task( - runner_client.notify_document_did_change( - runner=runner, change_params=change_params - 
) - ) - except ExceptionGroup as e: - logger.error(f"Error while sending changed document: {e}") diff --git a/src/finecode/lsp_server/endpoints/formatting.py b/src/finecode/lsp_server/endpoints/formatting.py index 05bae535..6759cca1 100644 --- a/src/finecode/lsp_server/endpoints/formatting.py +++ b/src/finecode/lsp_server/endpoints/formatting.py @@ -2,12 +2,11 @@ from typing import TYPE_CHECKING +from finecode.lsp_server import global_state, pygls_types_utils +from finecode_extension_api.actions.code_quality import format_files_action from loguru import logger from lsprotocol import types - -from finecode import pygls_types_utils -from finecode.services import run_service -from finecode.lsp_server import global_state +from pydantic.dataclasses import dataclass as pydantic_dataclass if TYPE_CHECKING: from pygls.lsp.server import LanguageServer @@ -19,30 +18,43 @@ async def format_document(ls: LanguageServer, params: types.DocumentFormattingPa file_path = pygls_types_utils.uri_str_to_path(params.text_document.uri) + if global_state.wm_client is None: + logger.error("Formatting requested but WM client not connected") + return None + + project_dir = await global_state.wm_client.find_project_for_file(str(file_path)) + if project_dir is None: + logger.error(f"Cannot determine project for formatting: {file_path}") + return [] + + file_uri = file_path.as_uri() + try: - response = await run_service.find_action_project_and_run( - file_path=file_path, - action_name="format", - params={"file_paths": [file_path], "save": False}, - run_trigger=run_service.RunActionTrigger.USER, - dev_env=run_service.DevEnv.IDE, - ws_context=global_state.ws_context, - initialize_all_handlers=True, + response = await global_state.wm_client.run_action( + action="format", + project=project_dir, + params={"file_paths": [file_uri], "save": False, "target": "files"}, + options={"trigger": "user", "devEnv": "ide"}, ) - except Exception as error: # TODO + except Exception as error: logger.error(f"Error 
document formatting {file_path}: {error}") return None if response is None: return [] - response_for_file = response.json().get("result_by_file_path", {}).get( - str(file_path), None - ) + json_result = (response.get("resultByFormat") or {}).get("json") + if json_result is None: + return [] + + result_type = pydantic_dataclass(format_files_action.FormatFilesRunResult) + format_result: format_files_action.FormatFilesRunResult = result_type(**json_result) + + response_for_file = format_result.result_by_file_path.get(file_uri) if response_for_file is None: return [] - if response_for_file.get("changed", True) is True: + if response_for_file.changed is True: doc = ls.workspace.get_text_document(params.text_document.uri) return [ types.TextEdit( @@ -50,7 +62,7 @@ async def format_document(ls: LanguageServer, params: types.DocumentFormattingPa start=types.Position(0, 0), end=types.Position(len(doc.lines), len(doc.lines[-1])), ), - new_text=response_for_file["code"], + new_text=response_for_file.code, ) ] diff --git a/src/finecode/lsp_server/endpoints/inlay_hints.py b/src/finecode/lsp_server/endpoints/inlay_hints.py index 74ca3d15..afa4bf58 100644 --- a/src/finecode/lsp_server/endpoints/inlay_hints.py +++ b/src/finecode/lsp_server/endpoints/inlay_hints.py @@ -5,9 +5,7 @@ from loguru import logger from lsprotocol import types -from finecode import find_project, pygls_types_utils -from finecode.services import run_service -from finecode.lsp_server import global_state +from finecode.lsp_server import global_state, pygls_types_utils if TYPE_CHECKING: from pygls.lsp.server import LanguageServer @@ -47,31 +45,39 @@ async def document_inlay_hint( ls: LanguageServer, params: types.InlayHintParams ) -> types.InlayHintResult: logger.trace(f"Document inlay hints requested: {params}") + await global_state.server_initialized.wait() + file_path = pygls_types_utils.uri_str_to_path(params.text_document.uri) + + if global_state.wm_client is None: + logger.error("Inlay hints requested but 
WM client not connected") + return None + + project_dir = await global_state.wm_client.find_project_for_file(str(file_path)) + if project_dir is None: + # Not all files belong to a project with this action — not an error. + return [] + try: - response = await run_service.find_action_project_and_run( - file_path=file_path, - action_name="text_document_inlay_hint", + response = await global_state.wm_client.run_action( + action="text_document_inlay_hint", + project=project_dir, params=inlay_hint_params_to_dict(params), - run_trigger=run_service.RunActionTrigger.SYSTEM, - dev_env=run_service.DevEnv.IDE, - ws_context=global_state.ws_context, - initialize_all_handlers=True, + options={"trigger": "system", "devEnv": "ide"}, ) - except find_project.FileHasNotActionException: - # ignore this exception because client requests inlay hints for all workspace - # files and not neccessary all projects in ws have this action. So this is not - # an real error. - return [] - except Exception as error: # TODO + except Exception as error: logger.error(f"Error getting document inlay hints {file_path}: {error}") return None if response is None: return [] - hints = response.json().get("hints", None) - return [dict_to_inlay_hint(hint) for hint in hints] if hints is not None else None + json_result = (response.get("resultByFormat") or {}).get("json") + if json_result is None: + return [] + + hints = json_result.get("hints") + return [dict_to_inlay_hint(hint) for hint in hints] if hints is not None else [] async def inlay_hint_resolve( diff --git a/src/finecode/lsp_server/global_state.py b/src/finecode/lsp_server/global_state.py index 61df4cef..3c3bbace 100644 --- a/src/finecode/lsp_server/global_state.py +++ b/src/finecode/lsp_server/global_state.py @@ -1,9 +1,10 @@ import asyncio -import collections.abc -from typing import Any +from pathlib import Path -from finecode import context +from finecode.wm_client import ApiClient -ws_context = context.WorkspaceContext([]) server_initialized = 
asyncio.Event() -progress_reporter: collections.abc.Callable[[str | int, Any], None] | None = None +wm_client: ApiClient | None = None +partial_result_tokens: dict[str | int, tuple[str, str]] = {} +wm_log_level: str = "INFO" +lsp_log_file_path: Path | None = None diff --git a/src/finecode/lsp_server/lsp_server.py b/src/finecode/lsp_server/lsp_server.py index 92f3235c..16935118 100644 --- a/src/finecode/lsp_server/lsp_server.py +++ b/src/finecode/lsp_server/lsp_server.py @@ -1,8 +1,6 @@ import asyncio import collections.abc -from functools import partial from pathlib import Path -from typing import Any from loguru import logger from lsprotocol import types @@ -10,9 +8,9 @@ from pygls.lsp.server import LanguageServer from finecode_extension_runner.lsp_server import CustomLanguageServer -from finecode.services import shutdown_service -from finecode.runner import runner_manager -from finecode.lsp_server import global_state, schemas, services +from finecode.wm_server import wm_lifecycle +from finecode.wm_client import ApiClient +from finecode.lsp_server import global_state from finecode.lsp_server.endpoints import action_tree as action_tree_endpoints from finecode.lsp_server.endpoints import code_actions as code_actions_endpoints from finecode.lsp_server.endpoints import code_lens as code_lens_endpoints @@ -112,7 +110,7 @@ def create_lsp_server() -> CustomLanguageServer: register_inlay_hint_feature = server.feature(types.INLAY_HINT_RESOLVE) register_inlay_hint_feature(inlay_hints_endpoints.inlay_hint_resolve) - # Finecode + # Finecode commands exposed to the IDE register_list_actions_cmd = server.command("finecode.getActions") register_list_actions_cmd(action_tree_endpoints.list_actions) @@ -123,11 +121,20 @@ def create_lsp_server() -> CustomLanguageServer: action_tree_endpoints.list_actions_for_position ) + register_list_projects_cmd = server.command("finecode.listProjects") + register_list_projects_cmd(action_tree_endpoints.list_projects) + + register_run_batch_cmd = 
server.command("finecode.runBatch") + register_run_batch_cmd(action_tree_endpoints.run_batch) + + register_run_action_cmd = server.command("finecode.runAction") + register_run_action_cmd(action_tree_endpoints.run_action) + register_run_action_on_file_cmd = server.command("finecode.runActionOnFile") register_run_action_on_file_cmd(action_tree_endpoints.run_action_on_file) - # register_run_action_on_project_cmd = server.command("finecode.runActionOnProject") - # register_run_action_on_project_cmd(action_tree_endpoints.run_action_on_project) + register_run_action_on_project_cmd = server.command("finecode.runActionOnProject") + register_run_action_on_project_cmd(action_tree_endpoints.run_action_on_project) register_reload_action_cmd = server.command("finecode.reloadAction") register_reload_action_cmd(action_tree_endpoints.reload_action) @@ -139,7 +146,7 @@ def create_lsp_server() -> CustomLanguageServer: "finecode.restartExtensionRunner" ) register_restart_extension_runner_cmd(restart_extension_runner) - + register_restart_and_debug_extension_runner_cmd = server.command( "finecode.restartAndDebugExtensionRunner" ) @@ -148,6 +155,9 @@ def create_lsp_server() -> CustomLanguageServer: register_shutdown_feature = server.feature(types.SHUTDOWN) register_shutdown_feature(_on_shutdown) + register_server_shutdown_feature = server.feature('server/shutdown') + register_server_shutdown_feature(_lsp_server_shutdown) + return server @@ -189,37 +199,141 @@ async def _on_initialized(ls: LanguageServer, params: types.InitializedParams): logger.info("initialized, adding workspace directories") - async def apply_workspace_edit(params): - return await ls.workspace_apply_edit_async(params) + # Determine workspace root for WM server startup. 
+ workdir = Path.cwd() + if ls.workspace.folders: + first_folder = next(iter(ls.workspace.folders.values())) + workdir = Path(first_folder.uri.replace("file://", "")) - services.register_workspace_edit_applier(apply_workspace_edit) + # Ensure the FineCode WM server is running and connect to it. + # The TCP connection keeps the WM server alive for the LSP lifetime. + wm_lifecycle.ensure_running(workdir, log_level=global_state.wm_log_level) + try: + port = await wm_lifecycle.wait_until_ready() + except TimeoutError as exc: + logger.warning(f"FineCode WM server did not start: {exc}") + port = None - services.register_project_changed_callback( - partial(action_tree_endpoints.notify_changed_action_node, ls) - ) - services.register_send_user_message_notification_callback( - partial(send_user_message_notification, ls) - ) - services.register_send_user_message_request_callback( - partial(send_user_message_request, ls) - ) + if port is None: + logger.error("Cannot connect to FineCode WM server — no port available") + return - def report_progress(token: str | int, value: Any): - ls.progress(types.ProgressParams(token, value)) + try: + global_state.wm_client = ApiClient() + await global_state.wm_client.connect("127.0.0.1", port, client_id="lsp") + except (ConnectionRefusedError, OSError) as exc: + logger.error(f"Could not connect to FineCode WM server: {exc}") + global_state.wm_client = None + return + + if global_state.lsp_log_file_path: + ls.window_log_message( + types.LogMessageParams( + type=types.MessageType.Info, + message=f"FineCode LSP Server log: {global_state.lsp_log_file_path}", + ) + ) - services.register_progress_reporter(report_progress) - services.register_debug_session_starter(partial(start_debug_session, ls)) + log_path = global_state.wm_client.server_info.get("logFilePath") + if log_path: + ls.window_log_message( + types.LogMessageParams( + type=types.MessageType.Info, + message=f"FineCode WM Server log: {log_path}", + ) + ) + # Register notification 
handlers for server→client push messages. + async def on_tree_changed(params: dict) -> None: + # TODO + ... + # node = schemas.ActionTreeNode(**params["node"]) + # await action_tree_endpoints.notify_changed_action_node(ls, node) + + async def on_user_message(params: dict) -> None: + await send_user_message_notification(ls, params["message"], params["type"]) + + global_state.wm_client.on_notification("actions/treeChanged", on_tree_changed) + global_state.wm_client.on_notification("server/userMessage", on_user_message) + + # forward progress notifications to the LSP progress reporter + from finecode_extension_api.actions.code_quality import lint_action + from pydantic.dataclasses import dataclass as pydantic_dataclass + from finecode.lsp_server import pygls_types_utils + from finecode.lsp_server.endpoints.diagnostics import map_lint_message_to_diagnostic + + def _map_lint_to_document_diagnostic_partial(lint_result: lint_action.LintRunResult) -> types.DocumentDiagnosticReportPartialResult: + related_documents = {} + for file_path_str, lint_messages in lint_result.messages.items(): + file_report = types.FullDocumentDiagnosticReport( + items=[ + map_lint_message_to_diagnostic(lint_message) + for lint_message in lint_messages + ] + ) + uri = pygls_types_utils.path_to_uri_str(Path(file_path_str)) + related_documents[uri] = file_report + + return types.DocumentDiagnosticReportPartialResult(related_documents=related_documents) + + def _map_lint_to_workspace_diagnostic_partial(lint_result: lint_action.LintRunResult) -> types.WorkspaceDiagnosticReportPartialResult: + items = [ + types.WorkspaceFullDocumentDiagnosticReport( + uri=pygls_types_utils.path_to_uri_str(Path(file_path_str)), + items=[ + map_lint_message_to_diagnostic(lint_message) + for lint_message in lint_messages + ], + ) + for file_path_str, lint_messages in lint_result.messages.items() + ] + return types.WorkspaceDiagnosticReportPartialResult(items=items) + + async def on_partial_result(params: dict) -> None: + 
token = params.get("token") + value = params.get("value") + + if token is None or value is None: + logger.error("Invalid partial result notification: missing token or value") + return + + # TODO: remove mapping either after last partial or after final result + action, endpoint_type = global_state.partial_result_tokens.get(token, (None, None)) + if not action or not endpoint_type: + logger.error(f"No mapping found for partial result token {token}") + return + + if action == "lint": + result_by_format = value.get("resultByFormat") or {} + json_result = result_by_format.get("json") + if json_result is None: + logger.error(f"No json result in partial result for token {token}") + return + result_type = pydantic_dataclass(lint_action.LintRunResult) + lint_result: lint_action.LintRunResult = result_type(**json_result) + + if endpoint_type == "document_diagnostic": + lsp_partial = _map_lint_to_document_diagnostic_partial(lint_result) + elif endpoint_type == "workspace_diagnostic": + lsp_partial = _map_lint_to_workspace_diagnostic_partial(lint_result) + else: + logger.error(f"Unknown endpoint_type {endpoint_type} for action {action}") + return + + ls.progress(types.ProgressParams(token=token, value=lsp_partial)) + else: + logger.warning(f"Unsupported action for partial results: {action}") + + global_state.wm_client.on_notification("actions/partialResult", on_partial_result) + + # Add workspace directories via the WM server. 
try: async with asyncio.TaskGroup() as tg: for ws_dir in ls.workspace.folders.values(): - request = schemas.AddWorkspaceDirRequest( - dir_path=ws_dir.uri.replace("file://", "") - ) - tg.create_task(services.add_workspace_dir(request=request)) + dir_path = Path(ws_dir.uri.replace("file://", "")) + tg.create_task(global_state.wm_client.add_dir(dir_path)) except ExceptionGroup as error: logger.exception(error) - raise error from eg global_state.server_initialized.set() logger.trace("Workspace directories added, end of initialized handler") @@ -229,53 +343,95 @@ async def _workspace_did_change_workspace_folders( ls: LanguageServer, params: types.DidChangeWorkspaceFoldersParams ): logger.trace(f"Workspace dirs were changed: {params}") - await services.handle_changed_ws_dirs( - added=[ + if global_state.wm_client is None: + logger.warning("WM client not connected, ignoring workspace folder change") + return + + for ws_folder in params.event.removed: + await global_state.wm_client.remove_dir( Path(ws_folder.uri.removeprefix("file://")) - for ws_folder in params.event.added - ], - removed=[ + ) + + for ws_folder in params.event.added: + await global_state.wm_client.add_dir( Path(ws_folder.uri.removeprefix("file://")) - for ws_folder in params.event.removed - ], - ) + ) def _on_shutdown(ls: LanguageServer, params): logger.info("on shutdown handler", params) - shutdown_service.on_shutdown(global_state.ws_context) + # Close connection to the WM server. If this was the last client, + # the WM server will auto-stop after a short delay and clean up runners. + if global_state.wm_client is not None: + asyncio.ensure_future(global_state.wm_client.close()) + global_state.wm_client = None + + +async def _lsp_server_shutdown(ls: LanguageServer, params): + """Handle 'server/shutdown' — explicitly stop the WM server. + + Forwards the shutdown request to the WM server and then closes the + WM client connection. 
Used by the IDE when it wants to restart the + WM server (as opposed to a normal disconnect on deactivation). + """ + logger.info("server/shutdown request received, stopping WM server") + if global_state.wm_client is not None: + try: + await global_state.wm_client.request("server/shutdown", {}) + except Exception: + logger.warning("WM server did not respond to shutdown request") + await global_state.wm_client.close() + global_state.wm_client = None + return {} async def reset(ls: LanguageServer, params): logger.info("Reset WM") await global_state.server_initialized.wait() + if global_state.wm_client is None: + logger.error("Reset requested but WM client not connected") + return + + await global_state.wm_client.request("server/reset", {}) + async def restart_extension_runner(ls: LanguageServer, tree_node, param2): logger.info(f"restart extension runner {tree_node}") await global_state.server_initialized.wait() + if global_state.wm_client is None: + logger.error("Restart runner requested but WM client not connected") + return + runner_id = tree_node['projectPath'] splitted_runner_id = runner_id.split('::') runner_working_dir_str = splitted_runner_id[0] - runner_working_dir_path = Path(runner_working_dir_str) env_name = splitted_runner_id[-1] - await runner_manager.restart_extension_runner(runner_working_dir_path=runner_working_dir_path, env_name=env_name, ws_context=global_state.ws_context) + await global_state.wm_client.request( + "runners/restart", + {"runnerWorkingDir": runner_working_dir_str, "envName": env_name}, + ) async def restart_and_debug_extension_runner(ls: LanguageServer, tree_node, params2): logger.info(f"restart and debug extension runner {tree_node} {params2}") await global_state.server_initialized.wait() + if global_state.wm_client is None: + logger.error("Restart+debug runner requested but WM client not connected") + return + runner_id = tree_node['projectPath'] splitted_runner_id = runner_id.split('::') runner_working_dir_str = 
splitted_runner_id[0] - runner_working_dir_path = Path(runner_working_dir_str) env_name = splitted_runner_id[-1] - logger.info(f'start debugging {runner_working_dir_path} {runner_id} {env_name}') - await runner_manager.restart_extension_runner(runner_working_dir_path=runner_working_dir_path, env_name=env_name, ws_context=global_state.ws_context, debug=True) + await global_state.wm_client.request( + "runners/restart", + {"runnerWorkingDir": runner_working_dir_str, "envName": env_name, "debug": True}, + ) async def send_user_message_notification( diff --git a/src/finecode/lsp_server/main.py b/src/finecode/lsp_server/main.py index c6f2e833..7a9be43d 100644 --- a/src/finecode/lsp_server/main.py +++ b/src/finecode/lsp_server/main.py @@ -1,16 +1,34 @@ +# docs: docs/cli.md from __future__ import annotations -from finecode import communication_utils +import socket +import sys + +from finecode.lsp_server import communication_utils, global_state from finecode import logger_utils from finecode.lsp_server.lsp_server import create_lsp_server +def _find_free_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) + return s.getsockname()[1] + + async def start( comm_type: communication_utils.CommunicationType, host: str | None = None, port: int | None = None, - trace: bool = False, + log_level: str = "INFO", ) -> None: - logger_utils.init_logger(trace=trace) + global_state.lsp_log_file_path = logger_utils.init_logger(log_name="lsp_server", log_level=log_level) + global_state.wm_log_level = log_level server = create_lsp_server() - await server.start_io_async() + if comm_type == communication_utils.CommunicationType.TCP: + if port is None: + port = _find_free_port() + sys.stdout.write(f"port:{port}\n") + sys.stdout.flush() + await server.start_tcp_async(host, port) + else: + await server.start_io_async() diff --git a/src/finecode/pygls_types_utils.py b/src/finecode/lsp_server/pygls_types_utils.py similarity index 100% rename from 
src/finecode/pygls_types_utils.py rename to src/finecode/lsp_server/pygls_types_utils.py diff --git a/src/finecode/lsp_server/schemas.py b/src/finecode/lsp_server/schemas.py index f9d023e8..6cb8d399 100644 --- a/src/finecode/lsp_server/schemas.py +++ b/src/finecode/lsp_server/schemas.py @@ -15,20 +15,6 @@ class BaseModel(pydantic.BaseModel): ) -class AddWorkspaceDirRequest(BaseModel): - dir_path: str - - -class AddWorkspaceDirResponse(BaseModel): ... - - -class DeleteWorkspaceDirRequest(BaseModel): - dir_path: str - - -class DeleteWorkspaceDirResponse(BaseModel): ... - - class ListActionsRequest(BaseModel): parent_node_id: str = "" diff --git a/src/finecode/lsp_server/services.py b/src/finecode/lsp_server/services.py index 976b1522..0c44d382 100644 --- a/src/finecode/lsp_server/services.py +++ b/src/finecode/lsp_server/services.py @@ -1,132 +1,4 @@ -from pathlib import Path - -from loguru import logger - -from finecode import domain, user_messages -from finecode.config import read_configs -from finecode.lsp_server import global_state, schemas -from finecode.runner import runner_manager - - class ActionNotFound(Exception): ... class InternalError(Exception): ... 
- - -def register_project_changed_callback(action_node_changed_callback): - async def project_changed_callback(project: domain.Project) -> None: - action_node = schemas.ActionTreeNode( - node_id=project.dir_path.as_posix(), - name=project.name, - subnodes=[], - node_type=schemas.ActionTreeNode.NodeType.PROJECT, - status=project.status.name, - ) - await action_node_changed_callback(action_node) - - runner_manager.project_changed_callback = project_changed_callback - - -def register_send_user_message_notification_callback( - send_user_message_notification_callback, -): - user_messages._notification_sender = send_user_message_notification_callback - - -def register_send_user_message_request_callback(send_user_message_request_callback): - user_messages._lsp_message_send = send_user_message_request_callback - - -def register_workspace_edit_applier(apply_workspace_edit_func): - runner_manager.apply_workspace_edit = apply_workspace_edit_func - - -def register_debug_session_starter(start_debug_session_func): - runner_manager.start_debug_session = start_debug_session_func - - -def register_progress_reporter(report_progress_func): - global_state.progress_reporter = report_progress_func - - -async def add_workspace_dir( - request: schemas.AddWorkspaceDirRequest, -) -> schemas.AddWorkspaceDirResponse: - logger.trace(f"Add workspace dir {request.dir_path}") - dir_path = Path(request.dir_path) - - if dir_path in global_state.ws_context.ws_dirs_paths: - await user_messages.error(f"Directory {dir_path} is already added") - - global_state.ws_context.ws_dirs_paths.append(dir_path) - new_projects = await read_configs.read_projects_in_dir( - dir_path, global_state.ws_context - ) - - for new_project in new_projects: - await read_configs.read_project_config( - project=new_project, - ws_context=global_state.ws_context, - resolve_presets=False, - ) - - try: - await runner_manager.start_runners_with_presets( - projects=new_projects, - ws_context=global_state.ws_context, - 
initialize_all_handlers=True, - ) - except runner_manager.RunnerFailedToStart as exception: - await user_messages.error(f"Starting runners with presets failed: {exception.message}. Did you run `finecode prepare-envs` ?") - - return schemas.AddWorkspaceDirResponse() - - -async def delete_workspace_dir( - request: schemas.DeleteWorkspaceDirRequest, -) -> schemas.DeleteWorkspaceDirResponse: - ws_dir_path_to_remove = Path(request.dir_path) - global_state.ws_context.ws_dirs_paths.remove(ws_dir_path_to_remove) - - # find all projects affected by removing of this ws dir - project_dir_pathes = global_state.ws_context.ws_projects.keys() - for project_dir_path in project_dir_pathes: - if not project_dir_path.is_relative_to(ws_dir_path_to_remove): - continue - - # project_dir_path is now candidate to remove - remove_project_dir_path = True - for ws_dir_path in global_state.ws_context.ws_dirs_paths: - if project_dir_path.is_relative_to(ws_dir_path): - # project is also in another ws_dir, keep it - remove_project_dir_path = False - break - - if remove_project_dir_path: - project_runners = global_state.ws_context.ws_projects_extension_runners[ - project_dir_path - ].values() - for runner in project_runners: - await runner_manager.stop_extension_runner(runner=runner) - del global_state.ws_context.ws_projects[project_dir_path] - try: - del global_state.ws_context.ws_projects_raw_configs[project_dir_path] - except KeyError: - ... 
- - return schemas.DeleteWorkspaceDirResponse() - - -async def handle_changed_ws_dirs(added: list[Path], removed: list[Path]) -> None: - for removed_ws_dir_path in removed: - delete_request = schemas.DeleteWorkspaceDirRequest( - dir_path=removed_ws_dir_path.as_posix() - ) - await delete_workspace_dir(request=delete_request) - - for added_ws_dir_path in added: - add_request = schemas.AddWorkspaceDirRequest( - dir_path=added_ws_dir_path.as_posix() - ) - await add_workspace_dir(request=add_request) diff --git a/src/finecode/mcp_server.py b/src/finecode/mcp_server.py new file mode 100644 index 00000000..1a4ed606 --- /dev/null +++ b/src/finecode/mcp_server.py @@ -0,0 +1,292 @@ +# docs: docs/cli.md +"""FineCode MCP Server — proxy to the FineCode WM server. + +Connects to the FineCode WM server over TCP JSON-RPC and translates MCP tool calls into +WM server requests. If no WM server is running, starts one as a subprocess. +""" + +from __future__ import annotations + +import asyncio +import json +import pathlib +import sys +import uuid + +from finecode.wm_client import ApiClient +from finecode.wm_server import wm_lifecycle +from loguru import logger +from mcp.server import Server +from mcp.server.stdio import stdio_server +from mcp.types import TextContent, Tool + +_wm_client = ApiClient() +server = Server("FineCode") + +_partial_result_queues: dict[str, asyncio.Queue] = {} + + +def _setup_partial_result_forwarding() -> None: + """Register the WM partial-result notification handler. + + Must be called once after ``_wm_client.connect()``. Each ``actions/partialResult`` + notification is routed by token to the matching per-call asyncio.Queue. 
+ """ + + async def _on_partial_result(params: dict) -> None: + token = params.get("token") + value = params.get("value") + if token and value is not None: + queue = _partial_result_queues.get(token) + if queue is not None: + queue.put_nowait(value) + + _wm_client.on_notification("actions/partialResult", _on_partial_result) + + +async def _run_with_progress( + action: str, + project: str, + params: dict, + options: dict, + session, +) -> dict: + """Run a WM action with streaming partial results forwarded as MCP log messages. + + ``project`` may be ``""`` to run across all projects that expose the action. + Each ``actions/partialResult`` notification is forwarded to the MCP client as a + ``notifications/message`` log message while the call blocks waiting for the final result. + """ + token = str(uuid.uuid4()) + queue: asyncio.Queue = asyncio.Queue() + _partial_result_queues[token] = queue + + async def _forward() -> None: + try: + while True: + value = await queue.get() + await session.send_log_message( + level="info", data=value, logger="finecode" + ) + except asyncio.CancelledError: + pass + + result_task = asyncio.create_task( + _wm_client.run_action_with_partial_results( + action, project, token, params, options + ) + ) + forward_task = asyncio.create_task(_forward()) + try: + return await result_task + finally: + forward_task.cancel() + await asyncio.gather(forward_task, return_exceptions=True) + _partial_result_queues.pop(token, None) + + +@server.list_tools() +async def list_tools() -> list[Tool]: + """Build the MCP tool list from live WM data. + + Fetches all actions and their payload schemas from the WM, then + constructs one ``Tool`` per action with the real input schema. + A static ``list_projects`` tool is always included. 
+ """ + tools: list[Tool] = [ + Tool( + name="list_projects", + description="List all projects in the FineCode workspace with their names, paths, and statuses", + inputSchema={"type": "object", "properties": {}}, + ), + Tool( + name="list_runners", + description="List all extension runners and their status (running, stopped, error). Use this to diagnose failures when actions do not respond.", + inputSchema={"type": "object", "properties": {}}, + ), + Tool( + name="list_actions", + description="List actions available in the workspace, optionally filtered to a single project. Returns action names and which projects expose them.", + inputSchema={ + "type": "object", + "properties": { + "project": { + "type": "string", + "description": "Absolute path to the project directory. Use the list_projects tool to see available projects. Omit to list actions across all projects.", + } + }, + }, + ), + Tool( + name="get_project_raw_config", + description="Return the resolved (post-preset-merge) configuration for a project. Use this to understand what actions and handlers are configured.", + inputSchema={ + "type": "object", + "properties": { + "project": { + "type": "string", + "description": "Absolute path to the project directory. Use the list_projects tool to see available projects.", + } + }, + "required": ["project"], + }, + ), + Tool( + name="dump_config", + description="Return the fully resolved project configuration with all presets applied and the presets key removed. Use this to understand the complete effective configuration a project runs with.", + inputSchema={ + "type": "object", + "properties": { + "project": { + "type": "string", + "description": "Absolute path to the project directory. Use the list_projects tool to see available projects.", + } + }, + "required": ["project"], + }, + ), + ] + + actions = await _wm_client.list_actions() + + # Deduplicate: first project that exposes an action owns its schema. 
+ seen: dict[str, dict] = {} + for action in actions: + if action["name"] not in seen: + seen[action["name"]] = action + + # Group by project to keep schema requests batched. + unique_by_project: dict[str, list[dict]] = {} + for action in seen.values(): + unique_by_project.setdefault(action["project"], []).append(action) + + for project_path, project_actions in unique_by_project.items(): + action_names = [a["name"] for a in project_actions] + try: + schemas = await _wm_client.get_payload_schemas(project_path, action_names) + except Exception as exc: + logger.debug(f"Could not fetch payload schemas for {project_path}: {exc}") + schemas = {} + + for action in project_actions: + name = action["name"] + schema: dict | None = schemas.get(name) + description = ( + schema.get("description") if schema else None + ) or f"Run {name} on a project or the whole workspace" + input_schema: dict = { + "type": "object", + "properties": { + "project": { + "type": "string", + "description": "Absolute path to the project directory. Use the list_projects tool to see available projects. 
Omit to run on all projects in the workspace.", + }, + **(schema["properties"] if schema else {}), + }, + "required": schema.get("required", []) if schema else [], + } + tools.append( + Tool( + name=name, + description=description, + inputSchema=input_schema, + ) + ) + + return tools + + +@server.call_tool() +async def call_tool(name: str, arguments: dict) -> list[TextContent]: + """Dispatch an MCP tool call to the WM server.""" + if name == "list_projects": + result = await _wm_client.list_projects() + return [TextContent(type="text", text=json.dumps({"projects": result}))] + + if name == "list_runners": + result = await _wm_client.list_runners() + return [TextContent(type="text", text=json.dumps({"runners": result}))] + + if name == "list_actions": + project = arguments.get("project") + result = await _wm_client.list_actions(project=project) + return [TextContent(type="text", text=json.dumps({"actions": result}))] + + if name == "get_project_raw_config": + project = arguments["project"] + result = await _wm_client.get_project_raw_config(project) + return [TextContent(type="text", text=json.dumps({"rawConfig": result}))] + + if name == "dump_config": + project = arguments["project"] + project_path = pathlib.Path(project) + raw_config = await _wm_client.get_project_raw_config(project) + result = await _wm_client.run_action( + "dump_config", + project, + params={ + "source_file_path": str(project_path / "pyproject.toml"), + "project_raw_config": raw_config, + "target_file_path": str( + project_path / "finecode_config_dump" / "pyproject.toml" + ), + }, + options={"resultFormats": ["json"], "trigger": "user", "devEnv": "ai"}, + ) + return [TextContent(type="text", text=json.dumps(result))] + + from mcp.server.lowlevel.server import request_ctx + + session = request_ctx.get().session + project = arguments.pop("project", None) + options = {"resultFormats": ["json"], "trigger": "user", "devEnv": "ai"} + result = await _run_with_progress( + name, project or "", arguments 
or {}, options, session + ) + return [TextContent(type="text", text=json.dumps(result))] + + +def start(workdir: pathlib.Path, port_file: pathlib.Path | None = None) -> None: + """Start the MCP server on stdio, connecting to the FineCode API. + + If *port_file* is given, a dedicated WM server is started that writes its + port to that file instead of the shared discovery file. + """ + if port_file is not None: + wm_lifecycle.start_own_server(workdir, port_file=port_file) + try: + port = asyncio.run(wm_lifecycle.wait_until_ready_from_file(port_file)) + except TimeoutError as exc: + logger.error(str(exc)) + sys.exit(1) + else: + wm_lifecycle.ensure_running(workdir) + try: + port = asyncio.run(wm_lifecycle.wait_until_ready()) + except TimeoutError as exc: + logger.error(str(exc)) + sys.exit(1) + + async def _run() -> None: + try: + await _wm_client.connect("127.0.0.1", port, client_id="mcp") + except (ConnectionRefusedError, OSError) as exc: + logger.error( + f"Could not connect to FineCode WM server on port {port}: {exc}" + ) + sys.exit(1) + _setup_partial_result_forwarding() + logger.debug(f"Add dir to API Client: {workdir}") + await _wm_client.add_dir(workdir) + logger.debug("Added dir") + try: + async with stdio_server() as (read_stream, write_stream): + await server.run( + read_stream, + write_stream, + server.create_initialization_options(), + ) + finally: + await _wm_client.close() + + asyncio.run(_run()) diff --git a/src/finecode/services/run_service/payload_preprocessor.py b/src/finecode/services/run_service/payload_preprocessor.py deleted file mode 100644 index b95e57e1..00000000 --- a/src/finecode/services/run_service/payload_preprocessor.py +++ /dev/null @@ -1,46 +0,0 @@ -import pathlib -import typing - -from finecode import context - - -async def preprocess_for_project( - action_name: str, - payload: dict[str, typing.Any], - project_dir_path: pathlib.Path, - ws_context: context.WorkspaceContext, -) -> dict[str, typing.Any]: - processed_payload = 
payload.copy() - - if action_name == "prepare_envs" or action_name == "prepare_runners": - runtime_venv_path = project_dir_path / ".venvs" / "runtime" - project_def_path = project_dir_path / "pyproject.toml" - envs = [ - { - "name": "runtime", - "venv_dir_path": runtime_venv_path, - "project_def_path": project_def_path, - } - ] - # current approach: there are 4 default environments: runtime, dev_workspace, - # dev, dev_no_runtime. `runtime` is created always, all other only if dependency - # group for them exist. - # In future there will be possibility to create additional envs and to configure - # default ones. - project_raw_config = ws_context.ws_projects_raw_configs[project_dir_path] - deps_groups = project_raw_config.get("dependency-groups", {}) - # `dev_workspace` is handled separately in `prepare_env`, no need to include - # here - for default_env in ["dev", "dev_no_runtime"]: - if default_env in deps_groups: - venv_path = project_dir_path / ".venvs" / default_env - envs.append( - { - "name": default_env, - "venv_dir_path": venv_path, - "project_def_path": project_def_path, - } - ) - processed_payload["envs"] = envs - - return processed_payload diff --git a/src/finecode/watch_and_run.py b/src/finecode/watch_and_run.py index 1e083db5..fc8f6caf 100644 --- a/src/finecode/watch_and_run.py +++ b/src/finecode/watch_and_run.py @@ -1,7 +1,7 @@ from loguru import logger import finecode.context as context -import finecode.find_project as find_project +import finecode.wm_server.find_project as find_project import finecode.watcher as watcher diff --git a/src/finecode/wm_client.py b/src/finecode/wm_client.py new file mode 100644 index 00000000..b5e6d13a --- /dev/null +++ b/src/finecode/wm_client.py @@ -0,0 +1,494 @@ +"""FineCode WM client — JSON-RPC client for the FineCode WM server. + +Connects to the FineCode WM server over TCP using Content-Length framing. +Supports both request/response and server→client notifications via a +background reader loop. 
+ +Used by LSP server, MCP server, and potentially CLI. +""" + +from __future__ import annotations + +import asyncio +import collections.abc +import json +import pathlib + +from loguru import logger + +CONTENT_LENGTH_HEADER = "Content-Length: " + + +class ApiError(Exception): + """Base class for API client errors.""" + + +class ApiServerError(ApiError): + """Server returned a JSON-RPC error response.""" + + def __init__(self, code: int, message: str) -> None: + self.code = code + super().__init__(f"API error ({code}): {message}") + + +class ApiResponseError(ApiError): + """Server returned an unexpected or malformed response.""" + + def __init__(self, method: str, detail: str) -> None: + self.method = method + super().__init__(f"Unexpected response for '{method}': {detail}") + + +async def _read_message(reader: asyncio.StreamReader) -> dict | None: + """Read one Content-Length framed JSON-RPC message. Returns None on EOF.""" + header_line = await reader.readline() + if not header_line: + return None + header_str = header_line.decode("utf-8").strip() + if not header_str.startswith(CONTENT_LENGTH_HEADER): + logger.warning(f"WmClient: unexpected header: {header_str!r}") + return None + content_length = int(header_str[len(CONTENT_LENGTH_HEADER):]) + + # Blank separator line + await reader.readline() + + body = await reader.readexactly(content_length) + return json.loads(body.decode("utf-8")) + + +class ApiClient: + """JSON-RPC client using Content-Length framing over TCP. + + After connect(), a background reader loop dispatches incoming messages: + - Responses (with ``id``) resolve the matching pending request future. + - Notifications (without ``id``) are dispatched to registered callbacks. + + Errors: + - ``ApiServerError``: the server returned a JSON-RPC error. + - ``ApiResponseError``: the server response was missing an expected field. + - ``ConnectionError``: the connection was lost. 
+ """ + + def __init__(self) -> None: + self._reader: asyncio.StreamReader | None = None + self._writer: asyncio.StreamWriter | None = None + self._request_id = 0 + self._pending: dict[int, asyncio.Future] = {} + self._notification_handlers: dict[ + str, collections.abc.Callable[..., collections.abc.Coroutine] + ] = {} + self._reader_task: asyncio.Task | None = None + self.server_info: dict = {} + + # -- Connection lifecycle ----------------------------------------------- + + async def connect(self, host: str, port: int, client_id: str | None = None) -> None: + self._reader, self._writer = await asyncio.open_connection(host, port) + self._reader_task = asyncio.create_task(self._read_loop()) + logger.info(f"Connected to FineCode API at {host}:{port}") + try: + params: dict = {} + if client_id is not None: + params["clientId"] = client_id + self.server_info = await self.request("client/initialize", params) or {} + log_path = self.server_info.get("logFilePath") + if log_path: + logger.info(f"WM Server log file: {log_path}") + else: + logger.info("WM Server returned no log file path") + except Exception as exception: + logger.info(f"Failed to initialize with WM Server: {exception}") + + async def close(self) -> None: + if self._reader_task is not None: + self._reader_task.cancel() + try: + await self._reader_task + except asyncio.CancelledError: + pass + self._reader_task = None + + if self._writer is not None: + self._writer.close() + await self._writer.wait_closed() + self._writer = None + self._reader = None + + # Fail any pending requests. 
+ for future in self._pending.values(): + if not future.done(): + future.set_exception(ConnectionError("Connection closed")) + self._pending.clear() + + # -- Notifications ------------------------------------------------------ + + def on_notification( + self, + method: str, + callback: collections.abc.Callable[..., collections.abc.Coroutine], + ) -> None: + """Register an async callback for a server→client notification.""" + self._notification_handlers[method] = callback + + # -- Server methods ----------------------------------------------------- + + async def get_info(self) -> dict: + """Return static info about the WM Server (e.g. log file path).""" + return await self.request("server/getInfo") + + # -- Workspace methods -------------------------------------------------- + + async def list_projects(self) -> list[dict]: + """List all projects in the workspace.""" + return await self.request("workspace/listProjects") + + async def find_project_for_file(self, file_path: str) -> str | None: + """Return the absolute directory path of the project containing a given file. + + An empty string or null result indicates that the file does not belong to + any project. This mirrors the server's + ``workspace/findProjectForFile`` handler. 
+ """ + result = await self.request( + "workspace/findProjectForFile", {"filePath": file_path} + ) + # server returns {"project": name | None} + if not isinstance(result, dict): + raise ApiResponseError( + "workspace/findProjectForFile", f"expected dict, got {type(result).__name__}" + ) + return result.get("project") + + async def get_project_raw_config(self, project: str) -> dict: + """Return the resolved raw config for a project by name.""" + result = await self.request( + "workspace/getProjectRawConfig", {"project": project} + ) + if not isinstance(result, dict) or "rawConfig" not in result: + raise ApiResponseError( + "workspace/getProjectRawConfig", + f"missing 'rawConfig' field, got {result!r}", + ) + return result["rawConfig"] + + async def list_actions(self, project: str | None = None) -> list[dict]: + """List available actions, optionally filtered by project name.""" + params: dict = {} + if project is not None: + params["project"] = project + result = await self.request("actions/list", params) + if not isinstance(result, dict) or "actions" not in result: + raise ApiResponseError( + "actions/list", f"missing 'actions' field, got {result!r}" + ) + return result["actions"] + + async def get_payload_schemas( + self, project: str, action_names: list[str] + ) -> dict[str, dict | None]: + """Return payload schemas for the given actions in a project. + + Delegates to the WM ``actions/getPayloadSchemas`` endpoint. + + Args: + project: Absolute path to the project directory. + action_names: List of action names to fetch schemas for. + + Returns: + Mapping of action name → JSON Schema fragment, or ``None`` + for actions whose class could not be imported by the ER. 
+ """ + result = await self.request( + "actions/getPayloadSchemas", + {"project": project, "action_names": action_names}, + ) + if not isinstance(result, dict) or "schemas" not in result: + raise ApiResponseError( + "actions/getPayloadSchemas", + f"missing 'schemas' field, got {result!r}", + ) + return result["schemas"] + + async def get_tree(self, parent_node_id: str | None = None) -> dict: + """Retrieve the hierarchical action tree from the WM server. + + ``parent_node_id`` is currently ignored by the server but is accepted for + future compatibility (and mirrors the arguments passed by the IDE + command). + The returned value is the raw dictionary returned by the server, which + at the moment has the shape ``{"nodes": [...]} ``. + """ + params: dict = {} + if parent_node_id is not None: + params["parent_node_id"] = parent_node_id + result = await self.request("actions/getTree", params) + return result + + async def set_config_overrides( + self, overrides: dict + ) -> None: + """Set persistent handler config overrides on the server. + + Overrides are stored for the lifetime of the server and applied to all + subsequent action runs. Call this before ``add_dir`` if possible so that runners + always start with the correct config and no update push is required. + + overrides format: {action_name: {handler_name_or_"": {param: value}}} + The empty-string key "" means the override applies to all handlers of + that action. + """ + await self.request("workspace/setConfigOverrides", {"overrides": overrides}) + + async def run_batch( + self, + actions: list[str], + projects: list[str] | None = None, + params: dict | None = None, + params_by_project: dict[str, dict] | None = None, + options: dict | None = None, + ) -> dict: + """Run multiple actions across multiple (or all) projects. + + Results are keyed by project path string, then action name. + All result keys use camelCase (returnCode, resultByFormat). 
+ """ + body: dict = {"actions": actions} + if projects is not None: + body["projects"] = projects + if params: + body["params"] = params + if params_by_project: + body["paramsByProject"] = params_by_project + if options: + body["options"] = options + return await self.request("actions/runBatch", body) + + async def run_action( + self, + action: str, + project: str, + params: dict | None = None, + options: dict | None = None, + ) -> dict: + """Run an action on a project.""" + body: dict = { + "action": action, + "project": project, + "options": options, + } + if params: + body["params"] = params + return await self.request("actions/run", body) + + async def run_action_with_partial_results( + self, + action: str, + project: str, + partial_result_token: str, + params: dict | None = None, + options: dict | None = None, + ) -> dict: + """Run an action with streaming partial results via notifications. + + Pass ``project=""`` to run across all projects that expose the action. + Partial results are delivered as ``actions/partialResult`` notifications + before this coroutine returns the aggregated final result. + """ + body: dict = { + "action": action, + "project": project, + "partialResultToken": partial_result_token, + "options": options or {}, + } + if params: + body["params"] = params + return await self.request("actions/runWithPartialResults", body) + + async def add_dir( + self, + dir_path: pathlib.Path, + start_runners: bool = True, + projects: list[str] | None = None, + ) -> dict: + """Add a workspace directory. Returns {projects: [...]}. + + When ``start_runners=False`` the server reads configs and collects + actions without starting any extension runners. Use this when runner + environments may not exist yet (e.g. before ``prepare-envs``). + + When ``projects`` is provided, only those projects (by name) will have + their configs read and runners started — the rest are still discovered + but not initialised. 
Only use this in own-server mode where the server + lifetime matches a single CLI invocation. + """ + body: dict = {"dirPath": str(dir_path), "startRunners": start_runners} + if projects is not None: + body["projects"] = projects + return await self.request("workspace/addDir", body) + + async def start_runners(self, projects: list[str] | None = None) -> None: + """Start extension runners for all (or specified) projects. + + Complements any already-running runners — only missing runners are + started. Also resolves presets so ``project.actions`` is up to date. + """ + params: dict = {} + if projects is not None: + params["projects"] = projects + await self.request("workspace/startRunners", params) + + async def list_runners(self) -> list[dict]: + """List all extension runners and their status.""" + result = await self.request("runners/list") + if not isinstance(result, dict) or "runners" not in result: + raise ApiResponseError( + "runners/list", f"missing 'runners' field, got {result!r}" + ) + return result["runners"] + + async def check_env(self, project: str, env_name: str) -> bool: + """Return whether the named environment is valid for a project.""" + result = await self.request( + "runners/checkEnv", {"project": project, "envName": env_name} + ) + if not isinstance(result, dict) or "valid" not in result: + raise ApiResponseError( + "runners/checkEnv", f"missing 'valid' field, got {result!r}" + ) + return result["valid"] + + async def remove_env(self, project: str, env_name: str) -> None: + """Remove the named environment for a project.""" + await self.request( + "runners/removeEnv", {"project": project, "envName": env_name} + ) + + async def remove_dir(self, dir_path: pathlib.Path) -> None: + """Remove a workspace directory.""" + await self.request("workspace/removeDir", {"dirPath": str(dir_path)}) + + # -- Document notifications ------------------------------------------------- + + async def notify_document_opened( + self, uri: str, version: int | str | None = 
None, text: str = "" + ) -> None: + """Send document opened notification to the server.""" + params = {"uri": uri, "text": text} + if version is not None: + params["version"] = version + + self._send_notification("documents/opened", params) + + async def notify_document_closed(self, uri: str) -> None: + """Send document closed notification to the server.""" + self._send_notification("documents/closed", {"uri": uri}) + + async def notify_document_changed( + self, uri: str, version: int | str, content_changes: list[dict] + ) -> None: + """Send document changed notification to the server.""" + params = { + "uri": uri, + "version": version, + "contentChanges": content_changes, + } + self._send_notification("documents/changed", params) + + # -- Low-level notification ------------------------------------------------- + + def _send_notification(self, method: str, params: dict | None = None) -> None: + """Send a JSON-RPC notification (no response expected).""" + if self._writer is None: + raise RuntimeError("Not connected to FineCode WM server") + + msg = { + "jsonrpc": "2.0", + "method": method, + "params": params or {}, + } + + body = json.dumps(msg).encode("utf-8") + header = f"Content-Length: {len(body)}\r\n\r\n".encode("utf-8") + self._writer.write(header + body) + # Don't await drain for notifications, fire and forget + + + # -- Low-level request -------------------------------------------------- + + async def request(self, method: str, params: dict | None = None) -> dict: + """Send a JSON-RPC request and wait for the response. + + Raises: + ApiServerError: the server returned a JSON-RPC error. + ConnectionError: the connection was closed before a response arrived. 
+ """ + if self._writer is None: + raise RuntimeError("Not connected to FineCode WM server") + + self._request_id += 1 + rid = self._request_id + msg = { + "jsonrpc": "2.0", + "id": rid, + "method": method, + "params": params or {}, + } + + future: asyncio.Future = asyncio.get_running_loop().create_future() + self._pending[rid] = future + + body = json.dumps(msg).encode("utf-8") + header = f"Content-Length: {len(body)}\r\n\r\n".encode("utf-8") + self._writer.write(header + body) + await self._writer.drain() + + response = await future + + if "error" in response: + error = response["error"] + raise ApiServerError(error["code"], error["message"]) + + return response.get("result") + + # -- Background reader -------------------------------------------------- + + async def _read_loop(self) -> None: + """Continuously read messages from the server and dispatch them.""" + try: + while self._reader is not None: + msg = await _read_message(self._reader) + if msg is None: + break + + if "id" in msg: + # Response to a pending request. + future = self._pending.pop(msg["id"], None) + if future is not None and not future.done(): + future.set_result(msg) + else: + logger.warning( + f"WmClient: received response for unknown id {msg['id']}" + ) + else: + # Server→client notification. + method = msg.get("method") + handler = self._notification_handlers.get(method) + if handler is not None: + asyncio.create_task(handler(msg.get("params"))) + else: + logger.trace( + f"WmClient: unhandled notification {method}" + ) + except asyncio.CancelledError: + raise + except (asyncio.IncompleteReadError, ConnectionResetError): + logger.info("WmClient: server connection lost") + except Exception: + logger.exception("WmClient: error in reader loop") + finally: + # Fail any remaining pending requests. 
+ for future in self._pending.values(): + if not future.done(): + future.set_exception(ConnectionError("Connection lost")) + self._pending.clear() diff --git a/tests/workspace_manager/__init__.py b/src/finecode/wm_server/__init__.py similarity index 100% rename from tests/workspace_manager/__init__.py rename to src/finecode/wm_server/__init__.py diff --git a/tests/workspace_manager/server/__init__.py b/src/finecode/wm_server/config/__init__.py similarity index 100% rename from tests/workspace_manager/server/__init__.py rename to src/finecode/wm_server/config/__init__.py diff --git a/src/finecode/config/collect_actions.py b/src/finecode/wm_server/config/collect_actions.py similarity index 59% rename from src/finecode/config/collect_actions.py rename to src/finecode/wm_server/config/collect_actions.py index 52972995..1cd4966a 100644 --- a/src/finecode/config/collect_actions.py +++ b/src/finecode/wm_server/config/collect_actions.py @@ -1,71 +1,24 @@ +# docs: docs/configuration.md from pathlib import Path from typing import Any -import finecode.config.config_models as config_models -import finecode.context as context -import finecode.domain as domain +import finecode.wm_server.config.config_models as config_models +from finecode.wm_server import context, domain +from finecode.wm_server.config.read_configs import read_env_configs -def collect_actions( +def collect_project( project_path: Path, ws_context: context.WorkspaceContext, -) -> list[domain.Action]: - # preconditions: - # - project raw config exists in ws_context if such project exists - # - project expected to include finecode - try: - project = ws_context.ws_projects[project_path] - except KeyError as exception: - raise ValueError( - f"Project {project_path} doesn't exist." 
- + f" Existing projects: {ws_context.ws_projects}" - ) from exception - - try: - config = ws_context.ws_projects_raw_configs[project_path] - except KeyError as exception: - raise Exception("First you need to parse config of project") from exception - - actions = _collect_actions_in_config(config) - project.actions = actions - - action_handler_configs = _collect_action_handler_configs_in_config(config) - - # Apply overrides - # - # Merge handler config overrides from ws_context if available - if ws_context.handler_config_overrides: - for action in project.actions: - action_overrides = ws_context.handler_config_overrides.get(action.name, {}) - if not action_overrides: - continue - - for handler in action.handlers: - # Check for action-level overrides (empty string key) - action_level_overrides = action_overrides.get("", {}) - # Check for handler-specific overrides - handler_overrides = action_overrides.get(handler.name, {}) - - # Merge overrides if any exist - if action_level_overrides or handler_overrides: - if handler.source not in action_handler_configs: - action_handler_configs[handler.source] = {} - # Action-level first, then handler-specific (handler takes precedence) - action_handler_configs[handler.source] = { - **action_handler_configs[handler.source], - **action_level_overrides, - **handler_overrides, - } - - project.action_handler_configs = action_handler_configs - - return actions +) -> domain.CollectedProject: + """Collect actions, services, and handler configs from the project's raw config. + Constructs a :class:`~finecode.wm_server.domain.CollectedProject` and + replaces the existing entry in ``ws_context.ws_projects``. The raw config + must already be present (call ``read_project_config`` first). -def collect_services( - project_path: Path, - ws_context: context.WorkspaceContext, -) -> list[domain.ServiceDeclaration]: + Note: presets are **not** resolved here. 
+ """ try: project = ws_context.ws_projects[project_path] except KeyError as exception: @@ -79,9 +32,23 @@ def collect_services( except KeyError as exception: raise Exception("First you need to parse config of project") from exception + actions = _collect_actions_in_config(config) + action_handler_configs = _collect_action_handler_configs_in_config(config) services = _collect_services_in_config(config) - project.services = services - return services + env_configs = read_env_configs(project_config=config) + + collected = domain.CollectedProject( + name=project.name, + dir_path=project.dir_path, + def_path=project.def_path, + status=project.status, + env_configs=env_configs, + actions=actions, + services=services, + action_handler_configs=action_handler_configs, + ) + ws_context.ws_projects[project_path] = collected + return collected def _collect_services_in_config( diff --git a/src/finecode/config/config_models.py b/src/finecode/wm_server/config/config_models.py similarity index 95% rename from src/finecode/config/config_models.py rename to src/finecode/wm_server/config/config_models.py index bdaaa5db..6b59bfd0 100644 --- a/src/finecode/config/config_models.py +++ b/src/finecode/wm_server/config/config_models.py @@ -1,3 +1,4 @@ +# docs: docs/concepts.md, docs/configuration.md from typing import Any from pydantic import BaseModel, ValidationError diff --git a/src/finecode/config/read_configs.py b/src/finecode/wm_server/config/read_configs.py similarity index 96% rename from src/finecode/config/read_configs.py rename to src/finecode/wm_server/config/read_configs.py index 9b3ebb04..516cd9e9 100644 --- a/src/finecode/config/read_configs.py +++ b/src/finecode/wm_server/config/read_configs.py @@ -1,3 +1,4 @@ +# docs: docs/configuration.md from importlib import metadata from pathlib import Path from typing import Any, NamedTuple @@ -5,9 +6,10 @@ from loguru import logger from tomlkit import loads as toml_loads -from finecode import context, domain, user_messages -from 
finecode.config import config_models -from finecode.runner import runner_client +from finecode import user_messages +from finecode.wm_server import context, domain +from finecode.wm_server.config import config_models +from finecode.wm_server.runner import runner_client async def read_projects_in_dir( @@ -37,7 +39,6 @@ async def read_projects_in_dir( continue status = domain.ProjectStatus.CONFIG_VALID - actions: list[domain.Action] | None = None with open(def_file, "rb") as pyproject_file: project_def = toml_loads(pyproject_file.read()).unwrap() @@ -49,20 +50,21 @@ async def read_projects_in_dir( ) if not finecode_in_dev_workspace: status = domain.ProjectStatus.NO_FINECODE - actions = [] - - new_project = domain.Project( - name=def_file.parent.name, - dir_path=def_file.parent, - def_path=def_file, - status=status, - actions=actions, - env_configs={}, - ) + is_new_project = def_file.parent not in ws_context.ws_projects - ws_context.ws_projects[def_file.parent] = new_project if is_new_project: + new_project = domain.Project( + name=project_def.get("project", {}).get("name", def_file.parent.name), + dir_path=def_file.parent, + def_path=def_file, + status=status, + ) + ws_context.ws_projects[def_file.parent] = new_project new_projects.append(new_project) + else: + # Preserve existing collected/resolved state — only update status in case + # the finecode dependency was added or removed since the last scan. + ws_context.ws_projects[def_file.parent].status = status return new_projects @@ -115,7 +117,7 @@ async def read_project_config( project_def = toml_loads(pyproject_file.read()).unwrap() # TODO: validate that finecode is installed? 
- base_config_path = Path(__file__).parent.parent / "base_config.toml" + base_config_path = Path(__file__).parent.parent.parent / "base_config.toml" # TODO: cache instead of reading each time with open(base_config_path, "r") as base_config_file: base_config = toml_loads(base_config_file.read()).unwrap() @@ -174,9 +176,6 @@ async def read_project_config( merge_services_dependencies_into_groups(project_config) ws_context.ws_projects_raw_configs[project.dir_path] = project_config - - env_configs = read_env_configs(project_config=project_config) - project.env_configs = env_configs else: logger.info( f"Project definition of type {project.def_path.name} is not supported yet" diff --git a/src/finecode/context.py b/src/finecode/wm_server/context.py similarity index 87% rename from src/finecode/context.py rename to src/finecode/wm_server/context.py index d28d7070..e78b8317 100644 --- a/src/finecode/context.py +++ b/src/finecode/wm_server/context.py @@ -4,10 +4,10 @@ from pathlib import Path from typing import TYPE_CHECKING, Any -from finecode import domain +from finecode.wm_server import domain +from finecode.wm_server.runner.runner_client import ExtensionRunnerInfo if TYPE_CHECKING: - from finecode.runner.runner_client import ExtensionRunnerInfo from finecode_jsonrpc._io_thread import AsyncIOThread @@ -46,6 +46,8 @@ class WorkspaceContext: default_factory=dict ) cached_actions_by_id: dict[str, CachedAction] = field(default_factory=dict) + # payload schema cache: project_path → {action_name: JSON Schema fragment | None} + ws_action_schemas: dict[Path, dict[str, dict | None]] = field(default_factory=dict) @dataclass diff --git a/src/finecode/domain.py b/src/finecode/wm_server/domain.py similarity index 60% rename from src/finecode/domain.py rename to src/finecode/wm_server/domain.py index dc0c1985..351dccb0 100644 --- a/src/finecode/domain.py +++ b/src/finecode/wm_server/domain.py @@ -1,5 +1,6 @@ from __future__ import annotations +import dataclasses import typing from enum 
import Enum, auto from pathlib import Path @@ -107,29 +108,27 @@ def to_dict(self) -> dict[str, typing.Any]: class Project: + """A project discovered in the workspace. + + This is the initial state: we know the project exists and have read its + basic identity (name, path, status), but actions and services have not + been collected yet. + + Transitions: + Project → CollectedProject via collect_actions.collect_project() + """ + def __init__( self, name: str, dir_path: Path, def_path: Path, status: ProjectStatus, - env_configs: dict[str, EnvConfig], - actions: list[Action] | None = None, ) -> None: self.name = name self.dir_path = dir_path self.def_path = def_path self.status = status - # None means actions were not collected yet - # if project.status is RUNNING, then actions are not None - self.actions = actions - self.services: list[ServiceDeclaration] = [] - # config by handler source - self.action_handler_configs: dict[str, dict[str, typing.Any]] = {} - # config by env name - # it always contains configs for all environments, even if user hasn't provided - # one explicitly(=there is a default config) - self.env_configs: dict[str, EnvConfig] = env_configs def __str__(self) -> str: return ( @@ -139,20 +138,78 @@ def __str__(self) -> str: def __repr__(self) -> str: return str(self) + +class CollectedProject(Project): + """A project whose actions and services have been collected from local config. + + Presets are **not** yet resolved. This state is used during the bootstrap + phase: the dev-workspace Extension Runner is started with the locally + collected actions so that it can resolve presets. Once presets are + resolved, the project is upgraded to :class:`ResolvedProject`. 
+ + Transitions: + Project → CollectedProject via collect_actions.collect_project() + CollectedProject → ResolvedProject via ResolvedProject.from_collected() + (after re-reading config with presets) + """ + + def __init__( + self, + name: str, + dir_path: Path, + def_path: Path, + status: ProjectStatus, + env_configs: dict[str, EnvConfig], + actions: list[Action], + services: list[ServiceDeclaration], + action_handler_configs: dict[str, dict[str, typing.Any]], + ) -> None: + super().__init__(name, dir_path, def_path, status) + # config by env name — always contains configs for all environments, even if + # the user hasn't provided one explicitly (there is always a default config) + self.env_configs: dict[str, EnvConfig] = env_configs + self.actions: list[Action] = actions + self.services: list[ServiceDeclaration] = services + # config by handler source + self.action_handler_configs: dict[str, dict[str, typing.Any]] = ( + action_handler_configs + ) + @property def envs(self) -> list[str]: - if self.actions is None: - raise ValueError("Actions are not collected yet") - all_envs_set = ordered_set.OrderedSet([]) for action in self.actions: action_envs = [handler.env for handler in action.handlers] all_envs_set |= ordered_set.OrderedSet(action_envs) all_envs_set |= ordered_set.OrderedSet([svc.env for svc in self.services]) - return list(all_envs_set) +class ResolvedProject(CollectedProject): + """A project with fully resolved configuration, including all presets. + + This is the normal operating state of a project. Actions, services, and + handler configs include contributions from all presets. + + Use :meth:`from_collected` to upgrade a :class:`CollectedProject` after + preset resolution. 
+ """ + + @classmethod + def from_collected(cls, collected: CollectedProject) -> "ResolvedProject": + """Upgrade a CollectedProject to ResolvedProject after preset resolution.""" + return cls( + name=collected.name, + dir_path=collected.dir_path, + def_path=collected.def_path, + status=collected.status, + env_configs=collected.env_configs, + actions=collected.actions, + services=collected.services, + action_handler_configs=collected.action_handler_configs, + ) + + class ProjectStatus(Enum): CONFIG_INVALID = auto() # config valid, but no finecode in project @@ -194,10 +251,34 @@ def __repr__(self) -> str: # self.source = source +class ExtensionRunnerStatus(Enum): + NO_VENV = auto() + INITIALIZING = auto() + FAILED = auto() + RUNNING = auto() + EXITED = auto() + + +@dataclasses.dataclass +class ExtensionRunner: + working_dir_path: Path + env_name: str + status: ExtensionRunnerStatus + + @property + def readable_id(self) -> str: + return f"{self.working_dir_path} ({self.env_name})" + + @property + def logs_path(self) -> Path: + return self.working_dir_path / ".venvs" / self.env_name / "logs" / "runner.log" + + class TextDocumentInfo: - def __init__(self, uri: str, version: str | int) -> None: + def __init__(self, uri: str, version: str | int, text: str = "") -> None: self.uri = uri self.version = version + self.text = text def __str__(self) -> str: return f'TextDocumentInfo(uri="{self.uri}", version="{self.version}")' @@ -219,7 +300,11 @@ class PartialResult(typing.NamedTuple): "Action", "ServiceDeclaration", "Project", + "CollectedProject", + "ResolvedProject", "TextDocumentInfo", "RunnerConfig", "EnvConfig", + "ExtensionRunnerStatus", + "ExtensionRunner", ] diff --git a/src/finecode/domain_helpers.py b/src/finecode/wm_server/domain_helpers.py similarity index 86% rename from src/finecode/domain_helpers.py rename to src/finecode/wm_server/domain_helpers.py index 38807a6d..80ea6947 100644 --- a/src/finecode/domain_helpers.py +++ 
b/src/finecode/wm_server/domain_helpers.py @@ -3,15 +3,14 @@ directly in the domain module. """ -from finecode import domain +from finecode.wm_server import domain def collect_all_handlers_to_initialize( - project: domain.Project, + project: domain.CollectedProject, env_name: str, ) -> dict[str, list[str]]: """Collect all handler names per action for the given env.""" - assert project.actions is not None result: dict[str, list[str]] = {} for action in project.actions: handler_names = [h.name for h in action.handlers if h.env == env_name] @@ -21,12 +20,11 @@ def collect_all_handlers_to_initialize( def collect_handlers_to_initialize_for_actions( - project: domain.Project, + project: domain.CollectedProject, env_name: str, action_names: list[str], ) -> dict[str, list[str]]: """Collect handler names per action for the given env, filtered by action names.""" - assert project.actions is not None result: dict[str, list[str]] = {} action_names_set = set(action_names) for action in project.actions: diff --git a/src/finecode/find_project.py b/src/finecode/wm_server/find_project.py similarity index 73% rename from src/finecode/find_project.py rename to src/finecode/wm_server/find_project.py index 508c720d..4473f30c 100644 --- a/src/finecode/find_project.py +++ b/src/finecode/wm_server/find_project.py @@ -2,9 +2,9 @@ from loguru import logger -from finecode import domain -from finecode.context import WorkspaceContext -from finecode.runner import runner_manager +from finecode.wm_server import domain +from finecode.wm_server.context import WorkspaceContext +from finecode.wm_server.runner import runner_manager class FileNotInWorkspaceException(BaseException): ... 
@@ -66,32 +66,30 @@ async def find_project_with_action_for_file( for project_dir_path in file_projects_pathes: project = ws_context.ws_projects[project_dir_path] - project_actions = project.actions - if project_actions is None: + if not isinstance(project, domain.CollectedProject): if project.status == domain.ProjectStatus.NO_FINECODE: continue - else: - if project.status == domain.ProjectStatus.CONFIG_VALID: - try: - await runner_manager.get_or_start_runners_with_presets( - project_dir_path=project_dir_path, ws_context=ws_context - ) - except runner_manager.RunnerFailedToStart as exception: - raise ValueError( - f"Action is related to project {project_dir_path} but runner " - f"with presets failed to start in it: {exception.message}" - ) - - assert project.actions is not None - project_actions = project.actions - else: + elif project.status == domain.ProjectStatus.CONFIG_VALID: + try: + await runner_manager.get_or_start_runners_with_presets( + project_dir_path=project_dir_path, ws_context=ws_context + ) + except runner_manager.RunnerFailedToStart as exception: raise ValueError( - f"Action is related to project {project_dir_path} but its action " - f"cannot be resolved({project.status})" + f"Action is related to project {project_dir_path} but runner " + f"with presets failed to start in it: {exception.message}" ) + # Re-fetch after preset resolution — now a CollectedProject + project = ws_context.ws_projects[project_dir_path] + assert isinstance(project, domain.CollectedProject) + else: + raise ValueError( + f"Action is related to project {project_dir_path} but its action " + f"cannot be resolved({project.status})" + ) try: - next(action for action in project_actions if action.name == action_name) + next(action for action in project.actions if action.name == action_name) except StopIteration: continue diff --git a/tests/workspace_manager/server/client/finecode/__init__.py b/src/finecode/wm_server/runner/__init__.py similarity index 100% rename from 
tests/workspace_manager/server/client/finecode/__init__.py rename to src/finecode/wm_server/runner/__init__.py diff --git a/src/finecode/runner/_internal_client_api.py b/src/finecode/wm_server/runner/_internal_client_api.py similarity index 97% rename from src/finecode/runner/_internal_client_api.py rename to src/finecode/wm_server/runner/_internal_client_api.py index bb8abe12..9d386a54 100644 --- a/src/finecode/runner/_internal_client_api.py +++ b/src/finecode/wm_server/runner/_internal_client_api.py @@ -6,7 +6,7 @@ from loguru import logger -from finecode.runner import _internal_client_types +from finecode.wm_server.runner import _internal_client_types from finecode_jsonrpc import client as jsonrpc_client diff --git a/src/finecode/runner/_internal_client_types.py b/src/finecode/wm_server/runner/_internal_client_types.py similarity index 100% rename from src/finecode/runner/_internal_client_types.py rename to src/finecode/wm_server/runner/_internal_client_types.py diff --git a/src/finecode/finecode_cmd.py b/src/finecode/wm_server/runner/finecode_cmd.py similarity index 100% rename from src/finecode/finecode_cmd.py rename to src/finecode/wm_server/runner/finecode_cmd.py diff --git a/src/finecode/runner/runner_client.py b/src/finecode/wm_server/runner/runner_client.py similarity index 77% rename from src/finecode/runner/runner_client.py rename to src/finecode/wm_server/runner/runner_client.py index bbf605d4..80cbb976 100644 --- a/src/finecode/runner/runner_client.py +++ b/src/finecode/wm_server/runner/runner_client.py @@ -14,8 +14,9 @@ from loguru import logger -import finecode.domain as domain -from finecode.runner import _internal_client_types, _internal_client_api +import finecode.wm_server.domain as domain +from finecode.wm_server.runner import _internal_client_types, _internal_client_api +from finecode.wm_server.utils.iterable_subscribe import IterableSubscribe import finecode_jsonrpc as jsonrpc_client @@ -36,30 +37,18 @@ class 
ActionRunStopped(jsonrpc_client.BaseRunnerRequestException): ... @dataclasses.dataclass -class ExtensionRunnerInfo: - working_dir_path: pathlib.Path - env_name: str - status: RunnerStatus +class ExtensionRunnerInfo(domain.ExtensionRunner): # NOTE: initialized doesn't mean the runner is running, check its status - initialized_event: asyncio.Event + initialized_event: asyncio.Event = dataclasses.field(default_factory=asyncio.Event) # e.g. if there is no venv for env, client can be None client: jsonrpc_client.JsonRpcClient | None = None - - @property - def readable_id(self) -> str: - return f"{self.working_dir_path} ({self.env_name})" - - @property - def logs_path(self) -> pathlib.Path: - return self.working_dir_path / ".venvs" / self.env_name / "logs" / "runner.log" + partial_results: IterableSubscribe = dataclasses.field( + default_factory=IterableSubscribe + ) -class RunnerStatus(enum.Enum): - NO_VENV = enum.auto() - INITIALIZING = enum.auto() - FAILED = enum.auto() - RUNNING = enum.auto() - EXITED = enum.auto() +# Alias for backward compatibility — status enum now lives in domain +RunnerStatus = domain.ExtensionRunnerStatus # JSON object or text @@ -100,7 +89,7 @@ class DevEnv(enum.StrEnum): CLI = 'cli' AI = 'ai' PRECOMMIT = 'precommit' - CI_CD = 'cicd' + CI = 'ci' async def run_action( @@ -140,8 +129,8 @@ async def run_action( if "error" in command_result: raise ActionRunFailed(command_result["error"]) - return_code = command_result["return_code"] - stringified_result = command_result["result_by_format"] + return_code = command_result["returnCode"] + stringified_result = command_result["resultByFormat"] # currently result is always dumped to json even if response format is expected to # be a string. See docs of ER lsp server for more details. 
try: @@ -155,6 +144,33 @@ async def run_action( return RunActionResponse(result_by_format=result_by_format, return_code=return_code) +async def merge_results( + runner: ExtensionRunnerInfo, + action_name: str, + results: list[dict], +) -> dict: + if not runner.initialized_event.is_set(): + await runner.initialized_event.wait() + + if runner.status != RunnerStatus.RUNNING: + raise ActionRunFailed( + f"Runner {runner.readable_id} is not running: {runner.status}" + ) + + response = await runner.client.send_request( + method=_internal_client_types.WORKSPACE_EXECUTE_COMMAND, + params=_internal_client_types.ExecuteCommandParams( + command="actions/mergeResults", + arguments=[action_name, results], + ), + timeout=None, + ) + command_result = response.result + if "error" in command_result: + raise ActionRunFailed(command_result["error"]) + return command_result["merged"] + + async def reload_action(runner: ExtensionRunnerInfo, action_name: str) -> None: if not runner.initialized_event.is_set(): await runner.initialized_event.wait() @@ -170,6 +186,27 @@ async def reload_action(runner: ExtensionRunnerInfo, action_name: str) -> None: ) +async def get_payload_schemas(runner: ExtensionRunnerInfo) -> dict[str, dict | None]: + """Fetch payload schemas for all actions known to the runner.""" + if not runner.initialized_event.is_set(): + await runner.initialized_event.wait() + + if runner.status != RunnerStatus.RUNNING: + raise ActionRunFailed( + f"Runner {runner.readable_id} is not running: {runner.status}" + ) + + response = await runner.client.send_request( + method=_internal_client_types.WORKSPACE_EXECUTE_COMMAND, + params=_internal_client_types.ExecuteCommandParams( + command="actions/getPayloadSchemas", + arguments=[], + ), + timeout=None, + ) + return response.result + + async def resolve_package_path( runner: ExtensionRunnerInfo, package_name: str ) -> dict[str, str]: @@ -237,7 +274,7 @@ async def notify_document_did_open( uri=document_info.uri, language_id="", 
version=int(document_info.version), - text="", + text=document_info.text, ) ), ) @@ -269,7 +306,9 @@ async def notify_document_did_change(runner: ExtensionRunnerInfo, change_params: "RunActionResponse", "RunResultFormat", "run_action", + "merge_results", "reload_action", + "get_payload_schemas", "resolve_package_path", "RunnerConfig", "update_config", diff --git a/src/finecode/runner/runner_manager.py b/src/finecode/wm_server/runner/runner_manager.py similarity index 92% rename from src/finecode/runner/runner_manager.py rename to src/finecode/wm_server/runner/runner_manager.py index 48cdbadf..15c05028 100644 --- a/src/finecode/runner/runner_manager.py +++ b/src/finecode/wm_server/runner/runner_manager.py @@ -13,17 +13,16 @@ from loguru import logger -from finecode import context, domain, domain_helpers, finecode_cmd -from finecode.config import collect_actions, config_models, read_configs -from finecode.runner import ( +from finecode.wm_server import context, domain, domain_helpers +from finecode.wm_server.config import collect_actions, config_models, read_configs +from finecode.wm_server.runner import ( runner_client, _internal_client_api, _internal_client_types, + finecode_cmd ) import finecode_jsonrpc as jsonrpc_client from finecode_jsonrpc import _io_thread -from finecode.utils import iterable_subscribe - project_changed_callback: ( typing.Callable[[domain.Project], collections.abc.Coroutine[None, None, None]] | None @@ -31,9 +30,6 @@ # get_document: typing.Callable[[], collections.abc.Coroutine] | None = None apply_workspace_edit: typing.Callable[[], collections.abc.Coroutine] | None = None start_debug_session: typing.Callable[[int], collections.abc.Coroutine] | None = None -partial_results: iterable_subscribe.IterableSubscribe = ( - iterable_subscribe.IterableSubscribe() -) # reexport RunnerFailedToStart = jsonrpc_client.RunnerFailedToStart @@ -110,9 +106,13 @@ async def _start_extension_runner_process( f"--project-path={runner.working_dir_path.as_posix()}", 
f"--env-name={runner.env_name}", ] - env_config = ws_context.ws_projects[runner.working_dir_path].env_configs[ - runner.env_name - ] + _project = ws_context.ws_projects[runner.working_dir_path] + _default_env_config = domain.EnvConfig(runner_config=domain.RunnerConfig(debug=False)) + env_config = ( + _project.env_configs.get(runner.env_name, _default_env_config) + if isinstance(_project, domain.CollectedProject) + else _default_env_config + ) runner_config = env_config.runner_config start_with_debug = debug or runner_config.debug @@ -188,7 +188,7 @@ async def on_exit(): ) async def on_progress(params: _internal_client_types.ProgressParams) -> None: - logger.debug(f"Got progress from runner for token: {params.token}") + logger.debug(f"Got progress from runner {runner.readable_id} for token: {params.token}") try: result_value = json.loads(params.value) except json.JSONDecodeError as exception: @@ -198,7 +198,7 @@ async def on_progress(params: _internal_client_types.ProgressParams) -> None: partial_result = domain.PartialResult( token=params.token, value=result_value ) - partial_results.publish(partial_result) + runner.partial_results.publish(partial_result) runner.client.feature(_internal_client_types.PROGRESS, on_progress) @@ -327,10 +327,7 @@ async def start_runners_with_presets( await read_configs.read_project_config( project=project, ws_context=ws_context ) - collect_actions.collect_actions( - project_path=project.dir_path, ws_context=ws_context - ) - collect_actions.collect_services( + collected = collect_actions.collect_project( project_path=project.dir_path, ws_context=ws_context ) except config_models.ConfigurationError as exception: @@ -338,19 +335,24 @@ async def start_runners_with_presets( f"Reading project config with presets and collecting actions in {project.dir_path} failed: {exception.message}" ) from exception + # Upgrade to ResolvedProject — presets are now resolved in the raw config + resolved = domain.ResolvedProject.from_collected(collected) + 
ws_context.ws_projects[project.dir_path] = resolved + # update config of dev_workspace runner, the new config contains resolved presets dev_workspace_runner = ws_context.ws_projects_extension_runners[ project.dir_path ]["dev_workspace"] handlers_to_init = ( - domain_helpers.collect_all_handlers_to_initialize(project, "dev_workspace") + domain_helpers.collect_all_handlers_to_initialize(resolved, "dev_workspace") if initialize_all_handlers else None ) await update_runner_config( runner=dev_workspace_runner, - project=project, + project=resolved, handlers_to_initialize=handlers_to_init, + ws_context=ws_context, ) @@ -402,16 +404,13 @@ async def start_runner( if ( project_def.dir_path not in ws_context.ws_projects_raw_configs - or project_def.actions is None + or not isinstance(project_def, domain.CollectedProject) ): try: await read_configs.read_project_config( project=project_def, ws_context=ws_context ) - collect_actions.collect_actions( - project_path=project_def.dir_path, ws_context=ws_context - ) - collect_actions.collect_services( + collect_actions.collect_project( project_path=project_def.dir_path, ws_context=ws_context ) except config_models.ConfigurationError as exception: @@ -422,7 +421,13 @@ async def start_runner( f"Found problem in configuration of {project_def.dir_path}: {exception.message}" ) from exception - await update_runner_config(runner=runner, project=project_def, handlers_to_initialize=handlers_to_initialize) + # Re-fetch from context — may now be CollectedProject if collection just happened + current_project_def = ws_context.ws_projects[project_def.dir_path] + if isinstance(current_project_def, domain.CollectedProject): + # update runner config if project actions are already known, otherwise it will + # be done as separate step + await update_runner_config(runner=runner, project=current_project_def, handlers_to_initialize=handlers_to_initialize, ws_context=ws_context) + await _finish_runner_init(runner=runner, project=project_def, 
ws_context=ws_context) runner.status = runner_client.RunnerStatus.RUNNING @@ -478,7 +483,7 @@ async def get_or_start_runner( async def _start_dev_workspace_runner( - project_def: domain.Project, ws_context: context.WorkspaceContext + project_def: domain.CollectedProject, ws_context: context.WorkspaceContext ) -> runner_client.ExtensionRunnerInfo: return await get_or_start_runner( project_def=project_def, env_name="dev_workspace", ws_context=ws_context @@ -515,10 +520,10 @@ async def _init_lsp_client( async def update_runner_config( runner: runner_client.ExtensionRunnerInfo, - project: domain.Project, + project: domain.CollectedProject, handlers_to_initialize: dict[str, list[str]] | None, + ws_context: context.WorkspaceContext, ) -> None: - assert project.actions is not None config = runner_client.RunnerConfig( actions=project.actions, action_handler_configs=project.action_handler_configs, @@ -535,6 +540,7 @@ async def update_runner_config( f"Runner failed to update config: {exception.message}" ) from exception + ws_context.ws_action_schemas.pop(project.dir_path, None) logger.debug(f"Updated config of runner {runner.readable_id}") diff --git a/tests/api/test_optimize_package_tree.py b/src/finecode/wm_server/services/__init__.py similarity index 100% rename from tests/api/test_optimize_package_tree.py rename to src/finecode/wm_server/services/__init__.py diff --git a/src/finecode/wm_server/services/action_tree.py b/src/finecode/wm_server/services/action_tree.py new file mode 100644 index 00000000..1fc463ae --- /dev/null +++ b/src/finecode/wm_server/services/action_tree.py @@ -0,0 +1,182 @@ +"""Action tree + +This module contains the logic that constructs the hierarchical action tree used by the +IDE. It also provides the request handler that the WM server exposes +as ``actions/getTree``. 
+""" + +from __future__ import annotations + +import asyncio +import pathlib +from loguru import logger + +from finecode.wm_server import context, domain + + +def _project_action_tree(project: domain.Project | None, ws_context: context.WorkspaceContext) -> list[dict]: + """Return action/env nodes for a single project. + + ``project`` may be None when constructing a node for a directory without a + project at its root. + + Side effect: populate ``ws_context.cached_actions_by_id`` so that later + ``actions/run`` requests can resolve action node identifiers. + """ + actions_nodes: list[dict] = [] + if project is None: + return actions_nodes + + if isinstance(project, domain.CollectedProject): + action_nodes: list[dict] = [] + for action in project.actions: + node_id = f"{project.dir_path.as_posix()}::{action.name}" + handlers_nodes: list[dict] = [] + for handler in action.handlers: + handler_node_id = f"{project.dir_path.as_posix()}::{action.name}::{handler.name}" + handlers_nodes.append( + { + "node_id": handler_node_id, + "name": handler.name, + "node_type": 2, # ACTION + "subnodes": [], + "status": "", + } + ) + action_nodes.append( + { + "node_id": node_id, + "name": action.name, + "node_type": 2, # ACTION + "subnodes": handlers_nodes, + "status": "", + } + ) + ws_context.cached_actions_by_id[node_id] = context.CachedAction( + action_id=node_id, + project_path=project.dir_path, + action_name=action.name, + ) + + node_id = f"{project.dir_path.as_posix()}::actions" + actions_nodes.append( + { + "node_id": node_id, + "name": "Actions", + "node_type": 3, # ACTION_GROUP + "subnodes": action_nodes, + "status": "", + } + ) + + envs_nodes: list[dict] = [] + for env in project.envs: + env_node_id = f"{project.dir_path.as_posix()}::envs::{env}" + envs_nodes.append( + { + "node_id": env_node_id, + "name": env, + "node_type": 6, # ENV + "subnodes": [], + "status": "", + } + ) + node_id = f"{project.dir_path.as_posix()}::envs" + actions_nodes.append( + { + "node_id": node_id, 
+ "name": "Environments", + "node_type": 5, # ENV_GROUP + "subnodes": envs_nodes, + "status": "", + } + ) + else: + logger.info( + f"Project has no valid config and finecode: {project.dir_path}, no actions will be shown" + ) + + return actions_nodes + + +def _build_tree(ws_context: context.WorkspaceContext) -> list[dict]: + """Construct full workspace action tree as list of node dictionaries.""" + nodes: list[dict] = [] + projects_by_ws_dir: dict[pathlib.Path, list[pathlib.Path]] = {} + + all_ws_dirs = list(ws_context.ws_dirs_paths) + all_ws_dirs.sort() + + all_projects_paths = list(ws_context.ws_projects.keys()) + all_projects_paths.sort() + all_projects_paths_set = set(all_projects_paths) + + for ws_dir in all_ws_dirs: + ws_dir_projects = [p for p in all_projects_paths_set if p.is_relative_to(ws_dir)] + projects_by_ws_dir[ws_dir] = ws_dir_projects + all_projects_paths_set -= set(ws_dir_projects) + + if all_projects_paths_set: + logger.warning( + f"Unexpected setup: these projects {all_projects_paths_set} don't belong to any of workspace dirs: {all_ws_dirs}" + ) + + for ws_dir in ws_context.ws_dirs_paths: + ws_dir_projects = projects_by_ws_dir.get(ws_dir, []) + ws_dir_nodes_by_path: dict[pathlib.Path, dict] = {} + + if ws_dir in ws_dir_projects: + dir_node_type = 1 # PROJECT + project = ws_context.ws_projects.get(ws_dir) + status = project.status.name if project is not None else "" + else: + dir_node_type = 0 # DIRECTORY + status = "" + + actions_nodes = _project_action_tree(ws_context.ws_projects.get(ws_dir), ws_context) + node = { + "node_id": ws_dir.as_posix(), + "name": ws_dir.name, + "subnodes": actions_nodes, + "node_type": dir_node_type, + "status": status, + } + nodes.append(node) + ws_dir_nodes_by_path[ws_dir] = node + + for project_path in ws_dir_projects: + project = ws_context.ws_projects.get(project_path) + status = project.status.name if project is not None else "" + actions_nodes = _project_action_tree(project, ws_context) + node = { + "node_id": 
project_path.as_posix(), + "name": project_path.name, + "subnodes": actions_nodes, + "node_type": 1, # PROJECT + "status": status, + } + + for ws_dir_node_path in reversed(list(ws_dir_nodes_by_path.keys())): + if project_path.is_relative_to(ws_dir_node_path): + ws_dir_nodes_by_path[ws_dir_node_path]["subnodes"].append(node) + break + + ws_dir_nodes_by_path[project_path] = node + + return nodes + + +async def _handle_get_tree( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Request handler that returns the action tree for the workspace.""" + + # wait for dev_workspace runners to start + async with asyncio.TaskGroup() as tg: + for envs in ws_context.ws_projects_extension_runners.values(): + dev_workspace_runner = envs.get("dev_workspace") + if dev_workspace_runner is not None: + tg.create_task(dev_workspace_runner.initialized_event.wait()) + + nodes = _build_tree(ws_context) + return {"nodes": nodes} diff --git a/src/finecode/wm_server/services/document_sync.py b/src/finecode/wm_server/services/document_sync.py new file mode 100644 index 00000000..f2f2d6aa --- /dev/null +++ b/src/finecode/wm_server/services/document_sync.py @@ -0,0 +1,188 @@ +"""Document synchronization handlers for the WM server. + +Handles document lifecycle notifications (opened, closed, changed) and forwards +them to affected extension runners. +""" + +from __future__ import annotations + +import asyncio +import pathlib +from loguru import logger + +from finecode.wm_server import context, domain +from finecode.wm_server.services import text_utils + + +async def handle_documents_opened( + params: dict | None, ws_context: context.WorkspaceContext +) -> None: + """Handle document opened notification. 
Forward to affected runners.""" + if params is None: + return + + from finecode.wm_server.runner import runner_client + + uri = params.get("uri") + version = params.get("version") + text = params.get("text", "") + if not uri: + return + + file_path = pathlib.Path(uri.replace("file://", "")) + projects_paths = [ + project_path + for project_path, project in ws_context.ws_projects.items() + if project.status == domain.ProjectStatus.CONFIG_VALID + and file_path.is_relative_to(project_path) + ] + + document_info = domain.TextDocumentInfo(uri=uri, version=str(version or ""), text=text) + ws_context.opened_documents[uri] = document_info + try: + async with asyncio.TaskGroup() as tg: + for project_path in projects_paths: + runners_by_env = ws_context.ws_projects_extension_runners.get( + project_path, {} + ) + for runner in runners_by_env.values(): + if runner.status == runner_client.RunnerStatus.RUNNING: + tg.create_task( + runner_client.notify_document_did_open( + runner=runner, document_info=document_info + ) + ) + except ExceptionGroup as eg: + for exception in eg.exceptions: + logger.exception(exception) + logger.error(f"Error while sending opened document: {eg}") + + +async def handle_documents_closed( + params: dict | None, ws_context: context.WorkspaceContext +) -> None: + """Handle document closed notification. 
Forward to affected runners.""" + if params is None: + return + + from finecode.wm_server.runner import runner_client + + uri = params.get("uri") + if not uri: + return + + ws_context.opened_documents.pop(uri, None) + + file_path = pathlib.Path(uri.replace("file://", "")) + projects_paths = [ + project_path + for project_path, project in ws_context.ws_projects.items() + if project.status == domain.ProjectStatus.CONFIG_VALID + and file_path.is_relative_to(project_path) + ] + + try: + async with asyncio.TaskGroup() as tg: + for project_path in projects_paths: + runners_by_env = ws_context.ws_projects_extension_runners.get( + project_path, {} + ) + for runner in runners_by_env.values(): + if runner.status != runner_client.RunnerStatus.RUNNING: + logger.trace( + f"Runner {runner.readable_id} is not running, skip it" + ) + continue + + tg.create_task( + runner_client.notify_document_did_close( + runner=runner, document_uri=uri + ) + ) + except ExceptionGroup as e: + logger.error(f"Error while sending closed document: {e}") + + +async def handle_documents_changed( + params: dict | None, ws_context: context.WorkspaceContext +) -> None: + """Handle document changed notification. 
Forward to affected runners.""" + if params is None: + return + + from finecode.wm_server.runner import runner_client + + uri = params.get("uri") + version = params.get("version") + content_changes = params.get("contentChanges", []) + if not uri: + return + + file_path = pathlib.Path(uri.replace("file://", "")) + projects_paths = [ + project_path + for project_path, project in ws_context.ws_projects.items() + if project.status == domain.ProjectStatus.CONFIG_VALID + and file_path.is_relative_to(project_path) + ] + + # Convert camelCase content changes back to snake_case for runner_client + mapped_changes = [] + for change in content_changes: + if "range" in change: + # TextDocumentContentChangePartial + mapped_change = runner_client.TextDocumentContentChangePartial( + range=runner_client.Range( + start=runner_client.Position( + line=change["range"]["start"]["line"], + character=change["range"]["start"]["character"], + ), + end=runner_client.Position( + line=change["range"]["end"]["line"], + character=change["range"]["end"]["character"], + ), + ), + text=change.get("text", ""), + range_length=change.get("rangeLength"), + ) + mapped_changes.append(mapped_change) + else: + # TextDocumentContentChangeWholeDocument + mapped_change = runner_client.TextDocumentContentChangeWholeDocument( + text=change.get("text", "") + ) + mapped_changes.append(mapped_change) + + change_params = runner_client.DidChangeTextDocumentParams( + text_document=runner_client.VersionedTextDocumentIdentifier( + version=version, uri=uri + ), + content_changes=mapped_changes, + ) + + # Keep the content cache current so runner restarts get the latest state. 
+ cached = ws_context.opened_documents.get(uri) + if cached is not None: + cached.text = text_utils.apply_text_changes(cached.text, mapped_changes) + cached.version = str(version) + + try: + async with asyncio.TaskGroup() as tg: + for project_path in projects_paths: + runners_by_env = ws_context.ws_projects_extension_runners.get( + project_path, {} + ) + for runner in runners_by_env.values(): + if runner.status != runner_client.RunnerStatus.RUNNING: + logger.trace( + f"Runner {runner.readable_id} is not running, skip it" + ) + continue + + tg.create_task( + runner_client.notify_document_did_change( + runner=runner, change_params=change_params + ) + ) + except ExceptionGroup as e: + logger.error(f"Error while sending changed document: {e}") diff --git a/src/finecode/wm_server/services/partial_results_service.py b/src/finecode/wm_server/services/partial_results_service.py new file mode 100644 index 00000000..5cce195c --- /dev/null +++ b/src/finecode/wm_server/services/partial_results_service.py @@ -0,0 +1,171 @@ +"""Helper for running actions that produce streaming partial results. + +It is intentionally small and +only encapsulates the orchestration logic; it does **not** perform any I/O +with client sockets. The request handler in ``wm_server.py`` will take the +async iterator produced here and write notifications back to the caller. +""" +from __future__ import annotations + +import asyncio +import pathlib + +from loguru import logger + +from finecode.wm_server import context, domain +from finecode.wm_server.runner import runner_client +from finecode.wm_server.services.run_service import ( + find_all_projects_with_action, + run_with_partial_results, + start_required_environments, + RunActionTrigger, + DevEnv, + RunResultFormat, +) + + +class PartialResultsStream: + """Asynchronous stream of partial values with final-result storage. 
+ + Instances support ``async for`` iteration; values appended by the producer + are yielded to the consumer until :meth:`set_final` is called and the + internal queue is drained. + """ + + def __init__(self) -> None: + self._queue: asyncio.Queue[domain.PartialResultRawValue] = asyncio.Queue() + self._final: dict | None = None + self._done = asyncio.Event() + + def put(self, value: domain.PartialResultRawValue) -> None: + self._queue.put_nowait(value) + + def set_final(self, result: dict) -> None: + self._final = result + self._done.set() + + async def __aiter__(self): + # keep yielding until done and queue drained + while True: + if self._done.is_set() and self._queue.empty(): + break + yield await self._queue.get() + + async def final_result(self) -> dict: + await self._done.wait() + return self._final or {} + + +async def run_action_with_partial_results( + action_name: str, + project_path: str, + params: dict, + partial_result_token: str | int, + run_trigger: RunActionTrigger, + dev_env: DevEnv, + ws_context: context.WorkspaceContext, + result_formats: list[str] | None = None, +) -> PartialResultsStream: + """Run an action and return a stream of partial values. + + If ``project_path`` is the empty string the action will be executed in all + projects that declare it; otherwise it is run only in the project at that path. + + The returned :class:`PartialResultsStream` can be iterated to receive + ``domain.PartialResultRawValue`` objects. Once execution completes the + caller should call :meth:`PartialResultsStream.final_result` to obtain the + aggregated result equivalent to what ``actions/run`` would return. 
+ """ + + # determine target project(s) — only CollectedProject instances have actions + projects: list[domain.CollectedProject] + if project_path: + project = ws_context.ws_projects.get(pathlib.Path(project_path)) + if project is None or not isinstance(project, domain.CollectedProject): + raise ValueError(f"Project '{project_path}' not found") + projects = [project] + else: + paths = find_all_projects_with_action(action_name, ws_context) + projects = [ + p for path in paths + if isinstance(p := ws_context.ws_projects[path], domain.CollectedProject) + ] + + # start runners so that run_with_partial_results can attach + await start_required_environments( + {p.dir_path: [action_name] for p in projects}, + ws_context, + initialize_all_handlers=True, + ) + + requested_formats = result_formats or ["json"] + runner_formats = [RunResultFormat(fmt) for fmt in requested_formats if fmt in ("json", "string")] + + stream = PartialResultsStream() + final_results: list[dict] = [] + return_codes: list[int] = [] + runners_used: list[runner_client.ExtensionRunnerInfo] = [] + + async def run_one(project: domain.CollectedProject) -> None: + logger.trace(f"partial_results: run_one start project={project.name} action={action_name} token={partial_result_token}") + async with run_with_partial_results( + action_name=action_name, + params=params, + partial_result_token=partial_result_token, + project_dir_path=project.dir_path, + run_trigger=run_trigger, + dev_env=dev_env, + ws_context=ws_context, + initialize_all_handlers=True, + result_formats=runner_formats, + ) as ctx: + partial_count = 0 + async for value in ctx: + partial_count += 1 + value_preview = str(value)[:200] if value else "None" + logger.trace(f"partial_results: got partial #{partial_count} from runner for project={project.name}: {value_preview}") + result_by_format: dict[str, domain.PartialResultRawValue] = {} + if "json" in requested_formats: + result_by_format["json"] = value + stream.put({"resultByFormat": 
result_by_format}) + logger.trace(f"partial_results: partial iteration done for project={project.name}, got {partial_count} partials") + + # Responses collected by the context manager from runner tasks + for resp in ctx.responses: + json_result = resp.json() + logger.trace(f"partial_results: final result for project={project.name}: return_code={resp.return_code}, keys={list(json_result.keys()) if isinstance(json_result, dict) else type(json_result)}") + final_results.append(json_result) + return_codes.append(resp.return_code) + + # If the runner sent no partial results (collected everything internally + # and returned it all as the final response), emit the final result as a + # partial result so the client still receives streaming updates. + if partial_count == 0 and json_result: + result_by_format: dict[str, domain.PartialResultRawValue] = {} + if "json" in requested_formats: + result_by_format["json"] = json_result + logger.trace(f"partial_results: no partials received for project={project.name}, emitting final result as partial") + stream.put({"resultByFormat": result_by_format}) + + # Collect a runner from this project to use for cross-project result merging. 
+ action = next((a for a in project.actions if a.name == action_name), None) + if action and action.handlers: + env_name = action.handlers[0].env + runner = ws_context.ws_projects_extension_runners.get(project.dir_path, {}).get(env_name) + if runner is not None: + runners_used.append(runner) + + async with asyncio.TaskGroup() as tg: + for proj in projects: + tg.create_task(run_one(proj)) + + if final_results and runners_used: + aggregated = await runner_client.merge_results(runners_used[0], action_name, final_results) + else: + aggregated = {} + logger.trace(f"partial_results: aggregated result keys={list(aggregated.keys()) if isinstance(aggregated, dict) else type(aggregated)}") + final_result_by_format: dict[str, dict] = {} + if "json" in requested_formats: + final_result_by_format["json"] = aggregated + stream.set_final({"resultByFormat": final_result_by_format, "returnCode": max(return_codes) if return_codes else 0}) + return stream diff --git a/src/finecode/services/run_service/__init__.py b/src/finecode/wm_server/services/run_service/__init__.py similarity index 90% rename from src/finecode/services/run_service/__init__.py rename to src/finecode/wm_server/services/run_service/__init__.py index 8bcb6119..12d79ac5 100644 --- a/src/finecode/services/run_service/__init__.py +++ b/src/finecode/wm_server/services/run_service/__init__.py @@ -1,4 +1,7 @@ -from .exceptions import ActionRunFailed, StartingEnvironmentsFailed +from .exceptions import ( + ActionRunFailed, + StartingEnvironmentsFailed, +) from .proxy_utils import ( run_action, find_action_project_and_run, diff --git a/src/finecode/services/run_service/exceptions.py b/src/finecode/wm_server/services/run_service/exceptions.py similarity index 77% rename from src/finecode/services/run_service/exceptions.py rename to src/finecode/wm_server/services/run_service/exceptions.py index 675c1976..b3ea73ed 100644 --- a/src/finecode/services/run_service/exceptions.py +++ 
b/src/finecode/wm_server/services/run_service/exceptions.py @@ -1,8 +1,12 @@ class ActionRunFailed(Exception): def __init__(self, message: str) -> None: + super().__init__(message) self.message = message class StartingEnvironmentsFailed(Exception): def __init__(self, message: str) -> None: + super().__init__(message) self.message = message + + diff --git a/src/finecode/services/run_service/proxy_utils.py b/src/finecode/wm_server/services/run_service/proxy_utils.py similarity index 86% rename from src/finecode/services/run_service/proxy_utils.py rename to src/finecode/wm_server/services/run_service/proxy_utils.py index 5dfcc785..6b3f44b9 100644 --- a/src/finecode/services/run_service/proxy_utils.py +++ b/src/finecode/wm_server/services/run_service/proxy_utils.py @@ -9,13 +9,13 @@ import ordered_set from loguru import logger -from finecode import context, domain, domain_helpers, find_project, user_messages -from finecode.runner import runner_manager -from finecode.runner import runner_client -from finecode.runner.runner_manager import RunnerFailedToStart -from finecode.runner.runner_client import RunResultFormat # reexport +from finecode import user_messages +from finecode.wm_server import find_project, context, domain, domain_helpers +from finecode.wm_server.runner import runner_manager +from finecode.wm_server.runner import runner_client +from finecode.wm_server.runner.runner_manager import RunnerFailedToStart +from finecode.wm_server.runner.runner_client import RunResultFormat # reexport -from finecode.services.run_service import payload_preprocessor from .exceptions import ActionRunFailed, StartingEnvironmentsFailed @@ -70,7 +70,6 @@ async def find_action_project_and_run( params=params, project_def=project, ws_context=ws_context, - preprocess_payload=False, run_trigger=run_trigger, dev_env=dev_env, initialize_all_handlers=initialize_all_handlers, @@ -150,32 +149,62 @@ async def run_action_and_notify( partial_results_task: asyncio.Task, run_trigger: 
runner_client.RunActionTrigger, dev_env: runner_client.DevEnv, + result_formats: list[runner_client.RunResultFormat] | None = None, ) -> runner_client.RunActionResponse: try: - return await run_action_in_runner( + options: dict[str, typing.Any] = { + "partial_result_token": partial_result_token, + "meta": {"trigger": run_trigger.value, "dev_env": dev_env.value}, + } + if result_formats is not None: + options["result_formats"] = result_formats + logger.trace(f"run_action_and_notify: sending to runner {runner.readable_id}, action={action_name}, token={partial_result_token}, options_keys={list(options.keys())}") + response = await run_action_in_runner( action_name=action_name, params=params, runner=runner, - options={ - "partial_result_token": partial_result_token, - "meta": {"trigger": run_trigger.value, "dev_env": dev_env.value}, - }, + options=options, ) + logger.trace(f"run_action_and_notify: got response from runner {runner.readable_id}, return_code={response.return_code}, result_formats={list(response.result_by_format.keys())}") + return response finally: + logger.trace(f"run_action_and_notify: ending result_list, cancelling partial_results_task for token={partial_result_token}") result_list.end() partial_results_task.cancel("Got final result") async def get_partial_results( - result_list: AsyncList, partial_result_token: int | str + result_list: AsyncList, + partial_result_token: int | str, + runner: runner_client.ExtensionRunnerInfo, ) -> None: try: - with runner_manager.partial_results.iterator() as iterator: + logger.trace(f"get_partial_results: listening on runner {runner.readable_id} for token={partial_result_token}") + with runner.partial_results.iterator() as iterator: async for partial_result in iterator: + logger.trace(f"get_partial_results: received partial from {runner.readable_id}, result_token={partial_result.token}, our_token={partial_result_token}, match={partial_result.token == partial_result_token}") if partial_result.token == 
partial_result_token: + value_preview = str(partial_result.value)[:200] if partial_result.value else "None" + logger.trace(f"get_partial_results: matched! value preview: {value_preview}") result_list.append(partial_result.value) except asyncio.CancelledError: - pass + logger.trace(f"get_partial_results: cancelled for runner {runner.readable_id} token={partial_result_token}") + + +class RunWithPartialResultsContext: + """Holds both the partial results async iterable and the final runner responses. + + ``partials`` is available immediately for iteration. ``responses`` is + populated after the context manager exits (i.e. after all runner tasks + complete). + """ + + def __init__(self, partials: AsyncList[domain.PartialResultRawValue]) -> None: + self.partials = partials + self.responses: list[runner_client.RunActionResponse] = [] + + def __aiter__(self): + return self.partials.__aiter__() @contextlib.asynccontextmanager @@ -188,20 +217,16 @@ async def run_with_partial_results( dev_env: runner_client.DevEnv, ws_context: context.WorkspaceContext, initialize_all_handlers: bool = False, -) -> collections.abc.AsyncIterator[ - collections.abc.AsyncIterable[domain.PartialResultRawValue] -]: + result_formats: list[runner_client.RunResultFormat] | None = None, +) -> collections.abc.AsyncIterator[RunWithPartialResultsContext]: logger.trace(f"Run {action_name} in project {project_dir_path}") result: AsyncList[domain.PartialResultRawValue] = AsyncList() + ctx = RunWithPartialResultsContext(partials=result) project = ws_context.ws_projects[project_dir_path] try: + action_tasks: list[asyncio.Task] = [] async with asyncio.TaskGroup() as tg: - partial_results_task = tg.create_task( - get_partial_results( - result_list=result, partial_result_token=partial_result_token - ) - ) action = next( action for action in project.actions if action.name == action_name ) @@ -222,20 +247,31 @@ async def run_with_partial_results( f"Runner {env_name} in project {project.dir_path} failed: 
{exception.message}" ) from exception - tg.create_task( + runner_partial_results_task = tg.create_task( + get_partial_results( + result_list=result, + partial_result_token=partial_result_token, + runner=runner, + ) + ) + action_tasks.append(tg.create_task( run_action_and_notify( action_name=action_name, params=params, partial_result_token=partial_result_token, runner=runner, result_list=result, - partial_results_task=partial_results_task, + partial_results_task=runner_partial_results_task, run_trigger=run_trigger, dev_env=dev_env, + result_formats=result_formats, ) - ) + )) - yield result + yield ctx + # TaskGroup exited — all tasks completed, collect final responses + for task in action_tasks: + ctx.responses.append(task.result()) except ExceptionGroup as eg: errors: list[str] = [] for exception in eg.exceptions: @@ -290,15 +326,10 @@ def find_all_projects_with_action( # exclude projects without valid config and projects without requested action for project_dir_path, project_def in relevant_projects.copy().items(): - if project_def.status != domain.ProjectStatus.CONFIG_VALID: - # projects without valid config have no actions. 
Files of those projects - # will be not processed because we don't know whether it has one of expected - # actions + if not isinstance(project_def, domain.CollectedProject): + # projects without collected actions cannot be matched continue - # all running projects have actions - assert project_def.actions is not None - try: next(action for action in project_def.actions if action.name == action_name) except StopIteration: @@ -327,7 +358,7 @@ async def start_required_environments( required_envs_by_project: dict[pathlib.Path, set[str]] = {} for project_dir_path, action_names in actions_by_projects.items(): project = ws_context.ws_projects[project_dir_path] - if project.actions is not None: + if isinstance(project, domain.CollectedProject): project_required_envs = set() for action_name in action_names: # find the action and collect envs from its handlers @@ -425,6 +456,7 @@ async def _start_runner_or_update_config( runner=runner, project=project, handlers_to_initialize=handlers_to_initialize, + ws_context=ws_context ) except RunnerFailedToStart as exception: raise StartingEnvironmentsFailed( @@ -556,6 +588,8 @@ def find_projects_with_actions( actions_set = ordered_set.OrderedSet(actions) for project in ws_context.ws_projects.values(): + if not isinstance(project, domain.CollectedProject): + continue project_actions_names = [action.name for action in project.actions] # find which of requested actions are available in the project action_to_run_in_project = actions_set & ordered_set.OrderedSet( @@ -577,12 +611,11 @@ def find_projects_with_actions( async def run_action( action_name: str, params: dict[str, typing.Any], - project_def: domain.Project, + project_def: domain.CollectedProject, ws_context: context.WorkspaceContext, run_trigger: runner_client.RunActionTrigger, dev_env: runner_client.DevEnv, result_formats: list[runner_client.RunResultFormat] | None = None, - preprocess_payload: bool = True, initialize_all_handlers: bool = False, ) -> RunActionResponse: 
formatted_params = str(params) @@ -601,15 +634,7 @@ async def run_action( + " Please check logs." ) - if preprocess_payload: - payload = await payload_preprocessor.preprocess_for_project( - action_name=action_name, - payload=params, - project_dir_path=project_def.dir_path, - ws_context=ws_context, - ) - else: - payload = params + payload = params # cases: # - base: all action handlers are in one env @@ -618,7 +643,6 @@ async def run_action( # - mixed envs: action handlers are in different envs # -- concurrent execution of handlers # -- sequential execution of handlers - assert project_def.actions is not None action = next( action for action in project_def.actions if action.name == action_name ) diff --git a/src/finecode/services/shutdown_service.py b/src/finecode/wm_server/services/shutdown_service.py similarity index 84% rename from src/finecode/services/shutdown_service.py rename to src/finecode/wm_server/services/shutdown_service.py index 8b2d2db0..4c333255 100644 --- a/src/finecode/services/shutdown_service.py +++ b/src/finecode/wm_server/services/shutdown_service.py @@ -1,7 +1,7 @@ from loguru import logger -from finecode import context -from finecode.runner import runner_client, runner_manager +from finecode.wm_server import context +from finecode.wm_server.runner import runner_client, runner_manager def on_shutdown(ws_context: context.WorkspaceContext): @@ -19,5 +19,3 @@ def on_shutdown(ws_context: context.WorkspaceContext): if ws_context.runner_io_thread is not None: logger.trace("Stop IO thread") ws_context.runner_io_thread.stop(timeout=5) - - # TODO: stop MCP if running diff --git a/src/finecode/wm_server/services/text_utils.py b/src/finecode/wm_server/services/text_utils.py new file mode 100644 index 00000000..fd1c169c --- /dev/null +++ b/src/finecode/wm_server/services/text_utils.py @@ -0,0 +1,56 @@ +"""Utilities for applying LSP text document content changes to a string.""" + +from __future__ import annotations + +from finecode.wm_server.runner import 
runner_client + + +def apply_text_changes( + text: str, + changes: list[ + runner_client.TextDocumentContentChangePartial + | runner_client.TextDocumentContentChangeWholeDocument + ], +) -> str: + """Apply a sequence of LSP content changes to *text* and return the result. + + LSP character offsets are UTF-16 code unit counts. For files that contain + only BMP characters (U+0000–U+FFFF) each character maps to exactly one + UTF-16 code unit, so plain string indexing is correct. Files with + characters outside the BMP (e.g. emoji) may see off-by-one errors in the + rare case where a range boundary falls inside or immediately after such a + character; this is an accepted limitation for now. + """ + for change in changes: + if isinstance(change, runner_client.TextDocumentContentChangeWholeDocument): + text = change.text + else: + text = _apply_partial_change(text, change) + return text + + +def _apply_partial_change( + text: str, change: runner_client.TextDocumentContentChangePartial +) -> str: + lines = text.split("\n") + + start_line = change.range.start.line + start_char = change.range.start.character + end_line = change.range.end.line + end_char = change.range.end.character + + # Build the prefix: everything before the start position. + prefix = "\n".join(lines[:start_line]) + if start_line > 0: + prefix += "\n" + if start_line < len(lines): + prefix += lines[start_line][:start_char] + + # Build the suffix: everything after the end position. 
+ suffix = "" + if end_line < len(lines): + suffix = lines[end_line][end_char:] + if end_line + 1 < len(lines): + suffix += "\n" + "\n".join(lines[end_line + 1 :]) + + return prefix + change.text + suffix diff --git a/src/finecode/wm_server/utils/__init__.py b/src/finecode/wm_server/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/finecode/utils/iterable_subscribe.py b/src/finecode/wm_server/utils/iterable_subscribe.py similarity index 100% rename from src/finecode/utils/iterable_subscribe.py rename to src/finecode/wm_server/utils/iterable_subscribe.py diff --git a/src/finecode/wm_server/wm_lifecycle.py b/src/finecode/wm_server/wm_lifecycle.py new file mode 100644 index 00000000..d7e0ff34 --- /dev/null +++ b/src/finecode/wm_server/wm_lifecycle.py @@ -0,0 +1,150 @@ +"""WM server lifecycle helpers used by clients. + +These functions let any client (LSP server, MCP server, CLI) discover, start, +and wait for the WM server without importing the server implementation itself. +""" + +from __future__ import annotations + +import asyncio +import os +import pathlib +import socket +import subprocess +import sys +import tempfile + +from loguru import logger + +NO_CLIENT_TIMEOUT_SECONDS = 30 + + +def _cache_dir() -> pathlib.Path: + """Return the FineCode cache directory inside the dev_workspace venv.""" + return pathlib.Path(sys.executable).parent.parent / "cache" / "finecode" + + +def discovery_file_path() -> pathlib.Path: + return _cache_dir() / "wm_port" + + +def read_port() -> int | None: + """Read the WM server port from the discovery file. Returns None if not found.""" + path = discovery_file_path() + if not path.exists(): + return None + try: + return int(path.read_text().strip()) + except (ValueError, OSError): + return None + + +def running_port() -> int | None: + """Return the port if a WM server is actively listening, None otherwise. 
+ + Unlike ``read_port()``, this verifies the server actually accepts connections, + so a stale discovery file left by a crashed server returns None. + """ + port = read_port() + if port is None: + return None + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.settimeout(1) + s.connect(("127.0.0.1", port)) + return port + except (ConnectionRefusedError, OSError): + return None + + +def is_running() -> bool: + """Check if a WM server is already listening (discovery file exists and port responds).""" + return running_port() is not None + + +def ensure_running(workdir: pathlib.Path, log_level: str = "INFO") -> None: + """Start the WM server as a subprocess if not already running.""" + if is_running(): + return + + python_cmd = sys.executable + logger.info(f"Starting FineCode WM server subprocess in {workdir}") + subprocess.Popen( + [python_cmd, "-m", "finecode", "start-wm-server", f"--log-level={log_level}"], + cwd=str(workdir), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + +async def wait_until_ready(timeout: float = 30) -> int: + """Wait for the WM server to become available. Returns the port.""" + deadline = asyncio.get_event_loop().time() + timeout + while asyncio.get_event_loop().time() < deadline: + port = running_port() + if port is not None: + return port + await asyncio.sleep(0.5) + raise TimeoutError( + f"FineCode WM server did not start within {timeout}s. " + f"Check logs for errors." + ) + + +def start_own_server( + workdir: pathlib.Path, + log_level: str = "INFO", + port_file: pathlib.Path | None = None, +) -> pathlib.Path: + """Start a dedicated WM server subprocess for exclusive use by one client. + + Unlike ``ensure_running()``, this always starts a *fresh* process and writes + the listening port to a dedicated file (not the shared discovery file), so it + does not interfere with a concurrently running shared WM server (e.g. the one + used by the LSP/MCP clients). 
+ + If *port_file* is given the server writes its port there; otherwise a + temporary file is created automatically. + + Returns the path to the port file. Pass it to + ``wait_until_ready_from_file()`` to obtain the port and connect. + The server auto-stops after the client disconnects. + """ + if port_file is None: + fd, port_file_str = tempfile.mkstemp(suffix=".finecode_port") + os.close(fd) + port_file = pathlib.Path(port_file_str) + # Write empty content so the server overwrites rather than appends. + port_file.write_text("") + + logger.info(f"Starting dedicated FineCode WM server in {workdir}") + subprocess.Popen( + [sys.executable, "-m", "finecode", "start-wm-server", "--port-file", str(port_file), "--log-level", log_level], + cwd=str(workdir), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + return port_file + + +async def wait_until_ready_from_file( + port_file: pathlib.Path, timeout: float = 30 +) -> int: + """Wait for a dedicated WM server using a custom port file. Returns the port.""" + deadline = asyncio.get_event_loop().time() + timeout + while asyncio.get_event_loop().time() < deadline: + try: + content = port_file.read_text().strip() + if content: + port = int(content) + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.settimeout(1) + s.connect(("127.0.0.1", port)) + return port + except (FileNotFoundError, ValueError, OSError): + pass + await asyncio.sleep(0.5) + raise TimeoutError( + f"Dedicated FineCode WM server did not start within {timeout}s. " + "Check logs for errors." + ) diff --git a/src/finecode/wm_server/wm_server.py b/src/finecode/wm_server/wm_server.py new file mode 100644 index 00000000..5c863973 --- /dev/null +++ b/src/finecode/wm_server/wm_server.py @@ -0,0 +1,1355 @@ +# docs: docs/concepts.md, docs/cli.md +"""FineCode WM Server — TCP JSON-RPC server for external tool integration. + +The WM server is the shared backbone that holds the WorkspaceContext. 
Any client +(LSP server, MCP server, CLI) can start it if not already running and connect to it. +When the last client disconnects, the server shuts down automatically. + +Discovery: writes the listening port to .venvs/dev_workspace/cache/finecode/wm_port +so clients can find it (same cache directory used for action results). +""" + +from __future__ import annotations + +import asyncio +import json +import pathlib +import socket +import typing + +from loguru import logger + +from finecode.wm_server import context, domain +from finecode.wm_server.wm_lifecycle import discovery_file_path + +CONTENT_LENGTH_HEADER = "Content-Length: " +DISCONNECT_TIMEOUT_SECONDS = 30 +NO_CLIENT_TIMEOUT_SECONDS = 30 + +# save so that server/getInfo can return it +_log_file_path: pathlib.Path | None = None + + +# --------------------------------------------------------------------------- +# JSON-RPC helpers +# --------------------------------------------------------------------------- + + +def _jsonrpc_response(id: int | str, result: typing.Any) -> dict: + return {"jsonrpc": "2.0", "id": id, "result": result} + + +def _jsonrpc_error( + id: int | str | None, code: int, message: str +) -> dict: + return {"jsonrpc": "2.0", "id": id, "error": {"code": code, "message": message}} + + +# --------------------------------------------------------------------------- +# Content-Length framing (shared with finecode_jsonrpc) +# --------------------------------------------------------------------------- + + +async def _read_message(reader: asyncio.StreamReader) -> dict | None: + """Read one Content-Length framed JSON-RPC message. 
Returns None on EOF.""" + header_line = await reader.readline() + if not header_line: + return None + header = header_line.decode("utf-8").strip() + if not header.startswith(CONTENT_LENGTH_HEADER): + logger.warning(f"FineCode API: unexpected header: {header!r}") + return None + content_length = int(header[len(CONTENT_LENGTH_HEADER) :]) + + # Read the blank separator line + separator = await reader.readline() + if separator.strip(): + logger.warning(f"FineCode API: expected blank line, got: {separator!r}") + + body = await reader.readexactly(content_length) + return json.loads(body.decode("utf-8")) + + +def _write_message(writer: asyncio.StreamWriter, msg: dict) -> None: + """Write one Content-Length framed JSON-RPC message.""" + body = json.dumps(msg).encode("utf-8") + header = f"Content-Length: {len(body)}\r\n\r\n".encode("utf-8") + writer.write(header + body) + + +# --------------------------------------------------------------------------- +# Method handlers (requests — client sends id, server responds) +# See docs/wm-protocol.md for full protocol documentation. +# --------------------------------------------------------------------------- + +NOT_IMPLEMENTED_CODE = -32002 +NOT_IMPLEMENTED_MSG = "Not yet implemented" + +MethodHandler = typing.Callable[ + [dict | None, context.WorkspaceContext], + typing.Coroutine[typing.Any, typing.Any, typing.Any], +] + +NotificationHandler = typing.Callable[ + [dict | None, context.WorkspaceContext], + typing.Coroutine[typing.Any, typing.Any, None], +] + + +class _NotImplementedError(Exception): + """Raised by stubs to signal that the method is not yet implemented.""" + + +def _stub(method_name: str) -> MethodHandler: + """Create a stub handler that raises _NotImplementedError.""" + + async def handler( + params: dict | None, ws_context: context.WorkspaceContext + ) -> typing.Any: + raise _NotImplementedError(f"{method_name}: {NOT_IMPLEMENTED_MSG}") + + handler.__doc__ = f"Stub for {method_name}. See docs/wm-protocol.md." 
+ return handler + + +def _notification_stub(method_name: str) -> NotificationHandler: + """Create a stub notification handler that logs and does nothing.""" + + async def handler( + params: dict | None, ws_context: context.WorkspaceContext + ) -> None: + logger.trace(f"FineCode API: notification {method_name} received (stub, ignoring)") + + handler.__doc__ = f"Stub for {method_name}. See docs/wm-protocol.md." + return handler + + +# -- Server → client notifications ------------------------------------------ + + +def _notify_all_clients(method: str, params: dict) -> None: + """Broadcast a JSON-RPC notification to all connected clients.""" + msg = {"jsonrpc": "2.0", "method": method, "params": params} + for writer in list(_connected_clients): + try: + _write_message(writer, msg) + except Exception: + logger.trace(f"FineCode API: failed to notify client, skipping") + + +def _project_to_dict(project: domain.Project) -> dict: + return { + "name": project.name, + "path": str(project.dir_path), + "status": project.status.name, + } + + +def _find_project_by_path( + ws_context: context.WorkspaceContext, project_path: str +) -> domain.Project | None: + """Look up a project by its absolute directory path (canonical external identifier).""" + return ws_context.ws_projects.get(pathlib.Path(project_path)) + + +# -- Implemented handlers -------------------------------------------------- + + +async def _handle_list_projects( + params: dict | None, ws_context: context.WorkspaceContext +) -> list[dict]: + """List all projects. Params: {}. Result: [{name, path, status}].""" + return [_project_to_dict(p) for p in ws_context.ws_projects.values()] + + +async def _handle_get_project_raw_config( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Return the resolved raw config for a project by path. 
+ + Params: ``{"project": "/abs/path/to/project"}`` + Result: ``{"rawConfig": {...}}`` + """ + params = params or {} + project_path = params.get("project") + if not project_path: + raise ValueError("project parameter is required") + + project = _find_project_by_path(ws_context, project_path) + if project is None: + raise ValueError(f"Project '{project_path}' not found") + + raw_config = ws_context.ws_projects_raw_configs.get(project.dir_path, {}) + return {"rawConfig": raw_config} + + +async def _handle_find_project_for_file( + params: dict, ws_context: context.WorkspaceContext +) -> dict: + """Return project directory path containing a given file. + + It finds the *nearest* project in the + workspace that actually "uses finecode" (i.e. has a valid config). The + project is determined purely based on path containment. + + **Params:** ``{"filePath": "/abs/path/to/file"}`` + **Result:** ``{"project": "/abs/path/to/project"}`` or ``{"project": null}`` if + the file does not belong to any suitable project. + """ + + file_path = pathlib.Path(params["filePath"]) + + # iterate over known projects in reverse-sorted order so that nested/child + # projects are considered before their parents. This mirrors the behaviour + # in ``find_project_with_action_for_file`` but without any action-specific + # checks. + sorted_dirs = list(ws_context.ws_projects.keys()) + # reverse sort by path (string) ensures children come first + sorted_dirs.sort(reverse=True) + + for project_dir in sorted_dirs: + if file_path.is_relative_to(project_dir): + project = ws_context.ws_projects[project_dir] + if project.status == domain.ProjectStatus.CONFIG_VALID: + return {"project": str(project.dir_path)} + # skip projects that aren't using finecode + continue + + # not in any project or none of the containing projects are CONFIG_VALID + return {"project": None} + + +async def _handle_add_dir( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Add a workspace directory. 
Discovers projects, reads configs, starts runners. + + Params: + dir_path: str - absolute path to the workspace directory + start_runners: bool - whether to start extension runners (default true). + When false, configs are read and actions collected without starting any + runners. Useful when runner environments may not exist yet (e.g. before + running prepare-envs). + projects: list[str] | null - optional list of project paths (absolute) to initialize. + Projects not in this list are discovered but not config-initialized or + started. Omit (or pass null) to initialize all projects. + Calling add_dir again for the same dir with a different filter (or no + filter) will initialize the previously skipped projects. + """ + from finecode.wm_server.config import collect_actions, read_configs + from finecode.wm_server.runner import runner_manager + from finecode.wm_server.runner.runner_client import RunnerStatus + + params = params or {} + dir_path = pathlib.Path(params["dirPath"]) + start_runners: bool = params.get("startRunners", True) + projects_filter: set[str] | None = ( + set(params["projects"]) if params.get("projects") else None + ) + logger.trace(f"Add ws dir: {dir_path}") + + if dir_path not in ws_context.ws_dirs_paths: + ws_context.ws_dirs_paths.append(dir_path) + + # Discover new projects in this dir (idempotent — skips already-known ones). + await read_configs.read_projects_in_dir(dir_path, ws_context) + + # Collect all projects in this dir that haven't been config-initialized yet. + # This covers both newly discovered projects and ones that were filtered out + # by a previous add_dir call with a projects filter. 
+ projects_to_init = [ + p for p in ws_context.ws_projects.values() + if p.dir_path.is_relative_to(dir_path) + and p.dir_path not in ws_context.ws_projects_raw_configs + ] + + if projects_filter is not None: + projects_to_init = [p for p in projects_to_init if str(p.dir_path) in projects_filter] + + for project in projects_to_init: + await read_configs.read_project_config( + project=project, ws_context=ws_context, resolve_presets=False + ) + + if not start_runners: + # Collect actions directly from raw config without needing runners. + from finecode.wm_server.config import config_models + for project in projects_to_init: + if project.status == domain.ProjectStatus.CONFIG_VALID: + try: + collect_actions.collect_project( + project_path=project.dir_path, ws_context=ws_context + ) + except config_models.ConfigurationError as exc: + logger.warning( + f"Failed to collect actions for {project.name}: {exc.message}" + ) + return {"projects": [_project_to_dict(p) for p in projects_to_init]} + + try: + await runner_manager.start_runners_with_presets( + projects=projects_to_init, + ws_context=ws_context, + initialize_all_handlers=True, + ) + except runner_manager.RunnerFailedToStart as exc: + _notify_all_clients("server/userMessage", { + "message": f"Starting runners failed: {exc.message}. " + f"Did you run `finecode prepare-envs`?", + "type": "ERROR", + }) + + # If config overrides were set before this addDir call (e.g. standalone CLI mode), + # apply them to the newly discovered projects and push to their running runners. 
+ if ws_context.handler_config_overrides and projects_to_init: + action_names = list(ws_context.handler_config_overrides.keys()) + _apply_config_overrides_to_projects(projects_to_init, action_names, ws_context.handler_config_overrides) + try: + async with asyncio.TaskGroup() as tg: + for project in projects_to_init: + runners = ws_context.ws_projects_extension_runners.get(project.dir_path, {}) + for runner in runners.values(): + if runner.status == RunnerStatus.RUNNING: + tg.create_task( + runner_manager.update_runner_config( + runner=runner, + project=project, + handlers_to_initialize=None, + ws_context=ws_context, + ) + ) + except* Exception as eg: + for exc in eg.exceptions: + logger.warning(f"Failed to push config update to runner: {exc}") + + return {"projects": [_project_to_dict(p) for p in projects_to_init]} + + +async def _handle_remove_dir( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Remove a workspace directory. Stops runners, removes affected projects.""" + from finecode.wm_server.runner import runner_manager + + dir_path = pathlib.Path(params["dirPath"]) + logger.trace(f'Remove ws dir: {dir_path}') + ws_context.ws_dirs_paths.remove(dir_path) + + for project_dir in list(ws_context.ws_projects.keys()): + if not project_dir.is_relative_to(dir_path): + continue + + # Keep if the project is also under another remaining ws_dir. 
+ keep = any( + project_dir.is_relative_to(d) for d in ws_context.ws_dirs_paths + ) + if keep: + continue + + runners = ws_context.ws_projects_extension_runners.get(project_dir, {}) + for runner in runners.values(): + await runner_manager.stop_extension_runner(runner=runner) + del ws_context.ws_projects[project_dir] + ws_context.ws_projects_raw_configs.pop(project_dir, None) + + return {} + + +async def _handle_list_actions( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """List available actions, optionally filtered by project path.""" + project_filter = (params or {}).get("project") + actions = [] + for project in ws_context.ws_projects.values(): + if project_filter and str(project.dir_path) != project_filter: + continue + if not isinstance(project, domain.CollectedProject): + continue + for action in project.actions: + actions.append({ + "name": action.name, + "source": action.source, + "project": str(project.dir_path), + "handlers": [ + {"name": h.name, "source": h.source, "env": h.env} + for h in action.handlers + ], + }) + return {"actions": actions} + + +async def _handle_run_action( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Run an action on a project.""" + params = params or {} + action_name = params.get("action") + project_name = params.get("project") + action_params = params.get("params", {}) + options = params.get("options", {}) + + if not action_name: + raise ValueError("action parameter is required") + if not project_name: + raise ValueError("project parameter is required") + + # Find the project by its absolute directory path (canonical external identifier) + project = _find_project_by_path(ws_context, project_name) + + if project is None: + raise ValueError(f"Project '{project_name}' not found") + if not isinstance(project, domain.CollectedProject): + raise ValueError( + f"Project '{project_name}' actions are not collected yet. 
" + "Ensure the project is initialized before running actions." + ) + + # Import run_service here to avoid circular imports + from finecode.wm_server.services import run_service + + result_format_strs: list[str] = options.get("resultFormats", ["json"]) + result_formats = [ + run_service.RunResultFormat(fmt) + for fmt in result_format_strs + if fmt in ("json", "string") + ] + trigger = run_service.RunActionTrigger(options.get("trigger", "unknown")) + dev_env = run_service.DevEnv(options.get("devEnv", "cli")) + + try: + result = await run_service.run_action( + action_name=action_name, + params=action_params, + project_def=project, + ws_context=ws_context, + run_trigger=trigger, + dev_env=dev_env, + result_formats=result_formats, + initialize_all_handlers=True, + ) + return { + "resultByFormat": result.result_by_format, + "returnCode": result.return_code, + } + except run_service.ActionRunFailed: + raise + +from finecode.wm_server.services.action_tree import ( + _handle_get_tree, +) +from finecode.wm_server.services.document_sync import ( + handle_documents_opened, + handle_documents_closed, + handle_documents_changed, +) + + +async def _handle_actions_reload( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Reload an action's handlers in all relevant extension runners. 
+ + Params: ``{"actionNodeId": "project_path::action_name"}`` + Result: ``{}`` + """ + from finecode.wm_server.runner import runner_client + + params = params or {} + action_node_id = params.get("actionNodeId", "") + parts = action_node_id.split("::") + if len(parts) < 2: + raise ValueError(f"Invalid action_node_id: {action_node_id!r}") + + project_path = pathlib.Path(parts[0]) + action_name = parts[1] + + runners_by_env = ws_context.ws_projects_extension_runners.get(project_path, {}) + for runner in runners_by_env.values(): + await runner_client.reload_action(runner, action_name) + + return {} + + +async def _handle_runners_list( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """List all extension runners and their status. + + Result: ``{"runners": [{"projectPath", "envName", "status", "readableId"}]}`` + """ + from finecode.wm_server.runner import runner_client + + runners = [] + for project_path, runners_by_env in ws_context.ws_projects_extension_runners.items(): + for env_name, runner in runners_by_env.items(): + runners.append({ + "projectPath": str(project_path), + "envName": env_name, + "status": runner.status.name, + "readableId": runner.readable_id, + }) + return {"runners": runners} + + +async def _handle_runners_restart( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Restart a specific extension runner. 
+ + Params: ``{"runnerWorkingDir": "/abs/path", "envName": "dev_workspace", "debug": false}`` + Result: ``{}`` + """ + from finecode.wm_server.runner import runner_manager + + params = params or {} + runner_working_dir = params.get("runnerWorkingDir") + env_name = params.get("envName") + debug = params.get("debug", False) + + if not runner_working_dir or not env_name: + raise ValueError("runner_working_dir and env_name are required") + + await runner_manager.restart_extension_runner( + runner_working_dir_path=pathlib.Path(runner_working_dir), + env_name=env_name, + ws_context=ws_context, + debug=debug, + ) + return {} + + +async def _handle_start_runners( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Start extension runners for all (or specified) projects. + + Complements any runners already running — only missing runners are started. + Resolves presets so that ``project.actions`` reflects preset-defined handlers. + + Params: ``{"projects": ["project_name", ...]}`` (optional, default: all projects) + Result: ``{}`` + """ + from finecode.wm_server.runner import runner_manager + + params = params or {} + project_names: list[str] | None = params.get("projects") + + projects = list(ws_context.ws_projects.values()) + if project_names is not None: + projects = [p for p in projects if str(p.dir_path) in project_names] + + try: + await runner_manager.start_runners_with_presets( + projects=projects, + ws_context=ws_context, + ) + except runner_manager.RunnerFailedToStart as exc: + raise ValueError(f"Starting runners failed: {exc.message}") from exc + + return {} + + +async def _handle_runners_check_env( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Check whether an environment is valid for a given project. 
+ + Params: ``{"project": "/abs/path/to/project", "envName": "dev_workspace"}`` + Result: ``{"valid": bool}`` + """ + from finecode.wm_server.runner import runner_manager + + params = params or {} + project_name = params.get("project") + env_name = params.get("envName") + + if not project_name or not env_name: + raise ValueError("project and envName are required") + + project = _find_project_by_path(ws_context, project_name) + if project is None: + raise ValueError(f"Project '{project_name}' not found") + + valid = await runner_manager.check_runner( + runner_dir=project.dir_path, env_name=env_name + ) + return {"valid": valid} + + +async def _handle_runners_remove_env( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Remove an environment for a given project. + + Stops the runner if running, then deletes the environment directory. + + Params: ``{"project": "/abs/path/to/project", "envName": "dev_workspace"}`` + Result: ``{}`` + """ + from finecode.wm_server.runner import runner_manager + + params = params or {} + project_name = params.get("project") + env_name = params.get("envName") + + if not project_name or not env_name: + raise ValueError("project and envName are required") + + project = _find_project_by_path(ws_context, project_name) + if project is None: + raise ValueError(f"Project '{project_name}' not found") + + # Stop the runner if it is currently running. + runners = ws_context.ws_projects_extension_runners.get(project.dir_path, {}) + runner = runners.get(env_name) + if runner is not None: + await runner_manager.stop_extension_runner(runner=runner) + + runner_manager.remove_runner_venv(runner_dir=project.dir_path, env_name=env_name) + return {} + + +async def _handle_server_get_info( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Handle ``server/getInfo``. + + Returns static information about the running WM Server instance, + including the path to its log file. 
+ """ + return { + "logFilePath": str(_log_file_path) if _log_file_path is not None else None, + } + + +async def _handle_server_reset( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Reset the server state. + + Result: ``{}`` + """ + logger.info("FineCode API: server reset requested") + return {} + + +async def _handle_server_shutdown( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Shut down the WM server. + + Responds with ``{}`` and then stops the server on the next event-loop + iteration, giving the transport layer time to flush the response. + + Result: ``{}`` + """ + logger.info("FineCode API: shutdown requested by client") + asyncio.get_event_loop().call_soon(stop) + return {} + + +async def _handle_set_config_overrides( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Handle ``workspace/setConfigOverrides``. + + Stores handler config overrides persistently in the workspace context so that + they are applied to all subsequent action runs. These overrides survive across + multiple requests and do not require runners to be stopped first. + + If extension runners are already running they receive a config-update push + immediately; their initialized handlers are dropped and will be re-initialized + with the new config on the next run. 
+ """ + from finecode.wm_server.runner import runner_manager + from finecode.wm_server.runner.runner_client import RunnerStatus + + params = params or {} + overrides: dict = params.get("overrides", {}) + + ws_context.handler_config_overrides = overrides + + # Apply to all existing project domain objects so that project.action_handler_configs + # reflects the new overrides + all_projects = list(ws_context.ws_projects.values()) + action_names = list(overrides.keys()) + if all_projects and action_names: + _apply_config_overrides_to_projects(all_projects, action_names, overrides) + + # Push the updated config to any already-running runners so they drop their + # initialized handlers and pick up the new config on the next invocation. + try: + async with asyncio.TaskGroup() as tg: + for project_path, runners_by_env in ws_context.ws_projects_extension_runners.items(): + project = ws_context.ws_projects.get(project_path) + if project is None or not isinstance(project, domain.CollectedProject): + continue + for runner in runners_by_env.values(): + if runner.status == RunnerStatus.RUNNING: + tg.create_task( + runner_manager.update_runner_config( + runner=runner, + project=project, + handlers_to_initialize=None, + ws_context=ws_context, + ) + ) + except* Exception as eg: + for exc in eg.exceptions: + logger.warning(f"Failed to push config update to runner: {exc}") + + return {} + + +def _apply_config_overrides_to_projects( + projects: list[domain.Project], + actions: list[str], + config_overrides: dict[str, dict[str, dict[str, typing.Any]]], +) -> dict[pathlib.Path, dict[str, dict[str, typing.Any]]]: + """Apply handler config overrides to project.action_handler_configs. + + ``config_overrides`` format: ``{action_name: {handler_name_or_"": {param: value}}}`` + where the empty-string key ``""`` means all handlers of that action. + + Returns the original ``action_handler_configs`` per project. 
+ """ + originals: dict[pathlib.Path, dict[str, dict[str, typing.Any]]] = {} + actions_set = set(actions) + for project in projects: + if not isinstance(project, domain.CollectedProject): + continue + originals[project.dir_path] = { + source: dict(cfg) + for source, cfg in project.action_handler_configs.items() + } + for action in project.actions: + if action.name not in actions_set: + continue + action_overrides = config_overrides.get(action.name, {}) + if not action_overrides: + continue + action_level = action_overrides.get("", {}) + for handler in action.handlers: + handler_specific = action_overrides.get(handler.name, {}) + merged = {**action_level, **handler_specific} + if merged: + project.action_handler_configs[handler.source] = { + **(project.action_handler_configs.get(handler.source) or {}), + **merged, + } + return originals + + +async def _handle_run_batch( + params: dict | None, ws_context: context.WorkspaceContext +) -> typing.Any: + """Run multiple actions across multiple (or all) projects. 
+ + Params: + actions: list[str] - action names to run + projects: list[str] | None - project paths (absolute) to filter; absent/null means all projects + params: dict - action payload shared across all projects + params_by_project: dict[str, dict] - per-project payload overrides keyed by project path string + options: + concurrently: bool - run actions concurrently within each project (default false) + result_formats: list[str] - "string" and/or "json" (default ["string"]) + trigger: str - run trigger (default "user") + dev_env: str - dev environment (default "cli") + + Result: {"results": {project_path_str: {action_name: {"resultByFormat": ..., "returnCode": int}}}, + "returnCode": int} + """ + from finecode.wm_server.services import run_service + + params = params or {} + actions: list[str] = params.get("actions", []) + project_names: list[str] | None = params.get("projects") + action_params: dict = params.get("params", {}) + params_by_project: dict[str, dict] = params.get("paramsByProject", {}) + options: dict = params.get("options", {}) + + concurrently: bool = options.get("concurrently", False) + result_format_strs: list[str] = options.get("resultFormats", ["string"]) + result_formats = [ + run_service.RunResultFormat(fmt) + for fmt in result_format_strs + if fmt in ("json", "string") + ] + trigger = run_service.RunActionTrigger(options.get("trigger", "user")) + dev_env = run_service.DevEnv(options.get("devEnv", "cli")) + + if not actions: + raise ValueError("actions list is required and must be non-empty") + + logger.debug(f"runBatch: actions={actions} projects={project_names} formats={result_format_strs}") + + # Build actions_by_project (path -> [action_names]) + if project_names is not None: + actions_by_project: dict[pathlib.Path, list[str]] = {} + for project_path_str in project_names: + project = _find_project_by_path(ws_context, project_path_str) + if project is None: + raise ValueError(f"Project '{project_path_str}' not found") + 
actions_by_project[project.dir_path] = list(actions) + else: + actions_by_project = run_service.find_projects_with_actions(ws_context, actions) + if not actions_by_project: + all_projects = list(ws_context.ws_projects.keys()) + projects_with_actions = { + str(p): [a.name for a in proj.actions] + for p, proj in ws_context.ws_projects.items() + if hasattr(proj, "actions") and proj.actions + } + logger.warning( + f"runBatch: no projects found with actions={actions}. " + f"Known projects: {[str(p) for p in all_projects]}. " + f"Actions per project: {projects_with_actions}" + ) + raise ValueError(f"No projects found with actions: {actions}") + + await run_service.start_required_environments( + actions_by_project, ws_context, update_config_in_running_runners=True + ) + + result_by_project = await run_service.run_actions_in_projects( + actions_by_project=actions_by_project, + action_payload=action_params, + ws_context=ws_context, + concurrently=concurrently, + result_formats=result_formats, + run_trigger=trigger, + dev_env=dev_env, + payload_overrides_by_project=params_by_project, + ) + + overall_return_code = 0 + results: dict[str, dict] = {} + for project_path, actions_result in result_by_project.items(): + project_results: dict[str, dict] = {} + for action_name, response in actions_result.items(): + overall_return_code |= response.return_code + project_results[action_name] = { + "resultByFormat": response.result_by_format, + "returnCode": response.return_code, + } + results[str(project_path)] = project_results + + logger.debug(f"runBatch: done, projects_count={len(results)} returnCode={overall_return_code}") + return { + "results": results, + "returnCode": overall_return_code, + } + + +# -- helpers --------------------------------------------------------------- + +def _notify_client(writer: asyncio.StreamWriter, method: str, params: dict) -> None: + """Send a notification to a single client only. 
+ + Unlike ``_notify_all_clients`` this helper targets the provided writer, + which is useful for streaming partial results back to the request originator + without broadcasting to every connected client. + """ + msg = {"jsonrpc": "2.0", "method": method, "params": params} + try: + _write_message(writer, msg) + except Exception: + logger.trace("FineCode API: failed to notify client, skipping") + + +# -- Request handlers ------------------------------------------------------ + +async def _handle_run_with_partial_results( + params: dict | None, + ws_context: context.WorkspaceContext, + writer: asyncio.StreamWriter, +) -> dict: + """Handle the ``actions/runWithPartialResults`` request. + + The handler uses :mod:`partial_results_service` to obtain an async iterator + of partial values and forwards them to the requesting client only. When the + iterator completes an aggregated result dict is returned exactly as the + ``actions/run`` method would produce. + """ + if params is None: + raise ValueError("params required") + action_name = params.get("action") + token = params.get("partialResultToken") + if not action_name or token is None: + raise ValueError("action and partial_result_token are required") + project_path = params.get("project", "") + options = params.get("options", {}) + + from finecode.wm_server.services import run_service, partial_results_service + + trigger = run_service.RunActionTrigger(options.get("trigger", "system")) + dev_env = run_service.DevEnv(options.get("devEnv", "ide")) + result_formats = options.get("resultFormats", ["json"]) + + logger.trace(f"runWithPartialResults: action={action_name} project={project_path!r} token={token} formats={result_formats}") + + stream = await partial_results_service.run_action_with_partial_results( + action_name=action_name, + project_path=project_path, + params=params.get("params", {}), + partial_result_token=token, + run_trigger=trigger, + dev_env=dev_env, + ws_context=ws_context, + result_formats=result_formats, 
+ ) + + partial_count = 0 + async for value in stream: + partial_count += 1 + logger.trace(f"runWithPartialResults: sending partial #{partial_count} for token={token}, keys={list(value.keys()) if isinstance(value, dict) else type(value)}") + _notify_client( + writer, + "actions/partialResult", + {"token": token, "value": value}, + ) + await writer.drain() + + final = await stream.final_result() + logger.trace(f"runWithPartialResults: done, sent {partial_count} partials, final keys={list(final.keys()) if isinstance(final, dict) else type(final)}") + return final + + +async def _handle_run_with_partial_results_task( + params: dict | None, + ws_context: context.WorkspaceContext, + writer: asyncio.StreamWriter, + req_id: int | str, +) -> None: + """Task to handle the ``actions/runWithPartialResults`` request asynchronously. + + This runs in a separate task to avoid blocking the client handler loop + during long-running actions. + """ + try: + result = await _handle_run_with_partial_results( + params, ws_context, writer + ) + _write_message(writer, _jsonrpc_response(req_id, result)) + await writer.drain() + except _NotImplementedError as exc: + _write_message( + writer, + _jsonrpc_error(req_id, NOT_IMPLEMENTED_CODE, str(exc)), + ) + await writer.drain() + except Exception as exc: + logger.exception( + "FineCode API: error handling actions/runWithPartialResults" + ) + _write_message( + writer, _jsonrpc_error(req_id, -32603, str(exc)) + ) + await writer.drain() + + +async def _handle_get_payload_schemas( + params: dict | None, ws_context: context.WorkspaceContext +) -> dict: + """Return payload schemas for the given actions in a project. + + Params: ``{"project": "/abs/path/to/project", "action_names": ["lint", "format"]}`` + Result: ``{"schemas": {"lint": {...} | null, "format": {...} | null}}`` + + Schemas are fetched on-demand from Extension Runners. The ``dev_workspace`` + runner is tried first (fast path). 
For actions whose class is not importable + there, the runner for each handler env is tried as a fallback. + + Results are cached in ``ws_context.ws_action_schemas``. + """ + from finecode.wm_server.runner import runner_client + + params = params or {} + project_path = params.get("project") + action_names: list[str] = params.get("action_names", []) + + if not project_path: + raise ValueError("project parameter is required") + + project = _find_project_by_path(ws_context, project_path) + if project is None: + raise ValueError(f"Project '{project_path}' not found") + if not isinstance(project, domain.CollectedProject): + raise ValueError( + f"Project '{project_path}' actions are not collected yet. " + "Ensure the project is initialized before requesting schemas." + ) + + cache = ws_context.ws_action_schemas.setdefault(project.dir_path, {}) + missing = [name for name in action_names if name not in cache] + + if missing: + runners_by_env = ws_context.ws_projects_extension_runners.get(project.dir_path, {}) + + # Phase 1: query dev_workspace runner (covers all finecode_extension_api actions) + dev_runner = runners_by_env.get("dev_workspace") + if dev_runner is not None and dev_runner.status == runner_client.RunnerStatus.RUNNING: + try: + schemas = await runner_client.get_payload_schemas(dev_runner) + cache.update(schemas) + except Exception as exc: + logger.debug(f"Failed to get payload schemas from dev_workspace runner: {exc}") + + # Phase 2: for actions still None, try the handler env runners + still_missing = [name for name in missing if cache.get(name) is None] + for action_name in still_missing: + action = next((a for a in project.actions if a.name == action_name), None) + if action is None: + continue + envs_to_try = {h.env for h in action.handlers if h.env and h.env != "dev_workspace"} + for env_name in envs_to_try: + runner = runners_by_env.get(env_name) + if runner is None or runner.status != runner_client.RunnerStatus.RUNNING: + continue + try: + schemas = 
await runner_client.get_payload_schemas(runner) + if schemas.get(action_name) is not None: + cache[action_name] = schemas[action_name] + break + except Exception as exc: + logger.debug( + f"Failed to get payload schemas from runner '{env_name}': {exc}" + ) + + return {"schemas": {name: cache.get(name) for name in action_names}} + + +# -- Method dispatch tables ------------------------------------------------ + +_METHODS: dict[str, MethodHandler] = { + # workspace/ + "workspace/listProjects": _handle_list_projects, + "workspace/findProjectForFile": _handle_find_project_for_file, + "workspace/addDir": _handle_add_dir, + "workspace/removeDir": _handle_remove_dir, + "workspace/setConfigOverrides": _handle_set_config_overrides, + "workspace/getProjectRawConfig": _handle_get_project_raw_config, + "workspace/startRunners": _handle_start_runners, + # actions/ + "actions/list": _handle_list_actions, + "actions/getTree": _handle_get_tree, + "actions/getPayloadSchemas": _handle_get_payload_schemas, + "actions/run": _handle_run_action, + "actions/runBatch": _handle_run_batch, + # (runWithPartialResults is handled specially in _handle_client) + "actions/reload": _handle_actions_reload, + # runners: + "runners/list": _handle_runners_list, + "runners/restart": _handle_runners_restart, + "runners/checkEnv": _handle_runners_check_env, + "runners/removeEnv": _handle_runners_remove_env, + # server/ + "server/getInfo": _handle_server_get_info, + "server/reset": _handle_server_reset, + "server/shutdown": _handle_server_shutdown, +} + +_NOTIFICATIONS: dict[str, NotificationHandler] = { + # documents/ + "documents/opened": handle_documents_opened, + "documents/closed": handle_documents_closed, + "documents/changed": handle_documents_changed, +} + + +# --------------------------------------------------------------------------- +# Connection tracking and client handler +# --------------------------------------------------------------------------- + +_connected_clients: 
set[asyncio.StreamWriter] = set() +_auto_stop_task: asyncio.Task | None = None +_no_client_timeout_task: asyncio.Task | None = None +_server: asyncio.Server | None = None +_discovery_file: pathlib.Path | None = None +_had_client: bool = False +_running_partial_result_tasks: dict[asyncio.StreamWriter, set[asyncio.Task]] = {} +_client_labels: dict[asyncio.StreamWriter, str] = {} +_disconnect_timeout: int = DISCONNECT_TIMEOUT_SECONDS + + +async def _schedule_auto_stop() -> None: + """Wait after the last client disconnects, then stop the server.""" + await asyncio.sleep(_disconnect_timeout) + if not _connected_clients: + logger.info(f"FineCode API: no clients connected for {_disconnect_timeout}s, shutting down") + stop() + + +async def _no_client_timeout() -> None: + """Stop the server if no client connects within the timeout after startup.""" + await asyncio.sleep(NO_CLIENT_TIMEOUT_SECONDS) + if not _had_client: + logger.info( + f"FineCode API: no client connected within {NO_CLIENT_TIMEOUT_SECONDS}s after startup, shutting down" + ) + stop() + + +async def _handle_client( + reader: asyncio.StreamReader, + writer: asyncio.StreamWriter, + ws_context: context.WorkspaceContext, +) -> None: + global _auto_stop_task, _had_client, _no_client_timeout_task + + peer = writer.get_extra_info("peername") + label = str(peer) + _client_labels[writer] = label + logger.info(f"FineCode API: client connected from {peer}") + _connected_clients.add(writer) + _had_client = True + + # Cancel the initial no-client timeout since a client connected. + if _no_client_timeout_task is not None and not _no_client_timeout_task.done(): + _no_client_timeout_task.cancel() + _no_client_timeout_task = None + + # Cancel pending auto-stop since a client connected. 
+ if _auto_stop_task is not None and not _auto_stop_task.done(): + _auto_stop_task.cancel() + _auto_stop_task = None + + try: + while True: + msg = await _read_message(reader) + if msg is None: + break + + req_id = msg.get("id") + method = msg.get("method") + params = msg.get("params") + is_notification = req_id is None + + if method is None: + if not is_notification: + _write_message( + writer, _jsonrpc_error(req_id, -32600, "Invalid request: no method") + ) + await writer.drain() + continue + + # Notifications (no id) — dispatch and don't respond. + if is_notification: + notification_handler = _NOTIFICATIONS.get(method) + if notification_handler is not None: + logger.trace(f"[{label}] Received notification {method}") + try: + await notification_handler(params, ws_context) + except Exception as exc: + logger.exception(f"FineCode API: error in notification {method} (client: {label})") + else: + logger.trace(f"[{label}] FineCode API: unknown notification {method}, ignoring") + continue + + # Requests (has id) — dispatch and respond. + # ``client/initialize`` and ``actions/runWithPartialResults`` are + # handled specially because they need access to the writer. + if method == "client/initialize": + new_label = (params or {}).get("clientId") + if new_label: + logger.info(f"FineCode API: client {label} identified as '{new_label}'") + _client_labels[writer] = new_label + label = new_label + _write_message(writer, _jsonrpc_response(req_id, { + "logFilePath": str(_log_file_path) if _log_file_path is not None else None, + })) + await writer.drain() + continue + + if method == "actions/runWithPartialResults": + # Spawn a task to handle this long-running request without blocking + # the client handler loop. This allows the client to send other + # requests while this action is running. 
+ task = asyncio.create_task( + _handle_run_with_partial_results_task( + params, ws_context, writer, req_id + ) + ) + # Track the task associated with this client + if writer not in _running_partial_result_tasks: + _running_partial_result_tasks[writer] = set() + _running_partial_result_tasks[writer].add(task) + task.add_done_callback(lambda t: _running_partial_result_tasks[writer].discard(t) if writer in _running_partial_result_tasks else None) + continue + + handler = _METHODS.get(method) + if handler is None: + _write_message( + writer, + _jsonrpc_error(req_id, -32601, f"Method not found: {method}"), + ) + await writer.drain() + continue + + try: + result = await handler(params, ws_context) + _write_message(writer, _jsonrpc_response(req_id, result)) + await writer.drain() + except _NotImplementedError as exc: + _write_message( + writer, _jsonrpc_error(req_id, NOT_IMPLEMENTED_CODE, str(exc)) + ) + await writer.drain() + except Exception as exc: + logger.exception(f"FineCode API: error handling {method} (client: {label})") + _write_message( + writer, _jsonrpc_error(req_id, -32603, str(exc)) + ) + await writer.drain() + except (asyncio.IncompleteReadError, ConnectionResetError): + pass + finally: + logger.info(f"FineCode API: client disconnected ({label})") + _connected_clients.discard(writer) + _client_labels.pop(writer, None) + + # Cancel any running partial result tasks for this client + if writer in _running_partial_result_tasks: + for task in _running_partial_result_tasks[writer]: + task.cancel() + del _running_partial_result_tasks[writer] + + writer.close() + await writer.wait_closed() + + # Schedule auto-stop if no clients remain. 
+ if not _connected_clients: + _auto_stop_task = asyncio.create_task(_schedule_auto_stop()) + + +# --------------------------------------------------------------------------- +# Server lifecycle +# --------------------------------------------------------------------------- + + +def _find_free_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("127.0.0.1", 0)) + return s.getsockname()[1] + + +async def start( + ws_context: context.WorkspaceContext, + port_file: pathlib.Path | None = None, + disconnect_timeout: int = DISCONNECT_TIMEOUT_SECONDS, +) -> None: + """Start the FineCode API TCP server and write the discovery file. + + Args: + ws_context: Shared workspace context. + port_file: Path to write the listening port to. Defaults to the shared + discovery file (``_cache_dir() / "wm_port"``). Pass a custom path + when starting a dedicated instance so it does not overwrite the shared + server's discovery file. + disconnect_timeout: Seconds to wait after the last client disconnects + before shutting down. Defaults to DISCONNECT_TIMEOUT_SECONDS (30). + """ + global _server, _discovery_file, _no_client_timeout_task, _had_client, _disconnect_timeout + _had_client = False + _disconnect_timeout = disconnect_timeout + port = _find_free_port() + + _server = await asyncio.start_server( + lambda r, w: _handle_client(r, w, ws_context), + host="127.0.0.1", + port=port, + ) + + # Write discovery file so clients can find us. + _discovery_file = port_file if port_file is not None else discovery_file_path() + _discovery_file.parent.mkdir(parents=True, exist_ok=True) + _discovery_file.write_text(str(port)) + + logger.info(f"FineCode WM server listening on 127.0.0.1:{port}") + logger.info(f"Discovery file: {_discovery_file}") + + # Shut down if no client connects within the timeout. 
+ _no_client_timeout_task = asyncio.create_task(_no_client_timeout()) + + try: + async with _server: + await _server.serve_forever() + finally: + stop() + # Clean up workspace resources (runners, IO thread). + from finecode.wm_server.services import shutdown_service + shutdown_service.on_shutdown(ws_context) + + +def stop() -> None: + """Stop the WM server and remove the discovery file.""" + global _server, _discovery_file + + if _server is not None: + _server.close() + _server = None + + if _discovery_file is not None and _discovery_file.exists(): + try: + _discovery_file.unlink() + logger.trace(f"Removed API discovery file: {_discovery_file}") + except OSError: + pass + _discovery_file = None + + # Cancel any running partial result tasks + for tasks in _running_partial_result_tasks.values(): + for task in tasks: + task.cancel() + _running_partial_result_tasks.clear() + + +# --------------------------------------------------------------------------- +# Standalone startup (with workspace initialization) +# --------------------------------------------------------------------------- + + +def _register_callbacks() -> None: + """Register runner_manager and user_messages callbacks that broadcast + server→client notifications.""" + from finecode import user_messages + from finecode.wm_server.runner import runner_manager + + async def on_project_changed(project: domain.Project) -> None: + _notify_all_clients("actions/treeChanged", { + "node": { + "node_id": str(project.dir_path), + "name": project.name, + "node_type": 1, + "status": project.status.name, + "subnodes": [], + }, + }) + + async def on_user_message(message: str, message_type: str) -> None: + _notify_all_clients("server/userMessage", { + "message": message, + "type": message_type.upper(), + }) + + runner_manager.project_changed_callback = on_project_changed + user_messages._notification_sender = on_user_message + + +async def start_standalone( + port_file: pathlib.Path | None = None, + disconnect_timeout: int = 
DISCONNECT_TIMEOUT_SECONDS, +) -> None: + """Start the WM server as a standalone process with its own WorkspaceContext. + + Args: + port_file: Optional custom path to write the listening port to. Used by + dedicated instances started via ``start_own_server()`` so they do not + overwrite the shared server's discovery file. + disconnect_timeout: Seconds to wait after the last client disconnects + before shutting down. + """ + ws_context = context.WorkspaceContext([]) + _register_callbacks() + await start(ws_context, port_file=port_file, disconnect_timeout=disconnect_timeout) diff --git a/tests/api/test_read_configs.py b/tests/api/test_read_configs.py deleted file mode 100644 index cfe7a6b5..00000000 --- a/tests/api/test_read_configs.py +++ /dev/null @@ -1,28 +0,0 @@ -from pathlib import Path - -import pytest - -from finecode.workspace_manager import context -from finecode.workspace_manager.config.read_configs import read_configs - - -@pytest.fixture -def nested_project_ws_context(): - ws_context = context.WorkspaceContext( - ws_dirs_pathes=[Path(__file__).parent.parent / "nested_package"] - ) - return ws_context - - -def test__read_configs__reads_py_packages_with_finecode( - nested_project_ws_context: context.WorkspaceContext, -): - read_configs(ws_context=nested_project_ws_context) - - ... - - -def test__read_configs__reads_py_packages_without_finecode(): ... - - -def test__read_configs__saves_raw_configs(): ... 
diff --git a/tests/conftest.py b/tests/conftest.py index 58747742..af7e4799 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,5 @@ import pytest -from .extension_runner.fixtures import runner_client_channel -from .workspace_manager.server.fixtures import client_channel - @pytest.fixture def anyio_backend(): diff --git a/tests/extension_runner/client/finecode/extension_runner/__init__.py b/tests/extension_runner/client/finecode/extension_runner/__init__.py deleted file mode 100644 index 5316be0e..00000000 --- a/tests/extension_runner/client/finecode/extension_runner/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass - -from modapp.client import BaseChannel -from modapp.models.dataclass import DataclassModel as BaseModel - - -class ExtensionRunnerServiceCls: - async def run_action( - self, channel: BaseChannel, request: RunActionRequest - ) -> RunActionResponse: - return await channel.send_unary_unary( - "/finecode.extension_runner.ExtensionRunnerService/RunAction", - request, - RunActionResponse, - ) - - async def update_config( - self, - channel: BaseChannel, - request: UpdateConfigRequest, - ) -> UpdateConfigResponse: - return await channel.send_unary_unary( - "/finecode.extension_runner.ExtensionRunnerService/UpdateConfig", - request, - UpdateConfigResponse, - ) - - -ExtensionRunnerService = ExtensionRunnerServiceCls() - - -@dataclass -class RunActionRequest(BaseModel): - action_name: str - apply_on: str # Path? - apply_on_text: str - - __modapp_path__ = "finecode.extension_runner.RunActionRequest" - - -@dataclass -class RunActionResponse(BaseModel): - result_text: str - - __modapp_path__ = "finecode.extension_runner.RunActionResponse" - - -@dataclass -class UpdateConfigRequest(BaseModel): - working_dir: str # Path? 
- config: dict[str, str] - - __modapp_path__ = "finecode.extension_runner.UpdateConfigRequest" - - -@dataclass -class UpdateConfigResponse(BaseModel): - __modapp_path__ = "finecode.extension_runner.UpdateConfigResponse" diff --git a/tests/extension_runner/fixtures.py b/tests/extension_runner/fixtures.py deleted file mode 100644 index 0f325330..00000000 --- a/tests/extension_runner/fixtures.py +++ /dev/null @@ -1,55 +0,0 @@ -import pytest -from modapp import Modapp -from modapp.channels.inmemory import InMemoryChannel -from modapp.client import Client -from modapp.converters.json import JsonConverter -from modapp.transports.inmemory import InMemoryTransport -from modapp.transports.inmemory_config import InMemoryTransportConfig - -import finecode.workspace_manager.main as workspace_manager_main -from finecode.extension_runner.api_routes import router -from finecode.extension_runner.api_routes import ws_context as global_ws_context - -pytestmark = pytest.mark.anyio - - -def _create_runner_app() -> Modapp: - app = Modapp( - set( - [ - InMemoryTransport( - config=InMemoryTransportConfig(), - converter=JsonConverter(), - ) - ], - ), - ) - - app.include_router(router) - return app - - -@pytest.fixture -async def runner_client_channel(): - app = _create_runner_app() - json_converter = JsonConverter() - try: - inmemory_transport = next( - transport - for transport in app.transports - if isinstance(transport, InMemoryTransport) - ) - except StopIteration as exception: - raise Exception( - "App configuration error. 
InMemory transport not found" - ) from exception - channel = InMemoryChannel(transport=inmemory_transport, converter=json_converter) - client = Client(channel=channel) - - await workspace_manager_main.start_in_ws_context(global_ws_context) - await app.run_async() - - try: - yield client.channel - finally: - app.stop() diff --git a/tests/extension_runner/test_run.py b/tests/extension_runner/test_run.py deleted file mode 100644 index 207db7f6..00000000 --- a/tests/extension_runner/test_run.py +++ /dev/null @@ -1,114 +0,0 @@ -from pathlib import Path - -import pytest - -from .client.finecode.extension_runner import ( - ExtensionRunnerService, - RunActionRequest, - RunActionResponse, - UpdateConfigRequest, -) - -pytestmark = pytest.mark.anyio - - -async def test__runs_existing_action(runner_client_channel): - list_ws_dir_path = Path(__file__).parent.parent / "list_ws" - cli_tool_dir_path = list_ws_dir_path / "cli_tool" - unformatted_src_path = cli_tool_dir_path / "cli_tool" / "unformatted.py" - update_config_request = UpdateConfigRequest( - working_dir=cli_tool_dir_path.as_posix(), config={} - ) - await ExtensionRunnerService.update_config( - runner_client_channel, update_config_request - ) - with open(unformatted_src_path, "r") as src_file: - src_content = src_file.read() - - request = RunActionRequest( - action_name="format", - apply_on=unformatted_src_path.as_posix(), - apply_on_text=src_content, - ) - - response = await ExtensionRunnerService.run_action( - channel=runner_client_channel, request=request - ) - - assert response == RunActionResponse( - result_text="""print("a") - - -print("b") -""" - ) - - -async def test__runs_existing_action_with_multiple_subactions(runner_client_channel): - list_ws_dir_path = Path(__file__).parent.parent / "list_ws" - cli_tool_dir_path = list_ws_dir_path / "cli_tool" - unformatted_src_path = ( - cli_tool_dir_path / "cli_tool" / "unformatted_with_imports.py" - ) - update_config_request = UpdateConfigRequest( - 
working_dir=cli_tool_dir_path.as_posix(), config={} - ) - await ExtensionRunnerService.update_config( - runner_client_channel, update_config_request - ) - with open(unformatted_src_path, "r") as src_file: - src_content = src_file.read() - - request = RunActionRequest( - action_name="format", - apply_on=unformatted_src_path.as_posix(), - apply_on_text=src_content, - ) - - response = await ExtensionRunnerService.run_action( - channel=runner_client_channel, request=request - ) - - assert response == RunActionResponse( - result_text="""import abc -import time -import typing - -print("a") - - -print("b") -""" - ) - - -async def test__runs_existing_action_from_preset(runner_client_channel): - list_ws_dir_path = Path(__file__).parent.parent / "list_ws" - ui_app_dir_path = list_ws_dir_path / "ui_app" - unformatted_src_path = ui_app_dir_path / "ui_app" / "unformatted.py" - update_config_request = UpdateConfigRequest( - working_dir=ui_app_dir_path.as_posix(), config={} - ) - await ExtensionRunnerService.update_config( - runner_client_channel, update_config_request - ) - with open(unformatted_src_path, "r") as src_file: - src_content = src_file.read() - - request = RunActionRequest( - action_name="format", - apply_on=unformatted_src_path.as_posix(), - apply_on_text=src_content, - ) - - response = await ExtensionRunnerService.run_action( - channel=runner_client_channel, request=request - ) - - assert response == RunActionResponse( - result_text="""print("a") - - -print("b") -""" - ) diff --git a/tests/workspace_manager/server/client/finecode/workspace_manager/__init__.py b/tests/workspace_manager/server/client/finecode/workspace_manager/__init__.py deleted file mode 100644 index e077757d..00000000 --- a/tests/workspace_manager/server/client/finecode/workspace_manager/__init__.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from enum import IntEnum - -from modapp.client import BaseChannel -from modapp.models.dataclass 
import DataclassModel - - -@dataclass -class BaseModel(DataclassModel): - __model_config__ = {**DataclassModel.__model_config__, "camelCase": True} - - -class WorkspaceManagerServiceCls: - async def list_actions( - self, channel: BaseChannel, request: ListActionsRequest - ) -> ListActionsResponse: - return await channel.send_unary_unary( - "/finecode.workspace_manager.WorkspaceManagerService/ListActions", - request, - ListActionsResponse, - ) - - async def add_workspace_dir( - self, channel: BaseChannel, request: AddWorkspaceDirRequest - ) -> AddWorkspaceDirResponse: - return await channel.send_unary_unary( - "/finecode.workspace_manager.WorkspaceManagerService/AddWorkspaceDir", - request, - AddWorkspaceDirResponse, - ) - - async def run_action( - self, channel: BaseChannel, request: RunActionRequest - ) -> RunActionResponse: - return await channel.send_unary_unary( - "/finecode.workspace_manager.WorkspaceManagerService/RunAction", - request, - RunActionResponse, - ) - - -WorkspaceManagerService = WorkspaceManagerServiceCls() - - -@dataclass -class ListActionsRequest(BaseModel): - parent_node_id: str - - __modapp_path__ = "finecode.workspace_manager.ListActionsRequest" - - -@dataclass -class ActionTreeNode(BaseModel): - node_id: str - name: str - subnodes: list[ActionTreeNode] - - class NodeType(IntEnum): - DIRECTORY = 0 - PROJECT = 1 - ACTION = 2 - PRESET = 3 - - __modapp_path__ = "finecode.workspace_manager.ActionTreeNode" - - -@dataclass -class ListActionsResponse(BaseModel): - nodes: list[ActionTreeNode] - - __modapp_path__ = "finecode.workspace_manager.ListActionsResponse" - - -@dataclass -class AddWorkspaceDirRequest(BaseModel): - dir_path: str - - __modapp_path__ = "finecode.workspace_manager.AddWorkspaceDirRequest" - - -@dataclass -class AddWorkspaceDirResponse(BaseModel): - __modapp_path__ = "finecode.workspace_manager.AddWorkspaceDirResponse" - - -@dataclass -class RunActionRequest(BaseModel): - action_node_id: str - apply_on: str # Path? 
- apply_on_text: str - - __modapp_path__ = "finecode.workspace_manager.RunActionRequest" - - -@dataclass -class RunActionResponse(BaseModel): - result_text: str - - __modapp_path__ = "finecode.workspace_manager.RunActionResponse" diff --git a/tests/workspace_manager/server/fixtures.py b/tests/workspace_manager/server/fixtures.py deleted file mode 100644 index 1a206dc4..00000000 --- a/tests/workspace_manager/server/fixtures.py +++ /dev/null @@ -1,55 +0,0 @@ -import pytest -from modapp import Modapp -from modapp.channels.inmemory import InMemoryChannel -from modapp.client import Client -from modapp.converters.json import JsonConverter -from modapp.transports.inmemory import InMemoryTransport -from modapp.transports.inmemory_config import InMemoryTransportConfig - -import finecode.workspace_manager.main as workspace_manager_main -from finecode.workspace_manager.server.api_routes import router -from finecode.workspace_manager.server.api_routes import ws_context as global_ws_context - -pytestmark = pytest.mark.anyio - - -def _create_manager_app() -> Modapp: - app = Modapp( - set( - [ - InMemoryTransport( - config=InMemoryTransportConfig(), - converter=JsonConverter(), - ) - ], - ), - ) - - app.include_router(router) - return app - - -@pytest.fixture -async def client_channel(): - app = _create_manager_app() - json_converter = JsonConverter() - try: - inmemory_transport = next( - transport - for transport in app.transports - if isinstance(transport, InMemoryTransport) - ) - except StopIteration as exception: - raise Exception( - "App configuration error. 
InMemory transport not found" - ) from exception - channel = InMemoryChannel(transport=inmemory_transport, converter=json_converter) - client = Client(channel=channel) - - await workspace_manager_main.start_in_ws_context(global_ws_context) - await app.run_async() - - try: - yield client.channel - finally: - app.stop() diff --git a/tests/workspace_manager/server/test_list_actions.py b/tests/workspace_manager/server/test_list_actions.py deleted file mode 100644 index 53b1ad2a..00000000 --- a/tests/workspace_manager/server/test_list_actions.py +++ /dev/null @@ -1,32 +0,0 @@ -from pathlib import Path - -from .client.finecode.workspace_manager import ( - AddWorkspaceDirRequest, - ListActionsRequest, - ListActionsResponse, - WorkspaceManagerService, -) - - -async def test__returns_correct_list(client_channel): - # ws dir 'list_ws': - # - project 'backend' - # - directory 'libraries' - # -- project 'domain' - # --- action - # --- preset - # ---- action - # ---- action - # - project 'cli_tool' - # -- local action - # -- action from project 'black' - list_ws_dir_path = Path(__file__).parent.parent / "list_ws" - add_ws_dir_request = AddWorkspaceDirRequest(dir_path=list_ws_dir_path.as_posix()) - await WorkspaceManagerService.add_workspace_dir(client_channel, add_ws_dir_request) - request = ListActionsRequest(parent_node_id="") - - response = await WorkspaceManagerService.list_actions( - channel=client_channel, request=request - ) - - assert response == ListActionsResponse(nodes=[]) diff --git a/tests/workspace_manager/server/test_run_action.py b/tests/workspace_manager/server/test_run_action.py deleted file mode 100644 index 1311d744..00000000 --- a/tests/workspace_manager/server/test_run_action.py +++ /dev/null @@ -1,117 +0,0 @@ -import asyncio -from pathlib import Path - -import pytest - -from .client.finecode.workspace_manager import ( - AddWorkspaceDirRequest, - ListActionsRequest, - RunActionRequest, - RunActionResponse, - WorkspaceManagerService, -) - -pytestmark = 
pytest.mark.anyio - - -async def test__runs_action_in_project(client_channel): - # workspace with single project - # TODO: move in fixture - list_ws_dir_path = Path(__file__).parent.parent.parent / "list_ws" - cli_tool_root_dir_path = list_ws_dir_path / "cli_tool" - unformatted_src_path = cli_tool_root_dir_path / "cli_tool" / "unformatted.py" - add_ws_dir_request = AddWorkspaceDirRequest( - dir_path=cli_tool_root_dir_path.as_posix() - ) - await WorkspaceManagerService.add_workspace_dir(client_channel, add_ws_dir_request) - - # workspace manager expects first list call to cache actions - request = ListActionsRequest(parent_node_id="") - await WorkspaceManagerService.list_actions(channel=client_channel, request=request) - - await asyncio.sleep(5) - - request = RunActionRequest( - action_node_id=f"{cli_tool_root_dir_path.as_posix()}::format", - apply_on=unformatted_src_path.as_posix(), - apply_on_text="", - ) - response = await WorkspaceManagerService.run_action( - channel=client_channel, request=request - ) - - assert response == RunActionResponse( - result_text="""print("a") - - -print("b") -""" - ) - - -async def test__runs_general_action_in_project(client_channel): - # workspace with single project - # TODO: move in fixture - list_ws_dir_path = Path(__file__).parent.parent.parent / "list_ws" - cli_tool_root_dir_path = list_ws_dir_path / "cli_tool" - unformatted_src_path = cli_tool_root_dir_path / "cli_tool" / "unformatted.py" - add_ws_dir_request = AddWorkspaceDirRequest( - dir_path=cli_tool_root_dir_path.as_posix() - ) - await WorkspaceManagerService.add_workspace_dir(client_channel, add_ws_dir_request) - - # workspace manager expects first list call to cache actions - request = ListActionsRequest(parent_node_id="") - await WorkspaceManagerService.list_actions(channel=client_channel, request=request) - - await asyncio.sleep(5) - - request = RunActionRequest( - action_node_id="format", - apply_on=unformatted_src_path.as_posix(), - apply_on_text="", - ) - response 
= await WorkspaceManagerService.run_action( - channel=client_channel, request=request - ) - - assert response == RunActionResponse( - result_text="""print("a") - - -print("b") -""" - ) - - -async def test__runs_action_in_one_of_projects(client_channel): - # workspace with multiple projects - # TODO: move in fixture - list_ws_dir_path = Path(__file__).parent.parent.parent / "list_ws" - cli_tool_root_dir_path = list_ws_dir_path / "cli_tool" - unformatted_src_path = cli_tool_root_dir_path / "cli_tool" / "unformatted.py" - add_ws_dir_request = AddWorkspaceDirRequest(dir_path=list_ws_dir_path.as_posix()) - await WorkspaceManagerService.add_workspace_dir(client_channel, add_ws_dir_request) - - # workspace manager expects first list call to cache actions - request = ListActionsRequest(parent_node_id="") - await WorkspaceManagerService.list_actions(channel=client_channel, request=request) - - # await asyncio.sleep(5) - - request = RunActionRequest( - action_node_id=f"{cli_tool_root_dir_path.as_posix()}::format", - apply_on=unformatted_src_path.as_posix(), - apply_on_text="", - ) - response = await WorkspaceManagerService.run_action( - channel=client_channel, request=request - ) - - assert response == RunActionResponse( - result_text="""print("a") - - -print("b") -""" - )