diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..5a225088a --- /dev/null +++ b/.dockerignore @@ -0,0 +1,65 @@ +# Include any files or directories that you don't want to be copied to your +# container here (e.g., local build artifacts, temporary files, etc.). +# +# For more help, visit the .dockerignore file reference guide at +# https://docs.docker.com/go/build-context-dockerignore/ + +**/.DS_Store +**/.classpath +**/.dockerignore +**/.env +**/.factorypath +**/.git +**/.gitignore +**/.idea +**/.project +**/.sts4-cache +**/.settings +**/.toolstarget +**/.vs +**/.vscode +**/.next +**/.cache +**/*.dbmdl +**/*.jfm +**/charts +**/docker-compose* +**/compose.y*ml +**/Dockerfile* +**/secrets.dev.yaml +**/values.dev.yaml +**/vendor +LICENSE +README.md +**/*.class +**/*.iml +**/*.ipr +**/*.iws +**/*.log +**/.apt_generated +**/.gradle +**/.gradletasknamecache +**/.nb-gradle +**/.springBeans +**/build +**/dist +**/gradle-app.setting +**/nbbuild +**/nbdist +**/nbproject/private +**/target +*.ctxt +.mtj.tmp +.mvn/timing.properties +buildNumber.properties +dependency-reduced-pom.xml +hs_err_pid* +pom.xml.next +pom.xml.releaseBackup +pom.xml.tag +pom.xml.versionsBackup +release.properties +replay_pid* +.github/ +ci/ +.github/ diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index f968b4d70..48985289c 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -21,29 +21,65 @@ jobs: build: runs-on: ubuntu-latest + outputs: + alpine_server_image: ${{ steps.meta.outputs.alpine_server_image }} + ubuntu_server_image: ${{ steps.meta.outputs.ubuntu_server_image }} steps: - uses: actions/checkout@v4 - - name: Set up JDK - uses: actions/setup-java@v4 - with: - java-version: '17' - java-package: 'jdk+fx' - distribution: 'zulu' + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Set build metadata + id: meta + run: | + echo 
"alpine_server_image=oie-ci-server:alpine-temurin21-${GITHUB_SHA}" >> "$GITHUB_OUTPUT" + echo "ubuntu_server_image=oie-ci-server:ubuntu-temurin21-${GITHUB_SHA}" >> "$GITHUB_OUTPUT" + if [ "${GITHUB_REF}" = "refs/heads/main" ]; then + echo "ANT_BUILD_ARGS=" >> "$GITHUB_ENV" + else + echo "ANT_BUILD_ARGS=-DdisableSigning=true -Dcoverage=true" >> "$GITHUB_ENV" + fi + + - name: Build Alpine production image + run: | + docker buildx build \ + --progress=plain \ + --build-arg "ANT_BUILD_ARGS=${ANT_BUILD_ARGS}" \ + --cache-from type=gha,scope=oie-build \ + --cache-to type=gha,scope=oie-build,mode=max \ + --target jre-run \ + -t "${{ steps.meta.outputs.alpine_server_image }}" \ + --load \ + . - - name: Build OIE (signed) - if: github.ref == 'refs/heads/main' - working-directory: server - run: ant -f mirth-build.xml + - name: Build Ubuntu production image + run: | + docker buildx build \ + --progress=plain \ + --build-arg "ANT_BUILD_ARGS=${ANT_BUILD_ARGS}" \ + --cache-from type=gha,scope=oie-build \ + --cache-to type=gha,scope=oie-build,mode=max \ + --target jdk-run \ + -t "${{ steps.meta.outputs.ubuntu_server_image }}" \ + --load \ + . - - name: Build OIE (unsigned) - if: github.ref != 'refs/heads/main' - working-directory: server - run: ant -f mirth-build.xml -DdisableSigning=true -Dcoverage=true + - name: Export build artifacts and test results + run: | + rm -rf docker-build-output + docker buildx build \ + --progress=plain \ + --build-arg "ANT_BUILD_ARGS=${ANT_BUILD_ARGS}" \ + --cache-from type=gha,scope=oie-build \ + --cache-to type=gha,scope=oie-build,mode=max \ + --target build-output-export \ + --output type=local,dest=docker-build-output \ + . 
- name: Package distribution - run: tar czf openintegrationengine.tar.gz -C server/ setup --transform 's|^setup|openintegrationengine/|' + run: tar czf openintegrationengine.tar.gz -C docker-build-output/app/server setup --transform 's|^setup|openintegrationengine/|' - name: Create artifact uses: actions/upload-artifact@v4 @@ -51,12 +87,32 @@ jobs: name: oie-build path: openintegrationengine.tar.gz + - name: Save Alpine server image + run: docker save "${{ steps.meta.outputs.alpine_server_image }}" | gzip > oie-server-image-alpine-temurin21.tar.gz + + - name: Upload Alpine server image + uses: actions/upload-artifact@v4 + with: + name: oie-server-image-alpine-temurin21 + path: oie-server-image-alpine-temurin21.tar.gz + + - name: Save Ubuntu server image + run: docker save "${{ steps.meta.outputs.ubuntu_server_image }}" | gzip > oie-server-image-ubuntu-temurin21.tar.gz + + - name: Upload Ubuntu server image + uses: actions/upload-artifact@v4 + with: + name: oie-server-image-ubuntu-temurin21 + path: oie-server-image-ubuntu-temurin21.tar.gz + - name: Stage Test Results if: (!cancelled()) run: | mkdir -p aggregate-test-results - # Copy the directory structures - cp -r --parents */build/test-results aggregate-test-results/ + cp -r --parents docker-build-output/app/client/build/test-results aggregate-test-results/ + cp -r --parents docker-build-output/app/command/build/test-results aggregate-test-results/ + cp -r --parents docker-build-output/app/donkey/build/test-results aggregate-test-results/ + cp -r --parents docker-build-output/app/server/build/test-results aggregate-test-results/ - name: Upload Test Results if: (!cancelled()) @@ -66,3 +122,53 @@ jobs: path: | aggregate-test-results/**/*.xml + docker_smoke: + runs-on: ubuntu-latest + needs: build + strategy: + fail-fast: false + matrix: + configuration: + - alpine-temurin21-derby + - alpine-temurin21-mysql + - alpine-temurin21-postgres + - alpine-temurin21-sqlserver + - ubuntu-temurin21-derby + - 
ubuntu-temurin21-postgres + + steps: + - uses: actions/checkout@v4 + + - name: Download server image + uses: actions/download-artifact@v4 + with: + name: ${{ startsWith(matrix.configuration, 'ubuntu-') && 'oie-server-image-ubuntu-temurin21' || 'oie-server-image-alpine-temurin21' }} + + - name: Load server image + run: | + IMAGE_ARCHIVE="${{ startsWith(matrix.configuration, 'ubuntu-') && 'oie-server-image-ubuntu-temurin21.tar.gz' || 'oie-server-image-alpine-temurin21.tar.gz' }}" + gunzip -c "$IMAGE_ARCHIVE" | docker load + + - name: Build runner image + run: docker build --progress=plain -t oie-ci-runner:${{ github.sha }} ci/runner + + - name: Boot and tear down configuration + run: | + docker run --rm \ + --add-host host.docker.internal:host-gateway \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v "$GITHUB_WORKSPACE:/workspace" \ + oie-ci-runner:${{ github.sha }} \ + --workspace /workspace \ + --configuration "${{ matrix.configuration }}" \ + --server-image "${{ startsWith(matrix.configuration, 'ubuntu-') && needs.build.outputs.ubuntu_server_image || needs.build.outputs.alpine_server_image }}" \ + --results-root ci/test-results + + - name: Upload Docker Smoke Test Results + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 + with: + name: Test Results Docker Smoke - ${{ matrix.configuration }} + path: | + ci/test-results/**/*.xml + diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..dd2b1fff8 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,107 @@ +# syntax=docker/dockerfile:1.19.0 +# SPDX-License-Identifier: MPL-2.0 +# SPDX-FileCopyrightText: 2025 Mitch Gaffigan + +# Stages: +# 1. Builder Stage: Compiles the application and resolves dependencies. Produces +# JAR files that can be deployed. +# 1a. Install dependencies +# 1b. Build the application +# 2. Runner Stage: Creates a lightweight image that runs the application using the JRE. 
+ +FROM ubuntu:noble-20251013 AS builder +WORKDIR /app +# sdkman requires bash +SHELL ["/bin/bash", "-c"] +ARG ANT_BUILD_ARGS="-DdisableSigning=true" + +# Stage 1a: Install dependencies +# Install necessary tools +COPY .sdkmanrc . +RUN apt-get update\ + && apt-get install -y zip curl\ + && curl -s "https://get.sdkman.io?ci=true" | bash \ + && source "$HOME/.sdkman/bin/sdkman-init.sh" && sdk env install \ + && rm -rf /var/lib/apt/lists/* + +# Stage 1b: Build the application +# Copy the entire source tree (excluding .dockerignore files), and build +COPY . . +WORKDIR /app/server +RUN source "$HOME/.sdkman/bin/sdkman-init.sh" \ + && ANT_OPTS="-Dfile.encoding=UTF8" ant -f mirth-build.xml ${ANT_BUILD_ARGS} + +# Stage 1c: Present artifacts for export if not running within docker +FROM scratch AS build-output-export +COPY --from=builder /app/server/setup /app/server/setup/ +COPY --from=builder /app/client/build/test-results /app/client/build/test-results/ +COPY --from=builder /app/command/build/test-results /app/command/build/test-results/ +COPY --from=builder /app/donkey/build/test-results /app/donkey/build/test-results/ +COPY --from=builder /app/server/build/test-results /app/server/build/test-results/ + +########################################## +# +# Ubuntu JDK Image +# +########################################## + +FROM eclipse-temurin:21.0.9_10-jdk-noble AS jdk-run + +RUN groupadd engine \ + && usermod -l engine ubuntu \ + && adduser engine engine \ + && mkdir -p /opt/engine/appdata \ + && chown -R engine:engine /opt/engine + +WORKDIR /opt/engine +COPY --chown=engine:engine --from=builder \ + --exclude=cli-lib \ + --exclude=mirth-cli-launcher.jar \ + --exclude=mccommand \ + --exclude=manager-lib \ + --exclude=mirth-manager-launcher.jar \ + --exclude=mcmanager \ + /app/server/setup ./ + +VOLUME /opt/engine/appdata +VOLUME /opt/engine/custom-extensions +EXPOSE 8443 + +USER engine +ENTRYPOINT ["./configure-from-env"] +CMD ["./oieserver"] + 
+########################################## +# +# Alpine JRE Image +# +########################################## + +FROM eclipse-temurin:21.0.9_10-jre-alpine AS jre-run + +# Alpine does not include bash by default, so we install it +RUN apk add --no-cache bash +# useradd and groupadd are not available in Alpine +RUN addgroup -S engine \ + && adduser -S -g engine engine \ + && mkdir -p /opt/engine/appdata \ + && chown -R engine:engine /opt/engine + +WORKDIR /opt/engine +COPY --chown=engine:engine --from=builder \ + --exclude=cli-lib \ + --exclude=mirth-cli-launcher.jar \ + --exclude=mccommand \ + --exclude=manager-lib \ + --exclude=mirth-manager-launcher.jar \ + --exclude=mcmanager \ + /app/server/setup ./ + +VOLUME /opt/engine/appdata +VOLUME /opt/engine/custom-extensions + +EXPOSE 8443 + +USER engine +ENTRYPOINT ["./configure-from-env"] +CMD ["./oieserver"] diff --git a/ci/.gitignore b/ci/.gitignore new file mode 100644 index 000000000..afc2b7376 --- /dev/null +++ b/ci/.gitignore @@ -0,0 +1,2 @@ +test-results/ +__pycache__/ \ No newline at end of file diff --git a/ci/README.md b/ci/README.md new file mode 100644 index 000000000..ae441fd36 --- /dev/null +++ b/ci/README.md @@ -0,0 +1,230 @@ +# CI Test Design + +This directory defines a minimal integration test system for the Docker image produced by this repository. + +## Goals + +- Build the OIE server image from the in-tree `Dockerfile`. +- Build a dedicated CI runner image from `ci/runner/`. +- Run one or more docker compose configurations in parallel in CI. +- Wait for Docker healthchecks instead of scraping logs. +- Authenticate to the server over REST using `admin` / `admin`. +- Discover tests from the filesystem, not from a registry. +- Execute tests in lexicographic order. +- Keep the authoring model slim enough that most tests are just fixture files. 
+ +## Directory Layout + +Suggested layout: + +```text +.github/workflows/ CI entrypoints +ci/ + README.md this design + runner/ dockerized Python runner + configurations/ compose files keyed by configuration name + tests/ filesystem-discovered test cases +``` + +Expected configuration names are the compose basenames without the file suffix, for example: + +- `alpine-temurin21-derby` +- `alpine-temurin21-mysql` +- `alpine-temurin21-postgres` +- `alpine-temurin21-sqlserver` + +Those names map directly to compose files under `ci/configurations/`, for example: + +- `ci/configurations/alpine-temurin21-derby.compose.yml` +- `ci/configurations/alpine-temurin21-postgres.compose.yml` + +No additional registry of configurations is planned. + +## CI Model + +The workflow is expected to do three things: + +1. Build the server image from the repository `Dockerfile` and tag it with a CI-unique tag. +2. Build the runner image from `ci/runner/Dockerfile`. +3. Launch one job per configuration so configurations can run in parallel. + +The workflow passes these inputs into the runner container: + +- the server image tag to inject into the selected compose file +- the configuration name to execute +- the tests root, defaulting to `ci/tests` + +The runner is a consumer of images, not the component that builds them. + +## Compose Contract + +Each compose file under `ci/configurations/` defines one test environment. + +Conventions are preferred over options: + +- The OIE service is named `oie`. +- The compose file references a server image that the runner rewrites to the CI-built image tag before startup. +- Dependencies such as MySQL or PostgreSQL declare Docker healthchecks. +- The OIE service declares a Docker healthcheck that represents API readiness, not just process start. + +The runner relies on `docker compose up -d` plus Compose health state. It should not parse container logs for readiness.
+ +If the current image does not already expose a suitable health endpoint, we will add one in the server later. No server changes are part of this design doc. + +## Runner Contract + +The runner is a small Python application packaged as a Docker image. + +Responsibilities: + +- select one configuration +- materialize the effective compose file with the CI-built OIE image tag +- boot the compose stack +- wait until all required services are healthy +- authenticate to the OIE REST API with `admin` / `admin` +- discover applicable tests under `ci/tests` +- run them in lexicographic order +- collect failures with enough detail to debug fixture mismatches +- always tear down the compose stack unless explicitly running in a local keep-alive mode + +Non-goals for the first iteration: + +- pluggable auth strategies +- dynamic service-name discovery +- per-test custom compose overrides +- a large assertion DSL +- server-side test helpers unless proven necessary + +## Test Discovery + +Tests live under `ci/tests/` and are discovered recursively. + +A test directory is any directory under `ci/tests/`. A test directory must contain at +least one recognized test, such as `channels/` or `test.py`. + +Tests run in lexicographic order by relative path. This keeps execution deterministic without extra metadata. + +The runner may support optional filtering by path later, but the default behavior is full discovery. + +## Configuration Filtering + +A test directory may contain a file named `configurations`. + +Rules: + +- The file contains newline-separated configuration names. +- Blank lines are ignored. +- A missing `configurations` file means the test runs in all configurations. + +This is the only planned per-test targeting mechanism. + +## Authentication + +The runner authenticates using the existing REST login endpoint. 
+ +Initial contract: + +- username: `admin` +- password: `admin` +- transport: HTTPS + +The runner stores the authenticated session and passes a connected client object into any Python hook class loaded from a test directory. + +## Test Execution Phases + +Each test executes in this order: + +1. `startup` +2. deploy channels from `channels/*/channel.xml` +3. `postDeploy` +4. send messages from fixtures, assert results after each message +5. `postRun` +6. assertions +7. `teardown` + +Hook methods are optional and come from the test directory's `test.py` file if present. + +The initial hook surface is: + +- `startup` +- `postDeploy` +- `postRun` +- `teardown` + +These hooks are intended for narrow gaps that fixture-only tests cannot cover. + +## Message Fixtures + +Within a test channel directory, message fixtures are grouped by message name. + +Example: + +```text +channels/ + 01-gives-response/ + channel.xml + messages/ + 01-verify-date/ + source + source_metadata.yml + source_status + dest01 + dest01_metadata.yml + dest01_status + assertion.py + 02-verify-foo/ + ... +``` + +Test fixtures are split into the original data: + +- `source` is the payload sent into the deployed channel. +- `source_sourcemap.yml` is the metadata sent with the source payload. (Optional) + +And the optional assertions: + +- `source_metadata.yml` is a dictionary of assertions for the source message metadata after submission. +- `source_status` is the asserted status enumeration text for the source message after submission. +- `source_response` is the asserted response payload if the channel gives a response to the source submission. +- `source_transformed` is the asserted transformed source payload if the channel transforms the message before delivery. +- `dest01_transformed` asserts byte-identical transformed content for destination 1 if the channel transforms the message before delivery. +- `dest01` asserts byte-identical sent content for destination 1.
+- `dest01_response` is the asserted response content if destination 1 gives a response to the message delivery. +- `dest01_metadata.yml` is a dictionary of assertions for destination 1 metadata. +- `dest01_status` asserts the status of the message at destination 1. +- `dest02*` repeats the same pattern for destination 2, and so on. +- `assertion.py` contains custom assertions for the message. + +Any function in `assertion.py` named `test_*` is automatically discovered and executed by +the runner after any fixture-based assertions. The function receives the authenticated +REST client and test results as arguments, and can execute arbitrary logic and assertions. + +The assertions may include the byte string `((ANY))` as a wildcard to ignore content that +is not relevant to the test case. This is useful for fields like timestamps or IDs that +are expected to change on each run. `((ANY))` behaves as regex `.*?`. + +## Channel Fixtures + +`channels/` contains exported channel XML files that the runner deploys before message execution. + +Initial assumptions: + +- files are deployed in lexicographic order +- deployment errors fail the current test immediately +- the runner removes or undeploys test channels during teardown + +## Local Developer Flow + +Local development should mirror CI closely. + +- runtests.sh: a shell entrypoint for macOS/Linux +- runtests.ps1: a PowerShell entrypoint for Windows + +Those scripts should do the same high-level steps as the workflow YAML files for +use by a developer running tests locally: + +1. build the server image +2. build the runner image +3. execute one configuration or all configurations + +The script layer should stay thin and delegate all real logic to the runner container.
diff --git a/ci/configurations/alpine-temurin21-derby.compose.yml b/ci/configurations/alpine-temurin21-derby.compose.yml new file mode 100644 index 000000000..178d403ca --- /dev/null +++ b/ci/configurations/alpine-temurin21-derby.compose.yml @@ -0,0 +1,13 @@ +services: + oie: + image: ${OIE_IMAGE} + ports: + - "8443:8443" + healthcheck: + test: + - CMD-SHELL + - bash -lc 'exec 3<>/dev/tcp/127.0.0.1/8443' + interval: 10s + timeout: 5s + retries: 30 + start_period: 40s diff --git a/ci/configurations/alpine-temurin21-mysql.compose.yml b/ci/configurations/alpine-temurin21-mysql.compose.yml new file mode 100644 index 000000000..53e1211e2 --- /dev/null +++ b/ci/configurations/alpine-temurin21-mysql.compose.yml @@ -0,0 +1,40 @@ +services: + db: + image: mariadb:11.8 + environment: + MARIADB_ROOT_PASSWORD: root_password + MARIADB_DATABASE: mirthdb + MARIADB_USER: mirthdb + MARIADB_PASSWORD: mirthdb + healthcheck: + test: + - CMD-SHELL + - mariadb-admin ping -h 127.0.0.1 -umirthdb -pmirthdb + interval: 10s + timeout: 5s + retries: 20 + start_period: 20s + + oie: + image: ${OIE_IMAGE} + depends_on: + db: + condition: service_healthy + environment: + DATABASE: mysql + DATABASE_URL: jdbc:mysql://db:3306/mirthdb + DATABASE_USERNAME: mirthdb + DATABASE_PASSWORD: mirthdb + DATABASE_MAX_CONNECTIONS: 20 + DATABASE_MAX_RETRY: 10 + DATABASE_RETRY_WAIT: 5000 + ports: + - "8443:8443" + healthcheck: + test: + - CMD-SHELL + - bash -lc 'exec 3<>/dev/tcp/127.0.0.1/8443' + interval: 10s + timeout: 5s + retries: 30 + start_period: 40s diff --git a/ci/configurations/alpine-temurin21-postgres.compose.yml b/ci/configurations/alpine-temurin21-postgres.compose.yml new file mode 100644 index 000000000..dfd8fd479 --- /dev/null +++ b/ci/configurations/alpine-temurin21-postgres.compose.yml @@ -0,0 +1,39 @@ +services: + db: + image: postgres:18.3-alpine + environment: + POSTGRES_USER: mirthdb + POSTGRES_PASSWORD: mirthdb + POSTGRES_DB: mirthdb + healthcheck: + test: + - CMD-SHELL + - pg_isready -U 
mirthdb -d mirthdb + interval: 10s + timeout: 5s + retries: 15 + start_period: 5s + + oie: + image: ${OIE_IMAGE} + depends_on: + db: + condition: service_healthy + environment: + DATABASE: postgres + DATABASE_URL: jdbc:postgresql://db:5432/mirthdb + DATABASE_USERNAME: mirthdb + DATABASE_PASSWORD: mirthdb + DATABASE_MAX_CONNECTIONS: 20 + DATABASE_MAX_RETRY: 10 + DATABASE_RETRY_WAIT: 5000 + ports: + - "8443:8443" + healthcheck: + test: + - CMD-SHELL + - bash -lc 'exec 3<>/dev/tcp/127.0.0.1/8443' + interval: 10s + timeout: 5s + retries: 30 + start_period: 40s diff --git a/ci/configurations/alpine-temurin21-sqlserver.compose.yml b/ci/configurations/alpine-temurin21-sqlserver.compose.yml new file mode 100644 index 000000000..e0a4a3ddd --- /dev/null +++ b/ci/configurations/alpine-temurin21-sqlserver.compose.yml @@ -0,0 +1,54 @@ +services: + db: + platform: linux/amd64 + image: mcr.microsoft.com/mssql/server:2025-latest + environment: + ACCEPT_EULA: Y + MSSQL_PID: Developer + MSSQL_SA_PASSWORD: OieSqlServerPassw0rd! + healthcheck: + test: + - CMD-SHELL + - /opt/mssql-tools18/bin/sqlcmd -C -S 127.0.0.1 -U sa -P "$$MSSQL_SA_PASSWORD" -Q "SELECT 1" || /opt/mssql-tools/bin/sqlcmd -S 127.0.0.1 -U sa -P "$$MSSQL_SA_PASSWORD" -Q "SELECT 1" + interval: 10s + timeout: 5s + retries: 30 + start_period: 30s + + db-init: + platform: linux/amd64 + image: mcr.microsoft.com/mssql/server:2025-latest + depends_on: + db: + condition: service_healthy + environment: + MSSQL_SA_PASSWORD: OieSqlServerPassw0rd! 
+ restart: "no" + entrypoint: + - /bin/bash + - -lc + - /opt/mssql-tools18/bin/sqlcmd -C -S db -U sa -P "$$MSSQL_SA_PASSWORD" -Q "IF DB_ID('mirthdb') IS NULL CREATE DATABASE mirthdb;" || /opt/mssql-tools/bin/sqlcmd -S db -U sa -P "$$MSSQL_SA_PASSWORD" -Q "IF DB_ID('mirthdb') IS NULL CREATE DATABASE mirthdb;" + + oie: + image: ${OIE_IMAGE} + depends_on: + db-init: + condition: service_completed_successfully + environment: + DATABASE: sqlserver + DATABASE_URL: jdbc:jtds:sqlserver://db:1433/mirthdb + DATABASE_USERNAME: sa + DATABASE_PASSWORD: OieSqlServerPassw0rd! + DATABASE_MAX_CONNECTIONS: 20 + DATABASE_MAX_RETRY: 20 + DATABASE_RETRY_WAIT: 5000 + ports: + - "8443:8443" + healthcheck: + test: + - CMD-SHELL + - bash -lc 'exec 3<>/dev/tcp/127.0.0.1/8443' + interval: 10s + timeout: 5s + retries: 30 + start_period: 40s diff --git a/ci/configurations/ubuntu-temurin21-derby.compose.yml b/ci/configurations/ubuntu-temurin21-derby.compose.yml new file mode 100644 index 000000000..178d403ca --- /dev/null +++ b/ci/configurations/ubuntu-temurin21-derby.compose.yml @@ -0,0 +1,13 @@ +services: + oie: + image: ${OIE_IMAGE} + ports: + - "8443:8443" + healthcheck: + test: + - CMD-SHELL + - bash -lc 'exec 3<>/dev/tcp/127.0.0.1/8443' + interval: 10s + timeout: 5s + retries: 30 + start_period: 40s diff --git a/ci/configurations/ubuntu-temurin21-postgres.compose.yml b/ci/configurations/ubuntu-temurin21-postgres.compose.yml new file mode 100644 index 000000000..dfd8fd479 --- /dev/null +++ b/ci/configurations/ubuntu-temurin21-postgres.compose.yml @@ -0,0 +1,39 @@ +services: + db: + image: postgres:18.3-alpine + environment: + POSTGRES_USER: mirthdb + POSTGRES_PASSWORD: mirthdb + POSTGRES_DB: mirthdb + healthcheck: + test: + - CMD-SHELL + - pg_isready -U mirthdb -d mirthdb + interval: 10s + timeout: 5s + retries: 15 + start_period: 5s + + oie: + image: ${OIE_IMAGE} + depends_on: + db: + condition: service_healthy + environment: + DATABASE: postgres + DATABASE_URL: 
jdbc:postgresql://db:5432/mirthdb + DATABASE_USERNAME: mirthdb + DATABASE_PASSWORD: mirthdb + DATABASE_MAX_CONNECTIONS: 20 + DATABASE_MAX_RETRY: 10 + DATABASE_RETRY_WAIT: 5000 + ports: + - "8443:8443" + healthcheck: + test: + - CMD-SHELL + - bash -lc 'exec 3<>/dev/tcp/127.0.0.1/8443' + interval: 10s + timeout: 5s + retries: 30 + start_period: 40s diff --git a/ci/runner/Dockerfile b/ci/runner/Dockerfile new file mode 100644 index 000000000..e5746fa41 --- /dev/null +++ b/ci/runner/Dockerfile @@ -0,0 +1,8 @@ +FROM docker:27-cli + +RUN apk add --no-cache bash curl python3 py3-lxml py3-yaml + +WORKDIR /app +COPY *.py /app/ + +ENTRYPOINT ["python3", "/app/run.py"] diff --git a/ci/runner/api.py b/ci/runner/api.py new file mode 100644 index 000000000..0c2998646 --- /dev/null +++ b/ci/runner/api.py @@ -0,0 +1,163 @@ +import importlib +import ssl +import urllib.parse +import urllib.error +import urllib.request +from http.cookiejar import CookieJar +from xml.etree.ElementTree import Element, SubElement, tostring + +REQUESTED_WITH_HEADER = "OpenIntegrationEngine-CI" +MAX_REQUEST_TIMEOUT_SECONDS = 15 + + +class ApiClient: + def __init__(self, base_url: str): + self.base_url = base_url.rstrip("/") + self.opener = build_opener() + + def request( + self, + path: str, + method: str = "GET", + data: bytes | None = None, + content_type: str | None = None, + accept: str = "application/xml", + timeout: int = MAX_REQUEST_TIMEOUT_SECONDS, + ) -> tuple[int, str]: + headers = { + "Accept": accept, + "X-Requested-With": REQUESTED_WITH_HEADER, + } + if content_type is not None: + headers["Content-Type"] = content_type + + request = urllib.request.Request( + f"{self.base_url}{path}", + data=data, + method=method, + headers=headers, + ) + + try: + with self.opener.open(request, timeout=timeout) as response: + body = response.read().decode("utf-8", errors="replace") + return response.status, body + except urllib.error.HTTPError as error: + body = error.read().decode("utf-8", errors="replace") + 
raise RuntimeError(f"HTTP {error.code} for {method} {path}: {body}") from error + except urllib.error.URLError as error: + raise RuntimeError(f"Request failed for {method} {path}: {error}") from error + + def create_channel(self, channel_xml: bytes) -> None: + self.request( + "/api/channels/", + method="POST", + data=channel_xml, + content_type="application/xml", + accept="*/*", + ) + + def deploy_channel(self, channel_id: str) -> None: + self.request(f"/api/channels/{channel_id}/_deploy", method="POST", accept="*/*") + + def get_channel_status(self, channel_id: str): + _, body = self.request( + f"/api/channels/{channel_id}/status", + accept="application/xml", + ) + return parse_xml(body) + + def undeploy_channel(self, channel_id: str) -> None: + self.request(f"/api/channels/{channel_id}/_undeploy", method="POST", accept="*/*") + + def remove_channel(self, channel_id: str) -> None: + self.request(f"/api/channels/{channel_id}", method="DELETE", accept="*/*") + + def process_message(self, channel_id: str, raw_data: str, source_map: dict[str, object] | None = None) -> int: + raw_message_xml = build_raw_message_xml(raw_data, source_map or {}) + _, body = self.request( + f"/api/channels/{channel_id}/messagesWithObj", + method="POST", + data=raw_message_xml, + content_type="application/xml", + accept="application/xml", + timeout=MAX_REQUEST_TIMEOUT_SECONDS, + ) + return parse_xml(body) + + def get_message_content(self, channel_id: str, message_id: int, meta_data_ids: list[int] | None = None): + query = "" + if meta_data_ids: + query = "?" 
+ urllib.parse.urlencode([("metaDataId", meta_data_id) for meta_data_id in meta_data_ids]) + _, body = self.request( + f"/api/channels/{channel_id}/messages/{message_id}{query}", + accept="application/xml", + timeout=MAX_REQUEST_TIMEOUT_SECONDS, + ) + return parse_xml(body) + + def search_message(self, channel_id: str, message_id: int): + query = urllib.parse.urlencode( + { + "minMessageId": message_id, + "maxMessageId": message_id, + "includeContent": "true", + "offset": 0, + "limit": 1, + } + ) + _, body = self.request( + f"/api/channels/{channel_id}/messages?{query}", + accept="application/xml", + timeout=MAX_REQUEST_TIMEOUT_SECONDS, + ) + return parse_xml(body) + + +def parse_xml(body: str): + etree = importlib.import_module("lxml.etree") + return etree.fromstring(body.encode("utf-8")) + + +def build_raw_message_xml(raw_data: str, source_map: dict[str, object]) -> bytes: + raw_message = Element("com.mirth.connect.donkey.model.message.RawMessage") + SubElement(raw_message, "overwrite").text = "false" + SubElement(raw_message, "imported").text = "false" + SubElement(raw_message, "rawData").text = raw_data + + source_map_element = SubElement(raw_message, "sourceMap") + source_map_element.set("class", "linked-hash-map") + for key, value in source_map.items(): + if isinstance(value, dict): + raise RuntimeError(f"Nested source metadata is not supported for message submission: {key}") + entry = SubElement(source_map_element, "entry") + SubElement(entry, "string").text = str(key) + SubElement(entry, "string").text = str(value) + + SubElement(raw_message, "binary").text = "false" + return tostring(raw_message, encoding="utf-8", xml_declaration=True) + + +def build_opener() -> urllib.request.OpenerDirector: + cookie_jar = CookieJar() + ssl_context = ssl.create_default_context() + ssl_context.check_hostname = False + ssl_context.verify_mode = ssl.CERT_NONE + https_handler = urllib.request.HTTPSHandler(context=ssl_context) + return 
urllib.request.build_opener(https_handler, urllib.request.HTTPCookieProcessor(cookie_jar)) + + +def login_or_fail(base_url: str, username: str, password: str, timeout: int) -> ApiClient: + client = ApiClient(base_url) + payload = urllib.parse.urlencode({"username": username, "password": password}).encode("utf-8") + status, body = client.request( + "/api/users/_login", + method="POST", + data=payload, + content_type="application/x-www-form-urlencoded", + timeout=min(timeout, 15), + ) + if status == 200 and "SUCCESS" in body: + print("Authenticated successfully.", flush=True) + return client + raise RuntimeError(f"Unexpected login response: HTTP {status} body={body}") diff --git a/ci/runner/channeltests.py b/ci/runner/channeltests.py new file mode 100644 index 000000000..6670b77c6 --- /dev/null +++ b/ci/runner/channeltests.py @@ -0,0 +1,306 @@ +from dataclasses import dataclass, field +import importlib +import importlib.util +from pathlib import Path +import sys +import threading +import time +from types import ModuleType +from typing import Any + +from api import ApiClient +from junitxml import JUnitReport +from messagetests import MessageTestResult, run_message_tests_with_report + +CHANNEL_START_TIMEOUT_SECONDS = 15 +HOOK_TIMEOUT_SECONDS = 15 + + +@dataclass(frozen=True) +class ChannelFixture: + test_dir: Path + channel_dir: Path + channel_file: Path + channel_id: str + channel_name: str + + +@dataclass(frozen=True) +class ProvisionedChannel: + fixture: ChannelFixture + channel_id: str + + +@dataclass(frozen=True) +class TestRun: + test_dir: Path + channels: list[ChannelFixture] = field(default_factory=list) + + +@dataclass +class TestRunContext: + test_run: TestRun + provisioned_channels: list[ProvisionedChannel] = field(default_factory=list) + message_results: list[MessageTestResult] = field(default_factory=list) + + +def resolve_tests_root(workspace: str, tests_root: str) -> Path: + candidate = Path(tests_root) + if candidate.is_absolute(): + return candidate + 
return Path(workspace) / candidate + + +def parse_channel_fixture(channel_file: Path) -> ChannelFixture: + channel_id = parse_channel_id(channel_file) + return ChannelFixture( + test_dir=channel_file.parents[2], + channel_dir=channel_file.parent, + channel_file=channel_file, + channel_id=channel_id, + channel_name=channel_file.parent.name, + ) + + +def parse_channel_id(channel_file: Path) -> str: + etree = importlib.import_module("lxml.etree") + document = etree.parse(str(channel_file)) + channel_id = document.getroot().findtext("id") + if channel_id is None or not channel_id.strip(): + raise RuntimeError(f"Channel fixture is missing id: {channel_file}") + return channel_id.strip() + + +def test_runs_for_configuration(test_dir: Path, configuration: str) -> bool: + configurations_file = test_dir / "configurations" + if not configurations_file.exists(): + return True + + configurations = [line.strip() for line in configurations_file.read_text(encoding="utf-8").splitlines() if line.strip()] + return configuration in configurations + + +def discover_channels(tests_root: Path, configuration: str) -> list[ChannelFixture]: + if not tests_root.exists(): + return [] + + fixtures: list[ChannelFixture] = [] + for channel_file in sorted(tests_root.glob("**/channels/*/channel.xml")): + fixture = parse_channel_fixture(channel_file) + if test_runs_for_configuration(fixture.test_dir, configuration): + fixtures.append(fixture) + return fixtures + + +def discover_test_runs(tests_root: Path, configuration: str) -> list[TestRun]: + runs_by_dir: dict[Path, list[ChannelFixture]] = {} + for fixture in discover_channels(tests_root, configuration): + runs_by_dir.setdefault(fixture.test_dir, []).append(fixture) + + return [TestRun(test_dir=test_dir, channels=sorted(fixtures, key=lambda fixture: fixture.channel_dir)) for test_dir, fixtures in sorted(runs_by_dir.items())] + + +def run_channel_tests( + client: ApiClient, + test_runs: list[TestRun], + timeout_seconds: int, + report: 
JUnitReport, + keep_alive: bool = False, +) -> list[MessageTestResult]: + all_message_results: list[MessageTestResult] = [] + + for test_run in test_runs: + context = TestRunContext(test_run=test_run) + hooks = load_test_hooks(test_run.test_dir) + print(f"Running test {test_run.test_dir.name}", flush=True) + try: + run_reported_hook(report, test_run, "startup", hooks, client, context) + context.provisioned_channels = deploy_channel_fixtures_with_report(report, test_run, client, test_run.channels) + run_reported_hook(report, test_run, "postDeploy", hooks, client, context) + context.message_results = run_message_tests_with_report(client, context.provisioned_channels, timeout_seconds, report) + run_reported_hook(report, test_run, "postRun", hooks, client, context) + all_message_results.extend(context.message_results) + finally: + if keep_alive: + print(f"Preserving deployed test state for {test_run.test_dir.name} because keep-alive is enabled.", flush=True) + else: + try: + run_reported_hook(report, test_run, "teardown", hooks, client, context) + finally: + cleanup_channel_tests(client, context.provisioned_channels) + + return all_message_results + + +def deploy_channel_fixtures(client: ApiClient, channel_fixtures: list[ChannelFixture]) -> list[ProvisionedChannel]: + provisioned_channels: list[ProvisionedChannel] = [] + + for fixture in channel_fixtures: + print(f"Creating channel {fixture.channel_name} from {fixture.channel_file}", flush=True) + client.create_channel(fixture.channel_file.read_bytes()) + print(f"Deploying channel {fixture.channel_name} ({fixture.channel_id})", flush=True) + client.deploy_channel(fixture.channel_id) + wait_for_channel_started(client, fixture) + provisioned_channels.append(ProvisionedChannel(fixture=fixture, channel_id=fixture.channel_id)) + + return provisioned_channels + + +def deploy_channel_fixtures_with_report( + report: JUnitReport, + test_run: TestRun, + client: ApiClient, + channel_fixtures: list[ChannelFixture], +) -> 
list[ProvisionedChannel]: + provisioned_channels: list[ProvisionedChannel] = [] + + for fixture in channel_fixtures: + testcase_name = f"{test_run.test_dir.name}/{fixture.channel_name}/deploy" + classname = f"{test_run.test_dir.name}.{fixture.channel_name}" + provisioned_channels.append( + report.run_case( + testcase_name, + classname, + lambda fixture=fixture: deploy_single_channel_fixture(client, fixture), + ) + ) + + return provisioned_channels + + +def deploy_single_channel_fixture(client: ApiClient, fixture: ChannelFixture) -> ProvisionedChannel: + print(f"Creating channel {fixture.channel_name} from {fixture.channel_file}", flush=True) + client.create_channel(fixture.channel_file.read_bytes()) + print(f"Deploying channel {fixture.channel_name} ({fixture.channel_id})", flush=True) + client.deploy_channel(fixture.channel_id) + wait_for_channel_started(client, fixture) + return ProvisionedChannel(fixture=fixture, channel_id=fixture.channel_id) + + +def wait_for_channel_started(client: ApiClient, fixture: ChannelFixture, timeout_seconds: int = CHANNEL_START_TIMEOUT_SECONDS) -> None: + deadline = time.monotonic() + timeout_seconds + last_state: str | None = None + + while time.monotonic() < deadline: + status_xml = client.get_channel_status(fixture.channel_id) + state = dashboard_status_state(status_xml) + if state == "STARTED": + return + last_state = state + time.sleep(1) + + raise RuntimeError( + f"Timed out waiting for channel {fixture.channel_name} ({fixture.channel_id}) to start; last state was {last_state}" + ) + + +def dashboard_status_state(status_xml) -> str | None: + values = status_xml.xpath("./state/text()") + if not values: + return None + return str(values[0]).strip() + + +def deploy_channel_tests(client: ApiClient, channel_fixtures: list[ChannelFixture]) -> list[ProvisionedChannel]: + return deploy_channel_fixtures(client, channel_fixtures) + + +def cleanup_channel_tests(client: ApiClient, provisioned_channels: list[ProvisionedChannel]) -> None: + 
for provisioned in reversed(provisioned_channels): + safe_cleanup_channel("undeploy", undeploy_channel, client, provisioned) + + for provisioned in reversed(provisioned_channels): + safe_cleanup_channel("remove", remove_channel, client, provisioned) + + +def undeploy_channel(client: ApiClient, provisioned: ProvisionedChannel) -> None: + print(f"Undeploying channel {provisioned.fixture.channel_name} ({provisioned.channel_id})", flush=True) + client.undeploy_channel(provisioned.channel_id) + + +def remove_channel(client: ApiClient, provisioned: ProvisionedChannel) -> None: + print(f"Removing channel {provisioned.fixture.channel_name} ({provisioned.channel_id})", flush=True) + client.remove_channel(provisioned.channel_id) + + +def safe_cleanup_channel(action: str, cleanup, client: ApiClient, provisioned: ProvisionedChannel) -> None: + try: + cleanup(client, provisioned) + except Exception as error: + print(f"Ignoring {action} failure for {provisioned.fixture.channel_name}: {error}", file=sys.stderr, flush=True) + + +def load_test_hooks(test_dir: Path) -> Any: + hook_file = test_dir / "test.py" + if not hook_file.exists(): + return None + + module = load_module_from_file(hook_file, f"test_hooks_{test_dir.name}") + hook_class = getattr(module, "Hooks", None) or getattr(module, "TestHooks", None) + if hook_class is not None: + return hook_class() + return module + + +def invoke_hook(hooks: Any, hook_name: str, client: ApiClient, context: TestRunContext) -> None: + if hooks is None: + return + + hook = getattr(hooks, hook_name, None) + if callable(hook): + invoke_callable(hook, client, context) + + +def run_reported_hook( + report: JUnitReport, + test_run: TestRun, + hook_name: str, + hooks: Any, + client: ApiClient, + context: TestRunContext, +) -> None: + if hooks is None: + return + + hook = getattr(hooks, hook_name, None) + if not callable(hook): + return + + testcase_name = f"{test_run.test_dir.name}/{hook_name}" + classname = f"{test_run.test_dir.name}.hooks" + 
report.run_case(testcase_name, classname, lambda: invoke_callable(hook, client, context)) + + +def invoke_callable(callable_obj: Any, client: ApiClient, context: TestRunContext) -> None: + result: dict[str, BaseException | None] = {"error": None} + + def run_hook() -> None: + try: + try: + callable_obj(client, context) + except TypeError: + try: + callable_obj(client) + except TypeError: + callable_obj() + except BaseException as error: + result["error"] = error + + thread = threading.Thread(target=run_hook, daemon=True) + thread.start() + thread.join(HOOK_TIMEOUT_SECONDS) + + if thread.is_alive(): + raise RuntimeError(f"Hook timed out after {HOOK_TIMEOUT_SECONDS} seconds") + + if result["error"] is not None: + raise result["error"] + + +def load_module_from_file(module_file: Path, module_name: str) -> ModuleType: + spec = importlib.util.spec_from_file_location(module_name, module_file) + if spec is None or spec.loader is None: + raise RuntimeError(f"Unable to load module from {module_file}") + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module diff --git a/ci/runner/compose.py b/ci/runner/compose.py new file mode 100644 index 000000000..24ffa3e58 --- /dev/null +++ b/ci/runner/compose.py @@ -0,0 +1,55 @@ +import subprocess +import uuid +from pathlib import Path + +MAX_COMMAND_TIMEOUT_SECONDS = 90 + + +def sanitize_project_name(name: str) -> str: + filtered = "".join(character if character.isalnum() else "-" for character in name.lower()) + filtered = filtered.strip("-") or "oie-ci" + return f"oie-ci-{filtered}-{uuid.uuid4().hex[:8]}" + + +def run_command(command: list[str], env: dict[str, str], timeout_seconds: int = MAX_COMMAND_TIMEOUT_SECONDS) -> subprocess.CompletedProcess: + print(f"+ {' '.join(command)}", flush=True) + try: + return subprocess.run(command, env=env, check=False, timeout=timeout_seconds) + except subprocess.TimeoutExpired as error: + raise RuntimeError(f"Command timed out after {timeout_seconds} 
seconds: {' '.join(command)}") from error + + +def compose_up(compose_file: Path, project_name: str, env: dict[str, str], timeout: int) -> None: + command = [ + "docker", + "compose", + "-f", + str(compose_file), + "-p", + project_name, + "up", + "-d", + "--wait", + "--wait-timeout", + str(timeout), + ] + result = run_command(command, env, timeout_seconds=timeout) + if result.returncode != 0: + raise RuntimeError("docker compose up failed") + + +def compose_down(compose_file: Path, project_name: str, env: dict[str, str]) -> None: + command = [ + "docker", + "compose", + "-f", + str(compose_file), + "-p", + project_name, + "down", + "-v", + "--remove-orphans", + ] + result = run_command(command, env, timeout_seconds=MAX_COMMAND_TIMEOUT_SECONDS) + if result.returncode != 0: + raise RuntimeError("docker compose down failed") diff --git a/ci/runner/junitxml.py b/ci/runner/junitxml.py new file mode 100644 index 000000000..1220b1629 --- /dev/null +++ b/ci/runner/junitxml.py @@ -0,0 +1,95 @@ +from dataclasses import dataclass, field +from pathlib import Path +import time +import traceback +from typing import Callable, TypeVar +from xml.etree.ElementTree import Element, ElementTree, SubElement + +T = TypeVar("T") + + +@dataclass +class TestCaseResult: + name: str + classname: str + elapsed_seconds: float + failure_message: str | None = None + failure_text: str | None = None + + @property + def failed(self) -> bool: + return self.failure_message is not None + + +@dataclass +class JUnitReport: + suite_name: str + test_cases: list[TestCaseResult] = field(default_factory=list) + + def run_case(self, name: str, classname: str, func: Callable[[], T]) -> T: + started_at = time.monotonic() + try: + result = func() + except Exception as error: + self.test_cases.append( + TestCaseResult( + name=name, + classname=classname, + elapsed_seconds=time.monotonic() - started_at, + failure_message=str(error), + failure_text=traceback.format_exc(), + ) + ) + raise + + self.test_cases.append( 
+ TestCaseResult( + name=name, + classname=classname, + elapsed_seconds=time.monotonic() - started_at, + ) + ) + return result + + def write_xml(self, results_file: Path) -> None: + results_file.parent.mkdir(parents=True, exist_ok=True) + + tests = len(self.test_cases) + failures = sum(1 for case in self.test_cases if case.failed) + elapsed = sum(case.elapsed_seconds for case in self.test_cases) + + testsuite = Element( + "testsuite", + { + "name": self.suite_name, + "tests": str(tests), + "failures": str(failures), + "errors": "0", + "skipped": "0", + "time": format_seconds(elapsed), + }, + ) + + for case in self.test_cases: + testcase = SubElement( + testsuite, + "testcase", + { + "name": case.name, + "classname": case.classname, + "time": format_seconds(case.elapsed_seconds), + }, + ) + if case.failed: + failure = SubElement( + testcase, + "failure", + {"message": case.failure_message or "Test failed"}, + ) + failure.text = case.failure_text or case.failure_message or "Test failed" + + ElementTree(testsuite).write(results_file, encoding="utf-8", xml_declaration=True) + + +def format_seconds(value: float) -> str: + return f"{value:.3f}" \ No newline at end of file diff --git a/ci/runner/main.py b/ci/runner/main.py new file mode 100644 index 000000000..19c3556b9 --- /dev/null +++ b/ci/runner/main.py @@ -0,0 +1,157 @@ +import argparse +import json +import os +import sys +from pathlib import Path + +from api import login_or_fail +from channeltests import ( + ChannelFixture, + discover_channels, + discover_test_runs, + resolve_tests_root, + run_channel_tests, +) +from compose import compose_down, compose_up, sanitize_project_name +from junitxml import JUnitReport + +MAX_OPERATION_TIMEOUT_SECONDS = 90 +DEFAULT_TIMEOUT_SECONDS = MAX_OPERATION_TIMEOUT_SECONDS +DEFAULT_BASE_URL = "https://host.docker.internal:8443" +DEFAULT_USERNAME = "admin" +DEFAULT_PASSWORD = "admin" +DEFAULT_TESTS_ROOT = "ci/tests" +DEFAULT_RESULTS_ROOT = "ci/test-results" + + +def parse_args() -> 
argparse.Namespace: + parser = argparse.ArgumentParser(description="Boot and tear down an OIE docker-compose test configuration.") + parser.add_argument("--workspace", default="/workspace", help="Workspace root containing ci/configurations.") + parser.add_argument("--configuration", help="Configuration name mapped to ci/configurations/.compose.yml.") + parser.add_argument("--compose-file", help="Explicit compose file path. Overrides --configuration.") + parser.add_argument("--server-image", required=True, help="Server image tag to inject into the compose environment.") + parser.add_argument("--base-url", default=DEFAULT_BASE_URL, help="Base URL used for readiness and login checks.") + parser.add_argument("--username", default=DEFAULT_USERNAME, help="REST username.") + parser.add_argument("--password", default=DEFAULT_PASSWORD, help="REST password.") + parser.add_argument("--tests-root", default=DEFAULT_TESTS_ROOT, help="Root directory containing test fixtures.") + parser.add_argument("--results-root", default=DEFAULT_RESULTS_ROOT, help="Directory where JUnit XML results are written.") + parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT_SECONDS, help="Timeout in seconds for any harness operation. 
Capped at 90 seconds.") + parser.add_argument("--keep-alive", action="store_true", help="Leave the compose stack running after a successful check.") + args = parser.parse_args() + args.timeout = normalize_timeout(args.timeout) + return args + + +def normalize_timeout(timeout_seconds: int) -> int: + return max(1, min(timeout_seconds, MAX_OPERATION_TIMEOUT_SECONDS)) + + +def resolve_compose_file(args: argparse.Namespace) -> Path: + if args.compose_file: + compose_file = Path(args.compose_file) + if not compose_file.is_absolute(): + compose_file = Path(args.workspace) / compose_file + return compose_file + + if not args.configuration: + raise ValueError("Either --configuration or --compose-file must be provided.") + + return Path(args.workspace) / "ci" / "configurations" / f"{args.configuration}.compose.yml" + + +def build_run_summary( + compose_file: Path, + config_name: str, + project_name: str, + server_image: str, + tests_root: Path, + channel_fixtures: list[ChannelFixture], + base_url: str, + keep_alive: bool, +) -> dict[str, object]: + return { + "compose_file": str(compose_file), + "configuration": config_name, + "project_name": project_name, + "server_image": server_image, + "tests_root": str(tests_root), + "channels": [str(fixture.channel_file.relative_to(tests_root)) for fixture in channel_fixtures], + "base_url": base_url, + "keep_alive": keep_alive, + } + + +def main() -> int: + args = parse_args() + compose_file = resolve_compose_file(args) + tests_root = resolve_tests_root(args.workspace, args.tests_root) + results_root = resolve_tests_root(args.workspace, args.results_root) + if not compose_file.exists(): + raise FileNotFoundError(f"Compose file not found: {compose_file}") + + config_name = args.configuration or compose_file.stem.replace(".compose", "") + test_runs = discover_test_runs(tests_root, config_name) + channel_fixtures = discover_channels(tests_root, config_name) + project_name = sanitize_project_name(config_name) + env = os.environ.copy() + 
env["OIE_IMAGE"] = args.server_image + report = JUnitReport(suite_name=config_name) + results_file = results_root / f"{config_name}.xml" + + print( + json.dumps( + build_run_summary( + compose_file, + config_name, + project_name, + args.server_image, + tests_root, + channel_fixtures, + args.base_url, + args.keep_alive, + ), + indent=2, + ), + flush=True, + ) + + compose_attempted = False + client = None + message_results = [] + teardown_error = None + try: + compose_attempted = True + report.run_case( + f"{config_name}/setup", + config_name, + lambda: compose_up(compose_file, project_name, env, args.timeout), + ) + client = report.run_case( + f"{config_name}/login", + config_name, + lambda: login_or_fail(args.base_url, args.username, args.password, args.timeout), + ) + message_results = run_channel_tests(client, test_runs, args.timeout, report, keep_alive=args.keep_alive) + + print( + f"Configuration boot completed. Ran {len(test_runs)} test(s) and validated {len(message_results)} message(s).", + flush=True, + ) + return 0 + finally: + if compose_attempted and not args.keep_alive: + try: + report.run_case( + f"{config_name}/teardown", + config_name, + lambda: compose_down(compose_file, project_name, env), + ) + except Exception as error: + print(f"Teardown failed: {error}", file=sys.stderr, flush=True) + teardown_error = error + + report.write_xml(results_file) + print(f"Wrote JUnit test results to {results_file}", flush=True) + + if teardown_error is not None: + raise RuntimeError("Compose teardown failed") from teardown_error diff --git a/ci/runner/messagetests.py b/ci/runner/messagetests.py new file mode 100644 index 000000000..3fe5cae5a --- /dev/null +++ b/ci/runner/messagetests.py @@ -0,0 +1,423 @@ +from dataclasses import dataclass +import importlib.util +from pathlib import Path +import re +import time +from types import ModuleType +from typing import TYPE_CHECKING, Any + +from api import ApiClient +from junitxml import JUnitReport + +if TYPE_CHECKING: + 
from channeltests import ProvisionedChannel + +ANY_WILDCARD = b"((ANY))" +PENDING_STATUSES = {"PENDING", "QUEUED"} + + +@dataclass(frozen=True) +class MessageFixture: + message_dir: Path + name: str + source_file: Path + source_sourcemap_file: Path | None + + +@dataclass(frozen=True) +class MessageTestResult: + provisioned_channel: "ProvisionedChannel" + fixture: MessageFixture + message_id: int + message_xml: Any + + def connector_message(self, meta_data_id: int): + connector_message = self.message_xml.xpath( + f"./connectorMessages/entry[int = '{meta_data_id}']/connectorMessage" + ) + if not connector_message: + raise AssertionError( + f"Message {self.message_id} is missing connector metadata id {meta_data_id} for {self.fixture.name}" + ) + return connector_message[0] + + def source_connector(self): + return self.connector_message(0) + + def destination_connector(self, destination_number: int): + return self.connector_message(destination_number) + + def xml_text(self) -> str: + etree = importlib.import_module("lxml.etree") + return etree.tostring(self.message_xml, encoding="unicode", pretty_print=True) + + +def run_message_tests(client: ApiClient, provisioned_channels: list["ProvisionedChannel"], timeout_seconds: int) -> list[MessageTestResult]: + results: list[MessageTestResult] = [] + + for provisioned_channel in provisioned_channels: + message_fixtures = discover_message_fixtures(provisioned_channel.fixture.channel_dir) + for fixture in message_fixtures: + results.append(run_message_test(client, provisioned_channel, fixture, timeout_seconds)) + + return results + + +def run_message_tests_with_report( + client: ApiClient, + provisioned_channels: list["ProvisionedChannel"], + timeout_seconds: int, + report: JUnitReport, +) -> list[MessageTestResult]: + results: list[MessageTestResult] = [] + + for provisioned_channel in provisioned_channels: + message_fixtures = discover_message_fixtures(provisioned_channel.fixture.channel_dir) + for fixture in message_fixtures: 
+ testcase_name = f"{provisioned_channel.fixture.test_dir.name}/{provisioned_channel.fixture.channel_name}/{fixture.name}" + classname = f"{provisioned_channel.fixture.test_dir.name}.{provisioned_channel.fixture.channel_name}" + results.append( + report.run_case( + testcase_name, + classname, + lambda provisioned_channel=provisioned_channel, fixture=fixture: run_message_test( + client, + provisioned_channel, + fixture, + timeout_seconds, + ), + ) + ) + + return results + + +def discover_message_fixtures(channel_dir: Path) -> list[MessageFixture]: + messages_dir = channel_dir / "messages" + if not messages_dir.exists(): + return [] + + fixtures: list[MessageFixture] = [] + for message_dir in sorted(path for path in messages_dir.iterdir() if path.is_dir()): + source_file = message_dir / "source" + if not source_file.exists(): + raise RuntimeError(f"Message fixture is missing source payload: {message_dir}") + + source_sourcemap_file = message_dir / "source_sourcemap.yml" + fixtures.append( + MessageFixture( + message_dir=message_dir, + name=message_dir.name, + source_file=source_file, + source_sourcemap_file=source_sourcemap_file if source_sourcemap_file.exists() else None, + ) + ) + + return fixtures + + +def run_message_test( + client: ApiClient, + provisioned_channel: "ProvisionedChannel", + fixture: MessageFixture, + timeout_seconds: int, +) -> MessageTestResult: + print(f"Sending message {fixture.name} to channel {provisioned_channel.fixture.channel_name}", flush=True) + source_payload = fixture.source_file.read_text(encoding="utf-8") + source_sourcemap = load_yaml_mapping(fixture.source_sourcemap_file) + message_id = client.process_message(provisioned_channel.channel_id, source_payload, source_sourcemap) + result = wait_for_message_result(client, provisioned_channel, fixture, parse_xml_long(message_id), timeout_seconds) + print(f"Validated message {fixture.name} ({message_id})", flush=True) + return result + + +def wait_for_message_result( + client: ApiClient, + 
provisioned_channel: "ProvisionedChannel", + fixture: MessageFixture, + message_id: int, + timeout_seconds: int, +) -> MessageTestResult: + deadline = time.monotonic() + timeout_seconds + last_error: AssertionError | None = None + last_result: MessageTestResult | None = None + + while time.monotonic() < deadline: + message = message_element(client.search_message(provisioned_channel.channel_id, message_id)) + result = MessageTestResult( + provisioned_channel=provisioned_channel, + fixture=fixture, + message_id=message_id, + message_xml=message, + ) + last_result = result + + try: + validate_message_result(client, result) + return result + except AssertionError as error: + last_error = error + if is_terminal_message(result): + break + time.sleep(1) + + if last_error is not None: + detail = format_message_failure_detail(last_result) + raise AssertionError(f"Message fixture {fixture.message_dir} failed: {last_error}\n\n{detail}") from last_error + + detail = format_message_failure_detail(last_result) + if last_result is not None: + raise RuntimeError( + f"Timed out waiting for message {message_id} for fixture {fixture.message_dir}\n\n{detail}" + ) + raise RuntimeError(f"Timed out waiting for message {message_id} for fixture {fixture.message_dir}") + + +def validate_message_result(client: ApiClient, result: MessageTestResult) -> None: + validate_source_assertions(result) + validate_destination_assertions(result) + run_custom_assertions(client, result) + + +def validate_source_assertions(result: MessageTestResult) -> None: + source_connector = result.source_connector() + + source_metadata_file = result.fixture.message_dir / "source_metadata.yml" + if source_metadata_file.exists(): + expected_metadata = load_yaml_mapping(source_metadata_file) + actual_metadata = parse_metadata_assertion_map(source_connector) + assert_mapping_subset("source_metadata.yml", expected_metadata, actual_metadata) + + source_status_file = result.fixture.message_dir / "source_status" + if 
source_status_file.exists(): + expected_status = source_status_file.read_text(encoding="utf-8").strip() + actual_status = xpath_text(source_connector, "./status") + if actual_status != expected_status: + raise AssertionError(f"Expected source status {expected_status}, found {actual_status}") + + source_response_file = result.fixture.message_dir / "source_response" + if source_response_file.exists(): + assert_content_matches( + "source response", + source_response_file.read_bytes(), + response_payload_text(message_content_text(source_connector, "response")), + ) + + source_transformed_file = result.fixture.message_dir / "source_transformed" + if source_transformed_file.exists(): + assert_content_matches( + "source transformed", + source_transformed_file.read_bytes(), + message_content_text(source_connector, "transformed"), + ) + + +def validate_destination_assertions(result: MessageTestResult) -> None: + for path in sorted(result.fixture.message_dir.iterdir()): + name = path.name + if not path.is_file() or not name.startswith("dest"): + continue + + match = re.fullmatch(r"dest(\d+)(?:(_transformed|_response|_status|_metadata\.yml))?", name) + if not match: + continue + + destination_number = int(match.group(1)) + suffix = match.group(2) or "" + connector = result.destination_connector(destination_number) + + if suffix == "": + assert_content_matches(name, path.read_bytes(), message_content_text(connector, "sent")) + elif suffix == "_transformed": + assert_content_matches(name, path.read_bytes(), message_content_text(connector, "transformed")) + elif suffix == "_response": + assert_content_matches(name, path.read_bytes(), response_payload_text(message_content_text(connector, "response"))) + elif suffix == "_status": + expected_status = path.read_text(encoding="utf-8").strip() + actual_status = xpath_text(connector, "./status") + if actual_status != expected_status: + raise AssertionError(f"Expected {name} to be {expected_status}, found {actual_status}") + elif suffix 
== "_metadata.yml": + expected_metadata = load_yaml_mapping(path) + actual_metadata = parse_metadata_assertion_map(connector) + assert_mapping_subset(name, expected_metadata, actual_metadata) + + +def run_custom_assertions(client: ApiClient, result: MessageTestResult) -> None: + assertion_file = result.fixture.message_dir / "assertion.py" + if not assertion_file.exists(): + return + + module = load_module_from_file(assertion_file, f"message_assertions_{result.fixture.name}") + for attribute_name in sorted(dir(module)): + if not attribute_name.startswith("test_"): + continue + attribute = getattr(module, attribute_name) + if callable(attribute): + attribute(client, result) + + +def message_content_text(connector, field_name: str) -> str | None: + return xpath_text(connector, f"./{field_name}/content") + + +def response_payload_text(response_content: str | None) -> str | None: + if response_content is None: + return None + + stripped = response_content.strip() + if not stripped.startswith(" bool: + if xpath_text(result.message_xml, "./processed") != "true": + return False + + for connector_message in result.message_xml.xpath("./connectorMessages/entry/connectorMessage"): + status = xpath_text(connector_message, "./status") + if status in PENDING_STATUSES: + return False + return True + + +def parse_xml_long(xml_element) -> int: + if xml_element.tag != "long" or xml_element.text is None: + raise RuntimeError("Unexpected XML long response") + return int(xml_element.text.strip()) + + +def message_element(xml_element): + if xml_element.tag == "message": + return xml_element + if xml_element.tag == "list": + message_nodes = xml_element.xpath("./message") + if message_nodes: + return message_nodes[0] + raise RuntimeError("Unexpected XML message response") + + +def format_message_failure_detail(result: MessageTestResult | None) -> str: + if result is None: + return "No message result was captured." 
+ return f"Actual message result XML:\n{result.xml_text()}" + + +def xpath_text(element, expression: str) -> str | None: + values = element.xpath(expression) + if not values: + return None + value = values[0] + if hasattr(value, "text"): + text = value.text + else: + text = str(value) + return text.strip() if text is not None else None + + +def first_xpath(element, expression: str): + values = element.xpath(expression) + return values[0] if values else None + + +def parse_metadata_assertion_map(connector) -> dict[str, Any]: + connector_metadata = parse_map_element(first_xpath(connector, "./connectorMapContent/content")) + message_metadata = parse_map_element(first_xpath(connector, "./metaDataMap")) + return connector_metadata | message_metadata + + +def parse_map_element(map_element) -> dict[str, Any]: + if map_element is None: + return {} + + entries_parent = first_xpath(map_element, "./m") or map_element + result: dict[str, Any] = {} + for entry_element in entries_parent.xpath("./entry"): + children = list(entry_element) + if len(children) < 2: + continue + key = parse_scalar_element(children[0]) + value = parse_scalar_element(children[1]) + if key is not None: + result[str(key)] = value + return result + + +def parse_scalar_element(element): + if element is None: + return None + if len(element) == 0: + return (element.text or "").strip() + if element.tag in {"linked-hash-map", "map", "m", "content", "metaDataMap"}: + return parse_map_element(element) + if element.tag in {"linked-hash-set", "set"}: + return [parse_scalar_element(child) for child in element] + return parse_map_element(element) + + +def assert_content_matches(label: str, expected_bytes: bytes, actual_content: str | None) -> None: + if actual_content is None: + raise AssertionError(f"Expected {label} content but none was stored") + + actual_bytes = actual_content.encode("utf-8") + pattern = re.escape(expected_bytes) + pattern = pattern.replace(re.escape(ANY_WILDCARD), b".*?") + if not 
re.fullmatch(pattern, actual_bytes, flags=re.DOTALL): + raise AssertionError( + f"Content mismatch for {label}. Expected {expected_bytes!r}, found {actual_bytes!r}" + ) + + +def assert_mapping_subset(label: str, expected: dict[str, Any], actual: dict[str, Any], path: str = "") -> None: + for key, expected_value in expected.items(): + key_path = f"{path}.{key}" if path else str(key) + if key not in actual: + raise AssertionError(f"Metadata mismatch for {label}: missing key {key_path}") + + actual_value = actual[key] + if isinstance(expected_value, dict): + if not isinstance(actual_value, dict): + raise AssertionError(f"Metadata mismatch for {label}: expected mapping at {key_path}") + assert_mapping_subset(label, expected_value, actual_value, key_path) + elif expected_value != actual_value: + raise AssertionError( + f"Metadata mismatch for {label} at {key_path}: expected {expected_value!r}, found {actual_value!r}" + ) + + +def load_yaml_mapping(path: Path | None) -> dict[str, Any]: + if path is None or not path.exists(): + return {} + + yaml = import_yaml_module() + data = yaml.safe_load(path.read_text(encoding="utf-8")) + if data is None: + return {} + if not isinstance(data, dict): + raise RuntimeError(f"Expected YAML mapping in {path}") + return data + + +def import_yaml_module() -> Any: + spec = importlib.util.find_spec("yaml") + if spec is None or spec.loader is None: + raise RuntimeError("PyYAML is not installed in the runner image") + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def load_module_from_file(module_file: Path, module_name: str) -> ModuleType: + spec = importlib.util.spec_from_file_location(module_name, module_file) + if spec is None or spec.loader is None: + raise RuntimeError(f"Unable to load module from {module_file}") + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module diff --git a/ci/runner/run.py b/ci/runner/run.py new file mode 100644 
index 000000000..919e55d19 --- /dev/null +++ b/ci/runner/run.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +import sys + +from main import main + + +if __name__ == "__main__": + try: + sys.exit(main()) + except Exception as error: + print(str(error), file=sys.stderr, flush=True) + sys.exit(1) diff --git a/ci/runtests.ps1 b/ci/runtests.ps1 new file mode 100644 index 000000000..7db6edb5f --- /dev/null +++ b/ci/runtests.ps1 @@ -0,0 +1,67 @@ +param( + [string]$Configuration = "all", + [string]$AntBuildArgs = "", + [switch]$DisableUnitTests, + [switch]$KeepAlive +) + +$ErrorActionPreference = "Stop" +$PSNativeCommandUseErrorActionPreference = $true +$RootDir = (Resolve-Path (Join-Path $PSScriptRoot "..")).Path +$AlpineServerImage = "oie-ci-server:local-alpine-temurin21" +$UbuntuServerImage = "oie-ci-server:local-ubuntu-temurin21" +$RunnerImage = if ($env:RUNNER_IMAGE) { $env:RUNNER_IMAGE } else { "oie-ci-runner:local" } +$ResultsDir = Join-Path $RootDir "ci/test-results" + +function Build-Images { + $serverBuildArgs = @("build") + if ($AntBuildArgs) { + $serverBuildArgs += @( "--build-arg", "ANT_BUILD_ARGS=$AntBuildArgs" ) + } + if ($DisableUnitTests) { + $serverBuildArgs += @( "--build-arg", "ANT_BUILD_ARGS=-DdisableTests=true -DdisableSigning=true" ) + } + docker @($serverBuildArgs + @( "--target", "jre-run", "-t", $AlpineServerImage, $RootDir )) + docker @($serverBuildArgs + @( "--target", "jdk-run", "-t", $UbuntuServerImage, $RootDir )) + + docker build -t $RunnerImage (Join-Path $RootDir "ci/runner") +} + +function Invoke-Configuration([string]$Name) { + $serverImage = if ($Name -like "ubuntu-*") { $UbuntuServerImage } else { $AlpineServerImage } + $runnerArgs = @( + "--workspace", "/workspace", + "--configuration", $Name, + "--server-image", $serverImage, + "--results-root", "ci/test-results" + ) + + if ($KeepAlive) { + $runnerArgs += "--keep-alive" + } + + docker run --rm ` + --add-host host.docker.internal:host-gateway ` + -v 
/var/run/docker.sock:/var/run/docker.sock ` + -v "${RootDir}:/workspace" ` + $RunnerImage ` + @runnerArgs +} + +Build-Images + + if (Test-Path $ResultsDir) { + Remove-Item -Recurse -Force $ResultsDir + } + New-Item -ItemType Directory -Path $ResultsDir | Out-Null + +if ($Configuration -eq "all") { + Get-ChildItem (Join-Path $RootDir "ci/configurations") -Filter "*.compose.yml" | + Sort-Object Name | + ForEach-Object { + $name = $_.Name -replace "\.compose\.yml$", "" + Invoke-Configuration $name + } +} else { + Invoke-Configuration $Configuration +} diff --git a/ci/tests/101-raw-no-op/channels/01-raw-no-op/channel.xml b/ci/tests/101-raw-no-op/channels/01-raw-no-op/channel.xml new file mode 100644 index 000000000..923b5d43e --- /dev/null +++ b/ci/tests/101-raw-no-op/channels/01-raw-no-op/channel.xml @@ -0,0 +1,184 @@ + + 62af393b-ff61-47ec-b5fa-ccd2cd08ce55 + 2 + Noop + + 1 + + 0 + sourceConnector + + + + None + true + false + false + 1 + + + Default Resource + [Default Resource] + + + 1000 + + + + + RAW + RAW + + + JavaScript + + + + + + JavaScript + + + + + + + + Channel Reader + SOURCE + true + true + + + + 1 + Destination 1 + + + + false + false + 10000 + false + 0 + false + false + 1 + + false + + + Default Resource + [Default Resource] + + + 1000 + true + + none + ${message.encodedData} + + + + + RAW + RAW + + + JavaScript + + + + + + JavaScript + + + + + + + RAW + RAW + + + JavaScript + + + + + + JavaScript + + + + + + + + Channel Writer + DESTINATION + true + true + + + // Modify the message variable below to pre process data +return message; + // This script executes once after a message has been processed +// Responses returned from here will be stored as "Postprocessor" in the response map +return; + // This script executes once when the channel is deployed +// You only have access to the globalMap and globalChannelMap here to persist data +return; + // This script executes once when the channel is undeployed +// You only have access to the globalMap and 
globalChannelMap here to persist data +return; + + true + DEVELOPMENT + false + false + false + false + false + false + STARTED + true + + + SOURCE + STRING + mirth_source + + + TYPE + STRING + mirth_type + + + + None + + + + + Default Resource + [Default Resource] + + + + + + true + + + America/Chicago + + + true + false + + 1 + + + \ No newline at end of file diff --git a/ci/tests/101-raw-no-op/channels/01-raw-no-op/messages/01-hello-world/dest01 b/ci/tests/101-raw-no-op/channels/01-raw-no-op/messages/01-hello-world/dest01 new file mode 100644 index 000000000..6c70ac283 --- /dev/null +++ b/ci/tests/101-raw-no-op/channels/01-raw-no-op/messages/01-hello-world/dest01 @@ -0,0 +1 @@ +((ANY))Hello world!((ANY)) \ No newline at end of file diff --git a/ci/tests/101-raw-no-op/channels/01-raw-no-op/messages/01-hello-world/source b/ci/tests/101-raw-no-op/channels/01-raw-no-op/messages/01-hello-world/source new file mode 100644 index 000000000..6769dd60b --- /dev/null +++ b/ci/tests/101-raw-no-op/channels/01-raw-no-op/messages/01-hello-world/source @@ -0,0 +1 @@ +Hello world! \ No newline at end of file diff --git a/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/channel.xml b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/channel.xml new file mode 100644 index 000000000..39db10066 --- /dev/null +++ b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/channel.xml @@ -0,0 +1,382 @@ + + 33538c13-5a0c-4399-9aca-b300a92cd712 + 2 + hl7-no-op + + 1 + + 0 + sourceConnector + + + + Auto-generate (Destinations completed) + true + false + false + 1 + + + Default Resource + [Default Resource] + + + 1000 + + + + + HL7V2 + HL7V2 + + + true + true + false + false + false + \r + true + + + false + false + \r + + + MSH_Segment + + + + \r + AA + + AE + An Error Occurred Processing Message. + AR + Message Rejected. 
+ false + yyyyMMddHHmmss.SSS + + + AA,CA + AE,CE + AR,CR + true + Destination_Encoded + + + + + + true + true + false + false + false + \r + true + + + false + false + \r + + + MSH_Segment + + + + \r + AA + + AE + An Error Occurred Processing Message. + AR + Message Rejected. + false + yyyyMMddHHmmss.SSS + + + AA,CA + AE,CE + AR,CR + true + Destination_Encoded + + + + + + + + Channel Reader + SOURCE + true + true + + + + 1 + Destination 1 + + + + false + false + 10000 + false + 0 + false + false + 1 + + false + + + Default Resource + [Default Resource] + + + 1000 + true + + none + ${message.encodedData} + + + + + HL7V2 + HL7V2 + + + true + true + false + false + false + \r + true + + + false + false + \r + + + MSH_Segment + + + + \r + AA + + AE + An Error Occurred Processing Message. + AR + Message Rejected. + false + yyyyMMddHHmmss.SSS + + + AA,CA + AE,CE + AR,CR + true + Destination_Encoded + + + + + + true + true + false + false + false + \r + true + + + false + false + \r + + + MSH_Segment + + + + \r + AA + + AE + An Error Occurred Processing Message. + AR + Message Rejected. + false + yyyyMMddHHmmss.SSS + + + AA,CA + AE,CE + AR,CR + true + Destination_Encoded + + + + + + + HL7V2 + HL7V2 + + + true + true + false + false + false + \r + true + + + false + false + \r + + + MSH_Segment + + + + \r + AA + + AE + An Error Occurred Processing Message. + AR + Message Rejected. + false + yyyyMMddHHmmss.SSS + + + AA,CA + AE,CE + AR,CR + true + Destination_Encoded + + + + + + true + true + false + false + false + \r + true + + + false + false + \r + + + MSH_Segment + + + + \r + AA + + AE + An Error Occurred Processing Message. + AR + Message Rejected. 
+ false + yyyyMMddHHmmss.SSS + + + AA,CA + AE,CE + AR,CR + true + Destination_Encoded + + + + + + + + Channel Writer + DESTINATION + true + true + + + // Modify the message variable below to pre process data +return message; + // This script executes once after a message has been processed +// Responses returned from here will be stored as "Postprocessor" in the response map +return; + // This script executes once when the channel is deployed +// You only have access to the globalMap and globalChannelMap here to persist data +return; + // This script executes once when the channel is undeployed +// You only have access to the globalMap and globalChannelMap here to persist data +return; + + true + DEVELOPMENT + false + false + false + false + false + false + STARTED + true + + + SOURCE + STRING + mirth_source + + + TYPE + STRING + mirth_type + + + + None + + + + + Default Resource + [Default Resource] + + + + + + true + + + America/Chicago + + + true + false + + 1 + + + \ No newline at end of file diff --git a/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/dest01_metadata.yml b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/dest01_metadata.yml new file mode 100644 index 000000000..bb0807341 --- /dev/null +++ b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/dest01_metadata.yml @@ -0,0 +1,2 @@ +mirth_source: "Some Other App" +mirth_type: "ADT-A01" diff --git a/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/source b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/source new file mode 100644 index 000000000..aa0a7adef --- /dev/null +++ b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/source @@ -0,0 +1,5 @@ +MSH|^~\&|Destination|Some Other App|Ehrsource|Or|202405221430||ADT^A01|Msgid99999|P|2.3| +EVN|A01|202405221430|| +PID|1|222333^^^2^MRN2|888777||Smith^Alice^^^^||19850412|F||Bl|456 Other St^^There^Or^97201^Usa||(503)555-1234|||S|Cat|111222333| 
+NK1|1|Smith^Robert^|Husband||(503)555-6789||||NK^Next Of Kin +PV1|1|I|3003^4004^02||||654321^Surgeon^Sarah^L^^Dr|||||||Emr|B1| diff --git a/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/source_response b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/source_response new file mode 100644 index 000000000..d0124944a --- /dev/null +++ b/ci/tests/110-hl7-no-op/channels/01-hl7-no-op/messages/01-adt-a01/source_response @@ -0,0 +1,2 @@ +MSH|^~\&|Ehrsource|Or|Destination|Some Other App|((ANY))||ACK|((ANY))|P|2.3 +MSA|AA|Msgid99999 diff --git a/server/basedir-includes/configure-from-env b/server/basedir-includes/configure-from-env new file mode 100755 index 000000000..3ab0ebbb7 --- /dev/null +++ b/server/basedir-includes/configure-from-env @@ -0,0 +1,247 @@ +#!/usr/bin/env bash +# +# SPDX-License-Identifier: MPL-2.0 +# SPDX-FileCopyrightText: 2023 NextGen Healthcare +# + +set -e + +APP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +custom_extension_count=`ls -1 "$APP_DIR"/custom-extensions/*.zip 2>/dev/null | wc -l` +if [ $custom_extension_count != 0 ]; then + echo "Found ${custom_extension_count} custom extensions." + for extension in $(ls -1 "$APP_DIR"/custom-extensions/*.zip); do + unzip -o -q $extension -d "$APP_DIR/extensions" + done +fi + +# set storepass and keypass to 'changeme' so they aren't overwritten later +KEYSTORE_PASS=changeme +sed -i "s/^keystore\.storepass\s*=\s*.*\$/keystore.storepass = ${KEYSTORE_PASS//\//\\/}/" "$APP_DIR/conf/mirth.properties" +sed -i "s/^keystore\.keypass\s*=\s*.*\$/keystore.keypass = ${KEYSTORE_PASS//\//\\/}/" "$APP_DIR/conf/mirth.properties" + +# merge the environment variables into /opt/engine/conf/mirth.properties +# db type +if ! [ -z "${DATABASE+x}" ]; then + sed -i "s/^database\s*=\s*.*\$/database = ${DATABASE//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# db username +if ! 
[ -z "${DATABASE_USERNAME+x}" ]; then + sed -i "s/^database\.username\s*=\s*.*\$/database.username = ${DATABASE_USERNAME//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# db password +if ! [ -z "${DATABASE_PASSWORD+x}" ]; then + sed -i "s/^database\.password\s*=\s*.*\$/database.password = ${DATABASE_PASSWORD//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# db url +if ! [ -z "${DATABASE_URL+x}" ]; then + sed -i "s/^database\.url\s*=\s*.*\$/database.url = ${DATABASE_URL//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# database max connections +if ! [ -z "${DATABASE_MAX_CONNECTIONS+x}" ]; then + sed -i "s/^database\.max-connections\s*=\s*.*\$/database.max-connections = ${DATABASE_MAX_CONNECTIONS//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# database max retries +if ! [ -z "${DATABASE_MAX_RETRY+x}" ]; then + sed -i "s/^database\.connection\.maxretry\s*=\s*.*\$/database.connection.maxretry = ${DATABASE_MAX_RETRY//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# database retry wait time +if ! [ -z "${DATABASE_RETRY_WAIT+x}" ]; then + sed -i "s/^database\.connection\.retrywaitinmilliseconds\s*=\s*.*\$/database.connection.retrywaitinmilliseconds = ${DATABASE_RETRY_WAIT//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# keystore storepass +if ! [ -z "${KEYSTORE_STOREPASS+x}" ]; then + sed -i "s/^keystore\.storepass\s*=\s*.*\$/keystore.storepass = ${KEYSTORE_STOREPASS//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# keystore keypass +if ! [ -z "${KEYSTORE_KEYPASS+x}" ]; then + sed -i "s/^keystore\.keypass\s*=\s*.*\$/keystore.keypass = ${KEYSTORE_KEYPASS//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +if ! [ -z "${KEYSTORE_TYPE+x}" ]; then + sed -i "s/^keystore\.type\s*=\s*.*\$/keystore.type = ${KEYSTORE_TYPE//\//\\/}/" "$APP_DIR/conf/mirth.properties" +fi + +# session store +if ! 
[ -z "${SESSION_STORE+x}" ]; then + LINE_COUNT=`grep "server.api.sessionstore" "$APP_DIR/conf/mirth.properties" | wc -l` + if [ $LINE_COUNT -lt 1 ]; then + echo -e "\nserver.api.sessionstore = ${SESSION_STORE//\//\\/}" >> "$APP_DIR/conf/mirth.properties" + else + sed -i "s/^server\.api\.sessionstore\s*=\s*.*\$/server.api.sessionstore = ${SESSION_STORE//\//\\/}/" "$APP_DIR/conf/mirth.properties" + fi +fi + +#server ID +if ! [ -z "${SERVER_ID+x}" ]; then + echo -e "server.id = ${SERVER_ID//\//\\/}" > "$APP_DIR/appdata/server.id" +fi + +# merge extra environment variables starting with _MP_ into mirth.properties +while read -r keyvalue; do + KEY="${keyvalue%%=*}" + VALUE="${keyvalue#*=}" + VALUE=$(tr -dc '\40-\176' <<< "$VALUE") + + if ! [ -z "${KEY}" ] && ! [ -z "${VALUE}" ] && ! [[ ${VALUE} =~ ^\ +$ ]]; then + + # filter for variables starting with "_MP_" + if [[ ${KEY} == _MP_* ]]; then + + # echo "found property ${KEY}=${VALUE}" + + # example: _MP_DATABASE_MAX__CONNECTIONS -> database.max-connections + + # remove _MP_ + # example: DATABASE_MAX__CONNECTIONS + ACTUAL_KEY=${KEY:4} + + # switch '__' to '-' + # example: DATABASE_MAX-CONNECTIONS + ACTUAL_KEY="${ACTUAL_KEY//__/-}" + + # switch '_' to '.' + # example: DATABASE.MAX-CONNECTIONS + ACTUAL_KEY="${ACTUAL_KEY//_/.}" + + # lower case + # example: database.max-connections + ACTUAL_KEY="${ACTUAL_KEY,,}" + + # if key does not exist in mirth.properties append it at bottom + LINE_COUNT=`grep "^${ACTUAL_KEY}" "$APP_DIR/conf/mirth.properties" | wc -l` + if [ $LINE_COUNT -lt 1 ]; then + # echo "key ${ACTUAL_KEY} not found in mirth.properties, appending. Value = ${VALUE}" + echo -e "\n${ACTUAL_KEY} = ${VALUE//\//\\/}" >> "$APP_DIR/conf/mirth.properties" + else # otherwise key exists, overwrite it + # echo "key ${ACTUAL_KEY} exists, overwriting. 
Value = ${VALUE}" + ESCAPED_KEY="${ACTUAL_KEY//./\\.}" + sed -i "s/^${ESCAPED_KEY}\s*=\s*.*\$/${ACTUAL_KEY} = ${VALUE//\//\\/}/" "$APP_DIR/conf/mirth.properties" + fi + fi + fi +done <<< "`printenv`" + +# merge vmoptions into /opt/engine/oieserver.vmoptions +if ! [ -z "${VMOPTIONS+x}" ]; then + PREV_IFS="$IFS" + IFS="," + read -ra vmoptions <<< "$VMOPTIONS" + IFS="$PREV_IFS" + + for vmoption in "${vmoptions[@]}" + do + echo "${vmoption}" >> "$APP_DIR/oieserver.vmoptions" + done +fi + +# merge the user's secret mirth.properties +# takes a whole mirth.properties file and merges line by line with /opt/engine/conf/mirth.properties +if [ -f /run/secrets/mirth_properties ]; then + + # add new line in case /opt/engine/conf/mirth.properties doesn't end with one + echo "" >> "$APP_DIR/conf/mirth.properties" + + while read -r keyvalue; do + KEY="${keyvalue%%=*}" + VALUE="${keyvalue#*=}" + + # remove leading and trailing white space + KEY="$(echo -e "${KEY}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + VALUE="$(echo -e "${VALUE}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + + if ! [ -z "${KEY}" ] && ! [ -z "${VALUE}" ] && ! [[ ${VALUE} =~ ^\ +$ ]]; then + # if key does not exist in mirth.properties append it at bottom + LINE_COUNT=`grep "^${KEY}" "$APP_DIR/conf/mirth.properties" | wc -l` + if [ $LINE_COUNT -lt 1 ]; then + # echo "key ${KEY} not found in mirth.properties, appending. Value = ${VALUE}" + echo -e "${KEY} = ${VALUE//\//\\/}" >> "$APP_DIR/conf/mirth.properties" + else # otherwise key exists, overwrite it + # echo "key ${KEY} exists, overwriting. 
Value = ${VALUE}" + ESCAPED_KEY="${KEY//./\\.}" + sed -i "s/^${ESCAPED_KEY}\s*=\s*.*\$/${KEY} = ${VALUE//\//\\/}/" "$APP_DIR/conf/mirth.properties" + fi + fi + done <<< "`cat /run/secrets/mirth_properties`" +fi + +# merge the user's secret vmoptions +# takes a whole oieserver.vmoptions file and merges line by line with /opt/engine/oieserver.vmoptions +if [ -f /run/secrets/oieserver_vmoptions ]; then + (cat /run/secrets/oieserver_vmoptions ; echo "") >> "$APP_DIR/oieserver.vmoptions" +fi + +# download jars from this url "$CUSTOM_JARS_DOWNLOAD", set by user +if ! [ -z "${CUSTOM_JARS_DOWNLOAD+x}" ]; then + echo "Downloading Jars at ${CUSTOM_JARS_DOWNLOAD}" + if ! [ -z "${ALLOW_INSECURE}" ] && [ "${ALLOW_INSECURE}" == "true" ]; then + curl -ksSLf "${CUSTOM_JARS_DOWNLOAD}" -o userJars.zip || echo "problem with custom jars download" + else + curl -sSLf "${CUSTOM_JARS_DOWNLOAD}" -o userJars.zip || echo "problem with custom jars download" + fi + + # Unzipping contents of userJars.zip into /opt/engine/server-launcher-lib folder + if [ -e "userJars.zip" ]; then + echo "Unzipping contents of userJars.zip into $APP_DIR/server-launcher-lib" + unzip userJars.zip -d "$APP_DIR/server-launcher-lib" + # removing the downloaded zip file + rm userJars.zip + fi +fi + +# download extensions from this url "$EXTENSIONS_DOWNLOAD", set by user +if ! [ -z "${EXTENSIONS_DOWNLOAD+x}" ]; then + echo "Downloading extensions at ${EXTENSIONS_DOWNLOAD}" + if ! 
[ -z "${ALLOW_INSECURE}" ] && [ "${ALLOW_INSECURE}" == "true" ]; then + curl -ksSLf "${EXTENSIONS_DOWNLOAD}" -o userExtensions.zip || echo "problem with extensions download" + else + curl -sSLf "${EXTENSIONS_DOWNLOAD}" -o userExtensions.zip || echo "problem with extensions download" + fi + + # Unzipping contents of userExtensions.zip + if [ -e "userExtensions.zip" ]; then + echo "Unzipping contents of userExtensions.zip" + mkdir /tmp/userextensions + unzip userExtensions.zip -d /tmp/userextensions + # removing the downloaded zip file + rm userExtensions.zip + + # Unzipping contents of individual extension zip files into /opt/engine/extensions folder + zipFileCount=`ls -1 /tmp/userextensions/*.zip 2>/dev/null | wc -l` + if [ $zipFileCount != 0 ]; then + echo "Unzipping contents of /tmp/userextensions/ zips into $APP_DIR/extensions" + for f in /tmp/userextensions/*.zip; do unzip "$f" -d "$APP_DIR/extensions"; done + fi + # removing the tmp folder + rm -rf /tmp/userextensions + fi +fi + +# download keystore +if ! [ -z "${KEYSTORE_DOWNLOAD+x}" ]; then + echo "Downloading keystore at ${KEYSTORE_DOWNLOAD}" + if ! [ -z "${ALLOW_INSECURE}" ] && [ "${ALLOW_INSECURE}" == "true" ]; then + curl -ksSLf "${KEYSTORE_DOWNLOAD}" -o "$APP_DIR/appdata/keystore.jks" || echo "problem with keystore download" + else + curl -sSLf "${KEYSTORE_DOWNLOAD}" -o "$APP_DIR/appdata/keystore.jks" || echo "problem with keystore download" + fi +fi + +# if delay is set as an environment variable then wait that long in seconds +if ! 
[ -z "${DELAY+x}" ]; then + sleep $DELAY +fi + +# if there are any arguments, invoke them as a command +if [ $# -ne 0 ]; then + exec "$@" +fi diff --git a/server/build.xml b/server/build.xml index 011b3264e..b450642bb 100644 --- a/server/build.xml +++ b/server/build.xml @@ -1021,6 +1021,9 @@ + + + diff --git a/server/test/com/mirth/connect/server/launcher/Log4jMigrationsTest.java b/server/test/com/mirth/connect/server/launcher/Log4jMigrationsTest.java index 126c5f3da..7687563c4 100644 --- a/server/test/com/mirth/connect/server/launcher/Log4jMigrationsTest.java +++ b/server/test/com/mirth/connect/server/launcher/Log4jMigrationsTest.java @@ -79,7 +79,7 @@ public void testMigrateLog4jFailsGracefullyWithReadOnlyFile() throws Exception { try { Files.setPosixFilePermissions(path, EnumSet.of(PosixFilePermission.OWNER_READ)); - assertFalse(Files.isWritable(path)); + Assume.assumeFalse("Test requires the file to be effectively non-writable", Files.isWritable(path)); System.setErr(new PrintStream(errBytes, true, StandardCharsets.UTF_8.name())); Log4jMigrations.migrateConfiguration(file);