diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 3e7e9e9cf091..ffc0150ebac5 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM mcr.microsoft.com/devcontainers/typescript-node:18-bookworm +FROM mcr.microsoft.com/devcontainers/typescript-node:22-bookworm RUN apt-get install -y wget bzip2 diff --git a/.github/actions/build-vsix/action.yml b/.github/actions/build-vsix/action.yml index eaabe5141e8b..95fec979b08e 100644 --- a/.github/actions/build-vsix/action.yml +++ b/.github/actions/build-vsix/action.yml @@ -22,7 +22,7 @@ runs: using: 'composite' steps: - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: ${{ inputs.node_version }} cache: 'npm' @@ -32,7 +32,7 @@ runs: # Jedi LS depends on dataclasses which is not in the stdlib in Python 3.7. - name: Use Python 3.9 for JediLSP - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: 3.9 cache: 'pip' @@ -93,7 +93,7 @@ runs: VSIX_NAME: ${{ inputs.vsix_name }} - name: Upload VSIX - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: ${{ inputs.artifact_name }} path: ${{ inputs.vsix_name }} diff --git a/.github/actions/lint/action.yml b/.github/actions/lint/action.yml index 9992b442c276..0bd5a2d8e1e2 100644 --- a/.github/actions/lint/action.yml +++ b/.github/actions/lint/action.yml @@ -10,7 +10,7 @@ runs: using: 'composite' steps: - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: ${{ inputs.node_version }} cache: 'npm' @@ -36,7 +36,7 @@ runs: shell: bash - name: Install Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' cache: 'pip' diff --git a/.github/instructions/learning.instructions.md b/.github/instructions/learning.instructions.md new file mode 100644 index 000000000000..28b085f486ce --- /dev/null +++ b/.github/instructions/learning.instructions.md @@ -0,0 +1,34 @@ +--- +applyTo: '**' +description: This document describes how to deal with learnings that you make. (meta instruction) +--- + +This document describes how to deal with learnings that you make. +It is a meta-instruction file. + +Structure of learnings: + +- Each instruction file has a "Learnings" section. +- Each learning has a counter that indicates how often that learning was useful (initially 1). +- Each learning has a 1 sentence description of the learning that is clear and concise. + +Example: + +```markdown +## Learnings + +- Prefer `const` over `let` whenever possible (1) +- Avoid `any` type (3) +``` + +When the user tells you "learn!", you should: + +- extract a learning from the recent conversation + - identify the problem that you created + - identify why it was a problem + - identify how you were told to fix it/how the user fixed it + - generate only one learning (1 sentence) that helps to summarize the insight gained +- then, add the reflected learning to the "Learnings" section of the most appropriate instruction file + +Important: Whenever a learning was really useful, increase the counter! When a learning was not useful and just caused more problems, decrease the counter.
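For illustration, a hypothetical "learn!" update might look like this (the learning text and counters below are invented for the example; real entries come from the conversation):

```markdown
## Learnings

<!-- existing entry proved useful again, so its counter goes up -->
- Avoid `any` type (4)
<!-- newly extracted from the conversation, so it starts at 1 -->
- Run the formatter on test files before committing (1)
```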
diff --git a/.github/instructions/python-quality-checks.instructions.md b/.github/instructions/python-quality-checks.instructions.md new file mode 100644 index 000000000000..48f37529dfbc --- /dev/null +++ b/.github/instructions/python-quality-checks.instructions.md @@ -0,0 +1,97 @@ +--- +applyTo: 'python_files/**' +description: Guide for running and fixing Python quality checks (Ruff and Pyright) that run in CI +--- + +# Python Quality Checks — Ruff and Pyright + +Run the same Python quality checks that run in CI. All checks target `python_files/` and use config from `python_files/pyproject.toml`. + +## Commands + +```bash +npm run check-python # Run both Ruff and Pyright +npm run check-python:ruff # Linting and formatting only +npm run check-python:pyright # Type checking only +``` + +## Fixing Ruff Errors + +**Auto-fix most issues:** + +```bash +cd python_files +python -m ruff check . --fix +python -m ruff format +npm run check-python:ruff # Verify +``` + +**Manual fixes:** + +- Ruff shows file, line number, rule code (e.g., `F841`), and description +- Open the file, read the error, fix the code +- Common: line length (100 char max), import sorting, unused variables + +## Fixing Pyright Errors + +**Common patterns and fixes:** + +- **Undefined variable/import**: Add the missing import +- **Type mismatch**: Correct the type or add type annotations +- **Missing return type**: Add `-> ReturnType` to function signatures + ```python + def my_function() -> str: # Add return type + return "result" + ``` + +**Verify:** + +```bash +npm run check-python:pyright +``` + +## Configuration + +- **Ruff**: Line length 100, Python 3.9+, 40+ rule families (flake8, isort, pyupgrade, etc.) +- **Pyright**: Version 1.1.308 (pinned to match CI; local runs use whichever version is installed), ignores `lib/` and 15+ legacy files +- Config: `python_files/pyproject.toml` sections `[tool.ruff]` and `[tool.pyright]` + +## Troubleshooting + +**"Module not found" in Pyright**: Install dependencies + +```bash +python -m pip install --upgrade -r build/test-requirements.txt +nox --session install_python_libs +``` + +**Import order errors**: Auto-fix with `ruff check . --fix` + +**Type errors in ignored files**: Legacy files in the `pyproject.toml` ignore list — fix them only if you are working on those files + +## When Writing Tests + +**Always format your test files before committing:** + +```bash +cd python_files +ruff format tests/ # Format all test files +# or format specific files: +ruff format tests/unittestadapter/test_utils.py +``` + +**Best practice workflow:** + +1. Write your test code +2. Run `ruff format` on the test files +3. Run the tests to verify they pass +4. Run `npm run check-python` to catch any remaining issues + +This ensures your tests pass both functional checks and quality checks in CI. + +## Learnings + +- Always run `npm run check-python` before pushing to catch CI failures early (1) +- Use `ruff check . --fix` to auto-fix most linting issues before manual review (1) +- Pyright version must match CI (1.1.308) to avoid inconsistent results between local and CI runs (1) +- Always run `ruff format` on test files after writing them to avoid formatting CI failures (1) diff --git a/.github/instructions/testing-workflow.instructions.md b/.github/instructions/testing-workflow.instructions.md new file mode 100644 index 000000000000..948886a59635 --- /dev/null +++ b/.github/instructions/testing-workflow.instructions.md @@ -0,0 +1,580 @@ +--- +applyTo: '**/test/**' +--- + +# AI Testing Workflow Guide: Write, Run, and Fix Tests + +This guide provides comprehensive instructions for AI agents on the complete testing workflow: writing tests, running them, diagnosing failures, and fixing issues. Use this guide whenever working with test files or when users request testing tasks. + +## Complete Testing Workflow + +This guide covers the full testing lifecycle: + +1. **📝 Writing Tests** - Create comprehensive test suites +2. **▶️ Running Tests** - Execute tests using VS Code tools +3. **🔍 Diagnosing Issues** - Analyze failures and errors +4. **🛠️ Fixing Problems** - Resolve compilation and runtime issues +5. **✅ Validation** - Ensure coverage and resilience + +### When to Use This Guide + +**User Requests Testing:** + +- "Write tests for this function" +- "Run the tests" +- "Fix the failing tests" +- "Test this code" +- "Add test coverage" + +**File Context Triggers:** + +- Working in `**/test/**` directories +- Files ending in `.test.ts` or `.unit.test.ts` +- Test failures or compilation errors +- Coverage reports or test output analysis + +## Test Types + +When implementing tests as an AI agent, choose between two main types: + +### Unit Tests (`*.unit.test.ts`) + +- **Fast isolated testing** - Mock all external dependencies +- **Use for**: Pure functions, business logic, data transformations +- **Execute with**: `runTests` tool with specific file patterns +- **Mock everything** - VS Code APIs automatically mocked via `/src/test/unittests.ts` + +### Extension Tests (`*.test.ts`) + +- **Full VS Code integration** - Real environment with actual APIs +- **Use for**: Command registration, UI interactions, extension lifecycle +- **Execute with**: VS Code launch configurations or `runTests` tool +- **Slower but comprehensive** - Tests complete user workflows + +## 🤖 Agent Tool Usage for Test Execution + +### Primary Tool: `runTests` + +Use the `runTests` tool to execute tests programmatically rather than through terminal commands, for better integration and result parsing: + +```typescript +// Run specific test files +await runTests({ + files: ['/absolute/path/to/test.unit.test.ts'], + mode: 'run', +}); + +// Run tests with coverage +await runTests({ + files: ['/absolute/path/to/test.unit.test.ts'], + mode: 'coverage', + coverageFiles: ['/absolute/path/to/source.ts'], +}); + +// Run specific test names +await runTests({ + files: ['/absolute/path/to/test.unit.test.ts'], + testNames: ['should handle edge case', 'should validate input'], +}); +``` + +### Compilation Requirements + +Before running tests, ensure the code is compiled. Always start `npm run watch-tests` before test execution so the TypeScript files are built.
Recompile after making import/export changes before running tests, as stubs won't work if they're applied to old compiled JavaScript that doesn't have the updated imports: + +```typescript +// Start watch mode for auto-compilation +await run_in_terminal({ + command: 'npm run watch-tests', + isBackground: true, + explanation: 'Start test compilation in watch mode', +}); + +// Or compile manually +await run_in_terminal({ + command: 'npm run compile-tests', + isBackground: false, + explanation: 'Compile TypeScript test files', +}); +``` + +### Alternative: Terminal Execution + +Use the terminal for targeted test runs when the `runTests` tool is unavailable. Note: When a targeted test run yields 0 tests, first verify the compiled JS exists under `out/test` (rootDir is `src`); absence almost always means the test file sits outside `src` or compilation hasn't run yet: + +```typescript +// Run specific test suite +await run_in_terminal({ + command: 'npm run unittest -- --grep "Suite Name"', + isBackground: false, + explanation: 'Run targeted unit tests', +}); +``` + +## 🔍 Diagnosing Test Failures + +### Common Failure Patterns + +**Compilation Errors:** + +```typescript +// Missing imports +if (error.includes('Cannot find module')) { + await addMissingImports(testFile); +} + +// Type mismatches +if (error.includes("Type '") && error.includes("' is not assignable")) { + await fixTypeIssues(testFile); +} +``` + +**Runtime Errors:** + +```typescript +// Mock setup issues +if (error.includes('stub') || error.includes('mock')) { + await fixMockConfiguration(testFile); +} + +// Assertion failures +if (error.includes('AssertionError')) { + await analyzeAssertionFailure(error); +} +``` + +### Systematic Failure Analysis + +Fix test issues iteratively - run tests, analyze failures, apply fixes, repeat until passing. When unit tests fail with VS Code API errors like `TypeError: X is not a constructor` or `Cannot read properties of undefined (reading 'Y')`, check if VS Code APIs are properly mocked in `/src/test/unittests.ts` - add missing APIs following the existing pattern. + +```typescript +interface TestFailureAnalysis { + type: 'compilation' | 'runtime' | 'assertion' | 'timeout'; + message: string; + location: { file: string; line: number; col: number }; + suggestedFix: string; +} + +function analyzeFailure(failure: TestFailure): TestFailureAnalysis { + if (failure.message.includes('Cannot find module')) { + return { + type: 'compilation', + message: failure.message, + location: failure.location, + suggestedFix: 'Add missing import statement', + }; + } + // ... other failure patterns +} +``` + +### Agent Decision Logic for Test Type Selection + +**Choose Unit Tests (`*.unit.test.ts`) when analyzing:** + +- Functions with clear inputs/outputs and no VS Code API dependencies +- Data transformation, parsing, or utility functions +- Business logic that can be isolated with mocks +- Error handling scenarios with predictable inputs + +**Choose Extension Tests (`*.test.ts`) when analyzing:** + +- Functions that register VS Code commands or use `vscode.*` APIs +- UI components, tree views, or command palette interactions +- File system operations requiring workspace context +- Extension lifecycle events (activation, deactivation) + +**Agent Implementation Pattern:** + +```typescript +function determineTestType(functionCode: string): 'unit' | 'extension' { + if ( + functionCode.includes('vscode.') || + functionCode.includes('commands.register') || + functionCode.includes('window.') || + functionCode.includes('workspace.') + ) { + return 'extension'; + } + return 'unit'; +} +``` + +## 🎯 Step 1: Automated Function Analysis + +As an AI agent, analyze the target function systematically: + +### Code Analysis Checklist + +```typescript +interface FunctionAnalysis { + name: string; + inputs: string[]; // Parameter types and names + outputs: string; // Return type + dependencies: string[]; // External modules/APIs used + sideEffects: string[]; // Logging, file system, network calls + errorPaths: string[]; // Exception scenarios + testType: 'unit' | 'extension'; +} +``` + +### Analysis Implementation + +1. **Read function source** using `read_file` tool +2. **Identify imports** - look for `vscode.*`, `child_process`, `fs`, etc. +3. **Map data flow** - trace inputs through transformations to outputs +4. **Catalog dependencies** - external calls that need mocking +5. **Document side effects** - logging, file operations, state changes + +### Test Setup Differences + +#### Unit Test Setup (\*.unit.test.ts) + +```typescript +// Mock VS Code APIs - handled automatically by unittests.ts +import * as sinon from 'sinon'; +import * as workspaceApis from '../../common/workspace.apis'; // Wrapper functions + +// Stub wrapper functions, not VS Code APIs directly +// Always mock wrapper functions (e.g., workspaceApis.getConfiguration()) instead of +// VS Code APIs directly to avoid stubbing issues +const mockGetConfiguration = sinon.stub(workspaceApis, 'getConfiguration'); +``` + +#### Extension Test Setup (\*.test.ts) + +```typescript +// Use real VS Code APIs +import * as vscode from 'vscode'; + +// Real VS Code APIs available - no mocking needed +const config = vscode.workspace.getConfiguration('python'); +``` + +## 🎯 Step 2: Generate Test Coverage Matrix + +Based on function analysis, automatically generate comprehensive test scenarios: + +### Coverage Matrix Generation + +```typescript +interface TestScenario { + category: 'happy-path' | 'edge-case' | 'error-handling' | 'side-effects'; + description: string; + inputs: Record<string, any>; + expectedOutput?: any; + expectedSideEffects?: string[]; + shouldThrow?: boolean; +} +``` + +### Automated Scenario Creation + +1. **Happy Path**: Normal execution with typical inputs +2. **Edge Cases**: Boundary conditions, empty/null inputs, unusual but valid data +3. **Error Scenarios**: Invalid inputs, dependency failures, exception paths +4.
**Side Effects**: Verify logging calls, file operations, state changes + +### Agent Pattern for Scenario Generation + +```typescript +function generateTestScenarios(analysis: FunctionAnalysis): TestScenario[] { + const scenarios: TestScenario[] = []; + + // Generate happy path for each input combination + scenarios.push(...generateHappyPathScenarios(analysis)); + + // Generate edge cases for boundary conditions + scenarios.push(...generateEdgeCaseScenarios(analysis)); + + // Generate error scenarios for each dependency + scenarios.push(...generateErrorScenarios(analysis)); + + return scenarios; +} +``` + +## 🗺️ Step 3: Plan Your Test Coverage + +### Create a Test Coverage Matrix + +#### Main Flows + +- ✅ **Happy path scenarios** - normal expected usage +- ✅ **Alternative paths** - different configuration combinations +- ✅ **Integration scenarios** - multiple features working together + +#### Edge Cases + +- 🔸 **Boundary conditions** - empty inputs, missing data +- 🔸 **Error scenarios** - network failures, permission errors +- 🔸 **Data validation** - invalid inputs, type mismatches + +#### Real-World Scenarios + +- ✅ **Fresh install** - clean slate +- ✅ **Existing user** - migration scenarios +- ✅ **Power user** - complex configurations +- 🔸 **Error recovery** - graceful degradation + +### Example Test Plan Structure + +```markdown +## Test Categories + +### 1. Configuration Migration Tests + +- No legacy settings exist +- Legacy settings already migrated +- Fresh migration needed +- Partial migration required +- Migration failures + +### 2. Configuration Source Tests + +- Global search paths +- Workspace search paths +- Settings precedence +- Configuration errors + +### 3. Path Resolution Tests + +- Absolute vs relative paths +- Workspace folder resolution +- Path validation and filtering + +### 4. Integration Scenarios + +- Combined configurations +- Deduplication logic +- Error handling flows +``` + +## 🔧 Step 4: Set Up Your Test Infrastructure + +### Test File Structure + +```typescript +// 1. Imports - group logically +import assert from 'node:assert'; +import * as sinon from 'sinon'; +import { Uri } from 'vscode'; +import * as logging from '../../../common/logging'; +import * as pathUtils from '../../../common/utils/pathUtils'; +import * as workspaceApis from '../../../common/workspace.apis'; + +// 2. Function under test +import { getAllExtraSearchPaths } from '../../../managers/common/nativePythonFinder'; + +// 3. Mock interfaces +interface MockWorkspaceConfig { + get: sinon.SinonStub; + inspect: sinon.SinonStub; + update: sinon.SinonStub; +} +``` + +### Mock Setup Strategy + +Create minimal mock objects with only the required methods, and use TypeScript type assertions (e.g., `mockApi as PythonEnvironmentApi`) to satisfy interface requirements instead of implementing every interface method when the test only needs a few. Mock only the methods actually used in the tests, and use `as unknown as Type` for TypeScript compatibility. + +```typescript +suite('Function Integration Tests', () => { + // 1. Declare all mocks + let mockGetConfiguration: sinon.SinonStub; + let mockGetWorkspaceFolders: sinon.SinonStub; + let mockTraceLog: sinon.SinonStub; + let mockTraceError: sinon.SinonStub; + let mockTraceWarn: sinon.SinonStub; + + // 2. Mock complex objects + let pythonConfig: MockWorkspaceConfig; + let envConfig: MockWorkspaceConfig; + + setup(() => { + // 3. Initialize all mocks + mockGetConfiguration = sinon.stub(workspaceApis, 'getConfiguration'); + mockGetWorkspaceFolders = sinon.stub(workspaceApis, 'getWorkspaceFolders'); + mockTraceLog = sinon.stub(logging, 'traceLog'); + mockTraceError = sinon.stub(logging, 'traceError'); + mockTraceWarn = sinon.stub(logging, 'traceWarn'); + + // 4. Set up default behaviors + mockGetWorkspaceFolders.returns(undefined); + + // 5. Create mock configuration objects + // When fixing mock environment creation, use null to truly omit + // properties rather than undefined + pythonConfig = { + get: sinon.stub(), + inspect: sinon.stub(), + update: sinon.stub(), + }; + + envConfig = { + get: sinon.stub(), + inspect: sinon.stub(), + update: sinon.stub(), + }; + }); + + teardown(() => { + sinon.restore(); // Always clean up! + }); +}); +``` + +## Step 5: Write Tests Using Mock → Run → Assert Pattern + +### The Three-Phase Pattern + +#### Phase 1: Mock (Set up the scenario) + +```typescript +test('Description of what this tests', async () => { + // Mock → Clear description of the scenario + pythonConfig.inspect.withArgs('venvPath').returns({ globalValue: '/path' }); + envConfig.inspect.withArgs('globalSearchPaths').returns({ globalValue: [] }); + mockGetWorkspaceFolders.returns([{ uri: Uri.file('/workspace') }]); +``` + +#### Phase 2: Run (Execute the function) + +```typescript +// Run +const result = await getAllExtraSearchPaths(); +``` + +#### Phase 3: Assert (Verify the behavior) + +```typescript + // Assert - Use set-based comparison for order-agnostic testing + const expected = new Set(['/expected', '/paths']); + const actual = new Set(result); + assert.strictEqual(actual.size, expected.size, 'Should have correct number of paths'); + assert.deepStrictEqual(actual, expected, 'Should contain exactly the expected paths'); + + // Verify side effects + // Use sinon.match() patterns for resilient assertions that don't break on minor output changes + assert(mockTraceLog.calledWith(sinon.match(/completion/i)), 'Should log completion'); +}); +``` + +## Step 6: Make Tests Resilient + +### Use Order-Agnostic Comparisons + +```typescript +// ❌ Brittle - depends on order +assert.deepStrictEqual(result, ['/path1', '/path2', '/path3']); + +// ✅ Resilient - order doesn't matter +const expected = new Set(['/path1', '/path2', '/path3']); +const actual = new Set(result); +assert.strictEqual(actual.size, expected.size, 'Should have correct number of paths'); +assert.deepStrictEqual(actual, expected, 'Should contain exactly the expected paths'); +``` + +### Use Flexible Error Message Testing + +```typescript +// ❌ Brittle - exact text matching +assert(mockTraceError.calledWith('Error during legacy python settings migration:')); + +// ✅ Resilient - pattern matching +assert(mockTraceError.calledWith(sinon.match.string, sinon.match.instanceOf(Error)), 'Should log migration error'); + +// ✅ Resilient - key terms with regex +assert(mockTraceError.calledWith(sinon.match(/migration.*error/i)), 'Should log migration error'); +``` + +### Handle Complex Mock Scenarios + +```typescript +// For functions that call the same mock multiple times +envConfig.inspect.withArgs('globalSearchPaths').returns({ globalValue: [] }); +envConfig.inspect + .withArgs('globalSearchPaths') + .onSecondCall() + .returns({ + globalValue: ['/migrated/paths'], + }); + +// Testing async functions with child processes: +// Call the function first to get a promise, then use setTimeout to emit mock events, +// then await the promise - this ensures proper timing of mock setup versus function execution + +// Cannot stub internal function calls within the same module after import - stub external +// dependencies instead (e.g., stub childProcessApis.spawnProcess rather than trying to stub +// helpers.isUvInstalled when testing helpers.shouldUseUv) because intra-module calls use +// direct references, not module exports +``` + +## 🧪 Step 7: Test Categories and Patterns + +### Configuration Tests + +- Test different setting combinations +- Test setting precedence (workspace > user > default) +- Test configuration errors and recovery +- Always use dynamic path construction with Node.js `path` module when testing functions that resolve paths against workspace folders to ensure cross-platform compatibility + +### Data Flow Tests + +- Test how data moves through the system +- Test transformations (path resolution, filtering) +- Test state changes (migrations, updates) + +### Error Handling Tests + +- Test graceful degradation +- Test error logging +- Test fallback behaviors + +### Integration Tests + +- Test multiple features together +- Test real-world scenarios +- Test edge case combinations + +## 📊 Step 8: Review and Refine + +### Test Quality Checklist + +- [ ] **Clear naming** - test names describe the scenario and expected outcome +- [ ] **Good coverage** - main flows, edge cases, error scenarios +- [ ] **Resilient assertions** - won't break due to minor changes +- [ ] **Readable structure** - follows Mock → Run → Assert pattern +- [ ] **Isolated tests** - each test is independent +- [ ] **Fast execution** - tests run quickly with proper mocking + +### Common Anti-Patterns to Avoid + +- ❌ Testing implementation details instead of behavior +- ❌ Brittle assertions that break on cosmetic changes +- ❌ Order-dependent tests that fail due to processing changes +- ❌ Tests that don't clean up mocks properly +- ❌ Overly complex test setup that's hard to understand + +## 🔄 Reviewing and Improving Existing Tests + +### Quick Review Process + +1. **Read test files** - Check structure and mock setup +2. **Run tests** - Establish baseline functionality +3. **Apply improvements** - Use patterns below. When reviewing existing tests, focus on behavior rather than implementation details in test names and assertions +4.
**Verify** - Ensure tests still pass + +### Common Fixes + +- Over-complex mocks → Minimal mocks with only needed methods +- Brittle assertions → Behavior-focused with error messages +- Vague test names → Clear scenario descriptions (transform "should return X when Y" into "should [expected behavior] when [scenario context]") +- Missing structure → Mock → Run → Assert pattern +- Untestable Node.js APIs → Create proxy abstraction functions (use function overloads to preserve intelligent typing while making functions mockable) + +## 🧠 Agent Learnings + +- When mocking `testController.createTestItem()` in unit tests, use `typemoq.It.isAny()` for parameters when testing handler behavior (not ID/label generation logic), but consider using specific matchers (e.g., `It.is((id: string) => id.startsWith('_error_'))`) when the actual values being passed are important for correctness - this balances test precision with maintainability (2) +- Remove unused variables from test code immediately - leftover tracking variables like `validationCallCount` that aren't referenced indicate dead code that should be simplified (1) +- Use `Uri.file(path).fsPath` for both sides of path comparisons in tests to ensure cross-platform compatibility - Windows converts forward slashes to backslashes automatically (1) diff --git a/.github/instructions/testing_feature_area.instructions.md b/.github/instructions/testing_feature_area.instructions.md new file mode 100644 index 000000000000..038dc1025ea5 --- /dev/null +++ b/.github/instructions/testing_feature_area.instructions.md @@ -0,0 +1,187 @@ +--- +applyTo: 'src/client/testing/**' +--- + +# Testing feature area — Discovery, Run, Debug, and Results + +This document maps the testing support in the extension: discovery, execution (run), debugging, result reporting and how those pieces connect to the codebase. It's written for contributors and agents who need to navigate, modify, or extend test support (both `unittest` and `pytest`). + +## Overview + +- Purpose: expose Python tests in the VS Code Test Explorer (TestController), support discovery, run, debug, and surface rich results and outputs. +- Scope: provider-agnostic orchestration + provider-specific adapters, TestController mapping, IPC with Python-side scripts, debug launch integration, and configuration management. + +## High-level architecture + +- Controller / UI bridge: orchestrates TestController requests and routes them to workspace adapters. +- Workspace adapter: provider-agnostic coordinator that translates TestController requests to provider adapters and maps payloads back into TestItems/TestRuns. +- Provider adapters: implement discovery/run/debug for `unittest` and `pytest` by launching Python scripts and wiring named-pipe IPC. +- Result resolver: translates Python-side JSON/IPCPayloads into TestController updates (start/pass/fail/output/attachments). +- Debug launcher: prepares debug sessions and coordinates the debugger attach flow with the Python runner. + +## Key components (files and responsibilities) + +- Entrypoints + - `src/client/testing/testController/controller.ts` — `PythonTestController` (main orchestrator). + - `src/client/testing/serviceRegistry.ts` — DI/wiring for testing services. +- Workspace orchestration + - `src/client/testing/testController/workspaceTestAdapter.ts` — `WorkspaceTestAdapter` (provider-agnostic entry used by controller).
+- Provider adapters + - Unittest + - `src/client/testing/testController/unittest/testDiscoveryAdapter.ts` + - `src/client/testing/testController/unittest/testExecutionAdapter.ts` + - Pytest + - `src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts` + - `src/client/testing/testController/pytest/pytestExecutionAdapter.ts` +- Result resolution and helpers + - `src/client/testing/testController/common/resultResolver.ts` — `PythonResultResolver` (maps payload -> TestController updates). + - `src/client/testing/testController/common/testItemUtilities.ts` — helpers for TestItem lifecycle. + - `src/client/testing/testController/common/types.ts` — `ITestDiscoveryAdapter`, `ITestExecutionAdapter`, `ITestResultResolver`, `ITestDebugLauncher`. + - `src/client/testing/testController/common/debugLauncher.ts` — debug session creation helper. + - `src/client/testing/testController/common/utils.ts` — named-pipe helpers and command builders (`startDiscoveryNamedPipe`, etc.). +- Configuration + - `src/client/testing/common/testConfigurationManager.ts` — per-workspace test settings. + - `src/client/testing/configurationFactory.ts` — configuration service factory. +- Utilities & glue + - `src/client/testing/utils.ts` — assorted helpers used by adapters. + - Python-side scripts: `python_files/unittestadapter/*`, `python_files/pytestadapter/*` — discovery/run code executed by adapters. + +## Python subprocess runners (what runs inside Python) + +The adapters in the extension don't implement test discovery/run logic themselves — they spawn a Python subprocess that runs small helper scripts located under `python_files/` and stream structured events back to the extension over the named-pipe IPC. This is a central part of the feature area; changes here usually require coordinated edits in both the TypeScript adapters and the Python scripts. + +- Unittest helpers (folder: `python_files/unittestadapter`) + + - `discovery.py` — performs `unittest` discovery and emits discovery payloads (test suites, cases, locations) on the IPC channel. + - `execution.py` / `django_test_runner.py` — run tests for `unittest` and, where applicable, Django test runners; emit run events (start, stdout/stderr, pass, fail, skip, teardown) and attachment info. + - `pvsc_utils.py`, `django_handler.py` — utility helpers used by the runners for environment handling and Django-specific wiring. + - The adapter TypeScript files (`testDiscoveryAdapter.ts`, `testExecutionAdapter.ts`) construct the command line, start a named-pipe listener, and spawn these Python scripts using the extension's ExecutionFactory (activated interpreter) so the scripts execute inside the user's selected environment. + +- Pytest helpers (folder: `python_files/vscode_pytest`) + + - `_common.py` — shared helpers for pytest runner scripts. + - `run_pytest_script.py` — the primary pytest runner used for discovery and execution; emits the same structured IPC payloads the extension expects (discovery events and run events). + - The `pytest` execution adapter (`pytestExecutionAdapter.ts`) and discovery adapter build the CLI to run `run_pytest_script.py`, start the pipe, and translate incoming payloads via `PythonResultResolver`. + +- IPC contract and expectations + + - Adapters rely on a stable JSON payload contract emitted by the Python scripts: identifiers for tests, event types (discovered, collected, started, passed, failed, skipped), timings, error traces, and optional attachments (logs, captured stdout/stderr, file links). - The extension maps these payloads to `TestItem`/`TestRun` updates via `PythonResultResolver` (`src/client/testing/testController/common/resultResolver.ts`). If you change payload shape, update the resolver and tests concurrently. + +- How the subprocess is started + - Execution adapters use the extension's `ExecutionFactory` (preferred) to get an activated interpreter and then spawn a child process that runs the helper script. The adapter will set up environment variables and command-line args (including the pipe name / run-id) so the Python runner knows where to send events and how to behave (discovery vs run vs debug). + - For debug sessions a debug-specific entry argument/port is passed and `common/debugLauncher.ts` coordinates starting a VS Code debug session that will attach to the Python process. + +## Core functionality (what to change where) + +- Discovery + - Entry: `WorkspaceTestAdapter.discoverTests` → provider discovery adapter. Adapter starts a named-pipe listener, spawns the discovery script in an activated interpreter, forwards discovery events to `PythonResultResolver` which creates/updates TestItems. + - Files: `workspaceTestAdapter.ts`, `*DiscoveryAdapter.ts`, `resultResolver.ts`, `testItemUtilities.ts`. +- Run / Execution + - Entry: `WorkspaceTestAdapter.executeTests` → provider execution adapter. Adapter spawns runner in an activated env, runner streams run events to the pipe, `PythonResultResolver` updates a `TestRun` with start/pass/fail and attachments. + - Files: `workspaceTestAdapter.ts`, `*ExecutionAdapter.ts`, `resultResolver.ts`. +- Debugging + - Flow: debug request flows like a run but goes through `debugLauncher.ts` to create a VS Code debug session with prepared ports/pipes. The Python runner coordinates attach/continue with the debugger. + - Files: `*ExecutionAdapter.ts`, `common/debugLauncher.ts`, `common/types.ts`. +- Result reporting + - `resultResolver.ts` is the canonical place to change how JSON payloads map to TestController constructs (messages, durations, error traces, attachments). + +## Typical workflows (short) + +- Full discovery + + 1. `PythonTestController` triggers discovery -> `WorkspaceTestAdapter.discoverTests`. + 2. Provider discovery adapter starts pipe and launches Python discovery script. + 3. Discovery events -> `PythonResultResolver` -> TestController tree updated. + +- Run tests + + 1. Controller collects TestItems -> creates `TestRun`. + 2. `WorkspaceTestAdapter.executeTests` delegates to execution adapter which launches the runner. + 3. Runner events arrive via pipe -> `PythonResultResolver` updates `TestRun`. + 4. On process exit the run is finalized. + +- Debug a test + 1. Debug request flows to execution adapter. + 2. Adapter prepares ports and calls `debugLauncher` to start a VS Code debug session with the run ID. + 3. Runner coordinates with the debugger; `PythonResultResolver` still receives and applies run events. + +## Tests and examples to inspect + +- Unit/integration tests for adapters and orchestration under `src/test/` (examples): + - `src/test/testing/common/testingAdapter.test.ts` + - `src/test/testing/testController/workspaceTestAdapter.unit.test.ts` + - `src/test/testing/testController/unittest/testExecutionAdapter.unit.test.ts` + - Adapter tests demonstrate expected telemetry, debug-launch payloads and result resolution. + +## History & evolution (brief) + +- Migration to TestController API: the code organizes around VS Code TestController, mapping legacy adapter behaviour into TestItems/TestRuns.
+- Named-pipe IPC: discovery/run use named-pipe IPC to stream events from Python runner scripts (`python_files/*`) which enables richer, incremental updates and debug coordination. +- Environment activation: adapters prefer the extension ExecutionFactory (activated interpreter) to run discovery and test scripts. + +## Pointers for contributors (practical) + +- To extend discovery output: update the Python discovery script in `python_files/*` and `resultResolver.ts` to parse new payload fields. +- To change run behaviour (args/env/timeouts): update the provider execution adapter (`*ExecutionAdapter.ts`) and add/update tests under `src/test/`. +- To change debug flow: edit `common/debugLauncher.ts` and adapters' debug paths; update tests that assert launch argument shapes. + +## Django support (how it works) + +- The extension supports Django projects by delegating discovery and execution to Django-aware Python helpers under `python_files/unittestadapter`. + - `python_files/unittestadapter/django_handler.py` contains helpers that invoke `manage.py` for discovery or execute Django test runners inside the project context. + - `python_files/unittestadapter/django_test_runner.py` provides `CustomDiscoveryTestRunner` and `CustomExecutionTestRunner` which integrate with the extension by using the same IPC contract (they use `UnittestTestResult` and `send_post_request` to emit discovery/run payloads). +- How adapters pass Django configuration: + - Execution adapters set environment variables (e.g. `MANAGE_PY_PATH`) and modify `PYTHONPATH` so Django code and the custom test runner are importable inside the spawned subprocess. + - For discovery the adapter may run the discovery helper which calls `manage.py test` with a custom test runner that emits discovery payloads instead of executing tests. +- Practical notes for contributors: + - Changes to Django discovery/execution often require edits in both `django_test_runner.py`/`django_handler.py` and the TypeScript adapters (`testDiscoveryAdapter.ts` / `testExecutionAdapter.ts`). + - The Django test runner expects the `TEST_RUN_PIPE` environment variable to be present to send IPC events (see `django_test_runner.py`). + +## Settings referenced by this feature area + +- The extension exposes several `python.testing.*` settings used by adapters and configuration code (declared in `package.json`): + - `python.testing.pytestEnabled`, `python.testing.unittestEnabled` — enable/disable frameworks. + - `python.testing.pytestPath`, `python.testing.pytestArgs`, `python.testing.unittestArgs` — command path and CLI arguments used when spawning helper scripts. + - `python.testing.cwd` — optional working directory used when running discovery/runs. + - `python.testing.autoTestDiscoverOnSaveEnabled`, `python.testing.autoTestDiscoverOnSavePattern` — control automatic discovery on save. + - `python.testing.debugPort` — default port used for debug runs. + - `python.testing.promptToConfigure` — whether to prompt users to configure tests when potential test folders are found. +- Where to look in the code: + - Settings are consumed by `src/client/testing/common/testConfigurationManager.ts`, `src/client/testing/configurationFactory.ts`, and adapters under `src/client/testing/testController/*` which read settings to build CLI args and env for subprocesses. + - The setting definitions and descriptions are in `package.json` and localized strings in `package.nls.json`.
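As a rough sketch of how an adapter might turn these settings into a subprocess command line (the helper below is hypothetical; the real logic lives in `testConfigurationManager.ts` and the execution adapters):

```typescript
import { Uri, workspace } from 'vscode';

// Hypothetical helper showing the settings-lookup pattern described above.
// The setting names are real (declared in package.json); the return shape
// and default values are assumptions for illustration only.
function buildPytestCommand(resource?: Uri): { cmd: string; args: string[]; cwd?: string } {
    const testing = workspace.getConfiguration('python.testing', resource);
    const cmd = testing.get<string>('pytestPath', 'pytest');
    const args = testing.get<string[]>('pytestArgs', []);
    const cwd = testing.get<string>('cwd') || undefined;
    return { cmd, args: [...args], cwd };
}
```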
+ +## Coverage support (how it works) + +- Coverage is supported by running the Python helper scripts with coverage enabled and then collecting a coverage payload from the runner. + - Pytest-side coverage logic lives in `python_files/vscode_pytest/__init__.py` (checks `COVERAGE_ENABLED`, imports `coverage`, computes per-file metrics and emits a `CoveragePayloadDict`). + - Unittest adapters enable coverage by setting environment variable(s) (e.g. `COVERAGE_ENABLED`) when launching the subprocess; adapters and `resultResolver.ts` handle the coverage profile kind (`TestRunProfileKind.Coverage`). +- Flow summary: + 1. User starts a Coverage run via Test Explorer (profile kind `Coverage`). + 2. Controller/adapters set `COVERAGE_ENABLED` (or equivalent) in the subprocess env and invoke the runner script. + 3. The Python runner collects coverage (using `coverage` or `pytest-cov`), builds a file-level coverage map, and sends a coverage payload back over the IPC. + 4. `PythonResultResolver` (`src/client/testing/testController/common/resultResolver.ts`) receives the coverage payload and stores `detailedCoverageMap` used by the TestController profile to show file-level coverage details. +- Tests that exercise coverage flows are under `src/test/testing/*` and `python_files/tests/*` (see `testingAdapter.test.ts` and adapter unit tests that assert `COVERAGE_ENABLED` is set appropriately). + +## Interaction with the VS Code API + +- TestController API + - The feature area is built on VS Code's TestController/TestItem/TestRun APIs (`vscode.tests.createTestController` / `tests.createTestController` in the code). The controller creates a `TestController` in `src/client/testing/testController/controller.ts` and synchronizes `TestItem` trees with discovery payloads. + - `PythonResultResolver` maps incoming JSON events to VS Code API calls: `testRun.appendOutput`, `testRun.passed/failed/skipped`, `testRun.end`, and `TestItem` updates (labels, locations, children). +- Debug API + - Debug runs use the Debug API to start an attach/launch session. The debug launcher implementation is in `src/client/testing/testController/common/debugLauncher.ts` which constructs a debug configuration and calls the VS Code debug API to start a session (e.g. `vscode.debug.startDebugging`). + - Debug adapter/resolver code in the extension's debugger modules may also be used when attaching to Django or test subprocesses. +- Commands and configuration + - The Test Controller wires commands that appear in the Test Explorer and editor context menus (see `package.json` contributes `commands`) and listens to configuration changes filtered by `python.testing` in `src/client/testing/main.ts`. +- The "Copy Test ID" command (`python.copyTestId`) can be accessed from both the Test Explorer context menu (`testing/item/context`) and the editor gutter icon context menu (`testing/item/gutter`). This command copies test identifiers to the clipboard in the appropriate format for the active test framework (pytest path format or unittest module.class.method format). +- Execution factory & activated environments + - Adapters use the extension `ExecutionFactory` to spawn subprocesses in an activated interpreter (so the user's venv/conda is used). This involves the extension's internal environment execution APIs and sometimes `envExt` helpers when the external environment extension is present. 
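To make the payload-to-TestRun mapping concrete, here is a hedged, resolver-style sketch; the `RunEventPayload` field names are illustrative assumptions, not the actual IPC contract (see `resultResolver.ts` and the `python_files` runners for the real shape):

```typescript
import { TestItem, TestMessage, TestRun } from 'vscode';

// Illustrative payload type; the real contract lives in the Python runners
// and src/client/testing/testController/common/types.ts.
interface RunEventPayload {
    testId: string; // used by the caller to look up the TestItem
    outcome: 'started' | 'passed' | 'failed' | 'skipped';
    message?: string;
    durationMs?: number;
}

// Apply one run event to a TestRun, mirroring what PythonResultResolver does.
function applyRunEvent(run: TestRun, item: TestItem, event: RunEventPayload): void {
    switch (event.outcome) {
        case 'started':
            run.started(item);
            break;
        case 'passed':
            run.passed(item, event.durationMs);
            break;
        case 'failed':
            // TestMessage carries the error text shown in the Test Explorer.
            run.failed(item, new TestMessage(event.message ?? 'Test failed'), event.durationMs);
            break;
        case 'skipped':
            run.skipped(item);
            break;
    }
}
```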
+## Learnings + +- Never await `showErrorMessage()` calls in test execution adapters as it blocks the test UI thread and freezes the Test Explorer (1) +- VS Code test-related context menus are contributed using both the `testing/item/context` and `testing/item/gutter` menu locations in package.json for full coverage (1) diff --git a/.github/prompts/extract-impl-instructions.prompt.md b/.github/prompts/extract-impl-instructions.prompt.md new file mode 100644 index 000000000000..c2fb08b443c7 --- /dev/null +++ b/.github/prompts/extract-impl-instructions.prompt.md @@ -0,0 +1,79 @@ +--- +mode: edit +--- + +Analyze the specified part of the VS Code Python Extension codebase to generate or update implementation instructions in `.github/instructions/.instructions.md`. + +## Task + +Create concise developer guidance focused on: + +### Implementation Essentials + +- **Core patterns**: How this component is typically implemented and extended +- **Key interfaces**: Essential classes, services, and APIs with usage examples +- **Integration points**: How this component interacts with other extension parts +- **Common tasks**: Typical development scenarios with step-by-step guidance + +### Content Structure + +````markdown +--- +description: 'Implementation guide for the part of the Python Extension' +--- + +# Implementation Guide + +## Overview + +Brief description of the component's purpose and role in VS Code Python Extension. + +## Key Concepts + +- Main abstractions and their responsibilities +- Important interfaces and base classes + +## Common Implementation Patterns + +### Pattern 1: [Specific Use Case] + +```typescript +// Code example showing typical implementation +``` + +### Pattern 2: [Another Use Case] + +```typescript +// Another practical example +``` + +## Integration Points + +- How this component connects to other VS Code Python Extension systems +- Required services and dependencies +- Extension points and contribution models + +## Essential APIs + +- Key methods and interfaces developers need +- Common parameters and return types + +## Gotchas and Best Practices + +- Non-obvious behaviors to watch for +- Performance considerations +- Common mistakes to avoid +```` + +## Guidelines +- **Be specific**: Use actual class names, method signatures, and file paths +- **Show examples**: Include working code snippets from the codebase +- **Target implementation**: Focus on how to build with/extend this component +- **Keep it actionable**: Every section should help developers accomplish tasks + +Source conventions from existing `.github/instructions/*.instructions.md`, `CONTRIBUTING.md`, and codebase patterns. + +If `.github/instructions/.instructions.md` exists, intelligently merge new insights with existing content. diff --git a/.github/prompts/extract-usage-instructions.prompt.md b/.github/prompts/extract-usage-instructions.prompt.md new file mode 100644 index 000000000000..ea48f162a220 --- /dev/null +++ b/.github/prompts/extract-usage-instructions.prompt.md @@ -0,0 +1,30 @@ +--- +mode: edit +--- + +Analyze the user-requested part of the codebase (use a suitable name) to generate or update `.github/instructions/.instructions.md` for guiding developers and AI coding agents.
+ +Focus on practical usage patterns and essential knowledge: + +- How to use, extend, or integrate with this code area +- Key architectural patterns and conventions specific to this area +- Common implementation patterns with code examples +- Integration points and typical interaction patterns with other components +- Essential gotchas and non-obvious behaviors + +Source existing conventions from `.github/instructions/*.instructions.md`, `CONTRIBUTING.md`, and `README.md`. + +Guidelines: + +- Write concise, actionable instructions using markdown structure +- Document discoverable patterns with concrete examples +- If `.github/instructions/.instructions.md` exists, merge intelligently +- Target developers who need to work with or extend this code area + +Update `.github/instructions/.instructions.md` with header: + +``` +--- +description: "How to work with the part of the codebase" +--- +``` diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6d122b77288b..74f5d5a58a3a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -11,7 +11,7 @@ on: permissions: {} env: - NODE_VERSION: 22.17.0 + NODE_VERSION: 22.21.1 PYTHON_VERSION: '3.10' # YML treats 3.10 the number as 3.1, so quotes around 3.10 # Force a path with spaces and to test extension works in these scenarios # Unicode characters are causing 2.7 failures so skip that for now. @@ -84,12 +84,12 @@ jobs: # vsix-target: alpine-arm64 steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: 'python-env-tools' @@ -115,7 +115,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false @@ -130,12 +130,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Use Python ${{ env.PYTHON_VERSION }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false @@ -155,7 +155,7 @@ jobs: python -m pip install --upgrade -r build/test-requirements.txt - name: Run Pyright - uses: jakebailey/pyright-action@b5d50e5cde6547546a5c4ac92e416a8c2c1a1dfe # v2.3.2 + uses: jakebailey/pyright-action@6cabc0f01c4994be48fd45cd9dbacdd6e1ee6e5e # v2.3.3 with: version: 1.1.308 working-directory: 'python_files' @@ -178,13 +178,13 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: path: ${{ env.special-working-directory-relative }} persist-credentials: false - name: Use Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} @@ -218,13 +218,13 @@ jobs: test-suite: [ts-unit, venv, single-workspace, multi-workspace, debugger, functional] steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: path: ${{ env.special-working-directory-relative }} persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: ${{ env.special-working-directory-relative }}/python-env-tools @@ -236,7 +236,7 @@ jobs: sparse-checkout-cone-mode: false - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: 
node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -252,7 +252,7 @@ jobs: run: npx @vscode/l10n-dev@latest export ./src - name: Install Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} @@ -426,12 +426,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: ${{ env.special-working-directory-relative }}/python-env-tools diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 84de97c4dc9a..5528fbbe9c0a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -36,13 +36,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -67,4 +67,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/community-feedback-auto-comment.yml b/.github/workflows/community-feedback-auto-comment.yml index f606148f6e86..27f93400a023 100644 --- a/.github/workflows/community-feedback-auto-comment.yml +++ b/.github/workflows/community-feedback-auto-comment.yml @@ -12,7 +12,7 @@ jobs: issues: write steps: - name: Check For Existing Comment - uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0 + uses: peter-evans/find-comment@b30e6a3c0ed37e7c023ccd3f1db5c6c0b0c23aad # v4.0.0 id: finder with: issue-number: ${{ github.event.issue.number }} @@ -21,7 +21,7 @@ jobs: - name: Add Community Feedback Comment if: steps.finder.outputs.comment-id == '' - uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 + uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0 with: issue-number: ${{ github.event.issue.number }} body: | diff --git a/.github/workflows/gen-issue-velocity.yml b/.github/workflows/gen-issue-velocity.yml index c28c6c368562..41d79e4074d0 100644 --- a/.github/workflows/gen-issue-velocity.yml +++ b/.github/workflows/gen-issue-velocity.yml @@ -14,12 +14,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: '3.x' diff --git a/.github/workflows/info-needed-closer.yml b/.github/workflows/info-needed-closer.yml index 33f53fc20dca..46892a58e800 100644 --- a/.github/workflows/info-needed-closer.yml +++ b/.github/workflows/info-needed-closer.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Actions - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/vscode-github-triage-actions' path: ./actions diff --git a/.github/workflows/issue-labels.yml b/.github/workflows/issue-labels.yml index a78ca03d5ee9..dcbd114086e2 100644 --- a/.github/workflows/issue-labels.yml +++ b/.github/workflows/issue-labels.yml @@ -17,7 +17,7 @@ jobs: runs-on: 
ubuntu-latest steps: - name: Checkout Actions - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/vscode-github-triage-actions' ref: stable diff --git a/.github/workflows/lock-issues.yml b/.github/workflows/lock-issues.yml index cb6ed2e9d54e..544d04ee185e 100644 --- a/.github/workflows/lock-issues.yml +++ b/.github/workflows/lock-issues.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Lock Issues' - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1 + uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v6.0.0 with: github-token: ${{ github.token }} issue-inactive-days: '30' diff --git a/.github/workflows/pr-check.yml b/.github/workflows/pr-check.yml index b40c2c6946bf..d7d8d3869505 100644 --- a/.github/workflows/pr-check.yml +++ b/.github/workflows/pr-check.yml @@ -10,7 +10,7 @@ on: permissions: {} env: - NODE_VERSION: 22.17.0 + NODE_VERSION: 22.21.1 PYTHON_VERSION: '3.10' # YML treats 3.10 the number as 3.1, so quotes around 3.10 MOCHA_REPORTER_JUNIT: true # Use the mocha-multi-reporters and send output to both console (spec) and JUnit (mocha-junit-reporter). Also enables a reporter which exits the process running the tests if it haven't already. ARTIFACT_NAME_VSIX: ms-python-insiders-vsix @@ -57,12 +57,12 @@ jobs: # vsix-target: alpine-arm64 steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: 'python-env-tools' @@ -87,7 +87,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false @@ -101,17 +101,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Use Python ${{ env.PYTHON_VERSION }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: 'python-env-tools' @@ -138,7 +138,7 @@ jobs: python -m pip install --upgrade -r build/test-requirements.txt - name: Run Pyright - uses: jakebailey/pyright-action@b5d50e5cde6547546a5c4ac92e416a8c2c1a1dfe # v2.3.2 + uses: jakebailey/pyright-action@6cabc0f01c4994be48fd45cd9dbacdd6e1ee6e5e # v2.3.3 with: version: 1.1.308 working-directory: 'python_files' @@ -162,13 +162,13 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: path: ${{ env.special-working-directory-relative }} persist-credentials: false - name: Use Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} @@ -215,13 +215,13 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: path: ${{ env.special-working-directory-relative }} persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: ${{ env.special-working-directory-relative }}/python-env-tools @@ -233,7 +233,7 @@ jobs: sparse-checkout-cone-mode: false - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: 
node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -249,7 +249,7 @@ jobs: run: npx @vscode/l10n-dev@latest export ./src - name: Use Python ${{ matrix.python }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python }} @@ -412,13 +412,13 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: path: ${{ env.special-working-directory-relative }} persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: ${{ env.special-working-directory-relative }}/python-env-tools @@ -452,12 +452,12 @@ jobs: steps: # Need the source to have the tests available. - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: python-env-tools @@ -488,12 +488,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: persist-credentials: false - name: Checkout Python Environment Tools - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/python-environment-tools' path: python-env-tools @@ -505,7 +505,7 @@ jobs: sparse-checkout-cone-mode: false - name: Install Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -520,7 +520,7 @@ jobs: run: npx @vscode/l10n-dev@latest export ./src - name: Use Python ${{ env.PYTHON_VERSION }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: 'pip' @@ -680,7 +680,7 @@ jobs: run: npm run test:cover:report - name: Upload HTML report - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: ${{ runner.os }}-coverage-report-html path: ./coverage diff --git a/.github/workflows/pr-file-check.yml b/.github/workflows/pr-file-check.yml index 688c48d865d8..6364e5fa744e 100644 --- a/.github/workflows/pr-file-check.yml +++ b/.github/workflows/pr-file-check.yml @@ -42,17 +42,3 @@ jobs: .github/test_plan.md skip-label: 'skip tests' failure-message: 'TypeScript code was edited without also editing a ${file-pattern} file; see the Testing page in our wiki on testing guidelines (the ${skip-label} label can be used to pass this check)' - - - name: 'Ensure PR has an associated issue' - uses: actions/github-script@v7 - with: - script: | - const labels = context.payload.pull_request.labels.map(label => label.name); - if (!labels.includes('skip-issue-check')) { - const prBody = context.payload.pull_request.body || ''; - const issueLink = prBody.match(/https:\/\/github\.com\/\S+\/issues\/\d+/); - const issueReference = prBody.match(/#\d+/); - if (!issueLink && !issueReference) { - core.setFailed('No associated issue found in the PR description.'); - } - } diff --git a/.github/workflows/pr-issue-check.yml b/.github/workflows/pr-issue-check.yml new file mode 100644 index 000000000000..25ac91bbd279 --- /dev/null +++ b/.github/workflows/pr-issue-check.yml @@ -0,0 +1,31 @@ +name: PR issue check + +on: + pull_request: + types: + - 'opened' + - 'reopened' + - 'synchronize' + - 'labeled' + - 'unlabeled' + +permissions: {} + +jobs: + check-for-attached-issue: + name: 'Check for attached issue' + runs-on: ubuntu-latest + steps: + - name: 'Ensure PR has an associated issue' 
+ uses: actions/github-script@v8 + with: + script: | + const labels = context.payload.pull_request.labels.map(label => label.name); + if (!labels.includes('skip-issue-check')) { + const prBody = context.payload.pull_request.body || ''; + const issueLink = prBody.match(/https:\/\/github\.com\/\S+\/issues\/\d+/); + const issueReference = prBody.match(/#\d+/); + if (!issueLink && !issueReference) { + core.setFailed('No associated issue found in the PR description.'); + } + } diff --git a/.github/workflows/test-plan-item-validator.yml b/.github/workflows/test-plan-item-validator.yml index 6e62058b04c6..57db4a3e18a7 100644 --- a/.github/workflows/test-plan-item-validator.yml +++ b/.github/workflows/test-plan-item-validator.yml @@ -12,7 +12,7 @@ jobs: if: contains(github.event.issue.labels.*.name, 'testplan-item') || contains(github.event.issue.labels.*.name, 'invalid-testplan-item') steps: - name: Checkout Actions - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/vscode-github-triage-actions' path: ./actions diff --git a/.github/workflows/triage-info-needed.yml b/.github/workflows/triage-info-needed.yml index f61d17c033d7..c7a37ba0c78d 100644 --- a/.github/workflows/triage-info-needed.yml +++ b/.github/workflows/triage-info-needed.yml @@ -15,7 +15,7 @@ jobs: issues: write steps: - name: Checkout Actions - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/vscode-github-triage-actions' ref: stable @@ -39,7 +39,7 @@ jobs: issues: write steps: - name: Checkout Actions - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: 'microsoft/vscode-github-triage-actions' ref: stable diff --git a/.nvmrc b/.nvmrc index 67e145bf0f9d..c6a66a6e6a68 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v20.18.0 +v22.21.1 diff --git a/.vscode/tasks.json b/.vscode/tasks.json index e1468bdfc2ad..c5a054ed43cf 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -12,9 +12,7 @@ "type": "npm", "script": "compile", "isBackground": true, - "problemMatcher": [ - "$tsc-watch" - ], + "problemMatcher": ["$tsc-watch"], "group": { "kind": "build", "isDefault": true @@ -34,6 +32,31 @@ "script": "preTestJediLSP", "problemMatcher": [], "label": "preTestJediLSP" + }, + { + "type": "npm", + "script": "check-python", + "problemMatcher": ["$python"], + "label": "npm: check-python", + "detail": "npm run check-python:ruff && npm run check-python:pyright" + }, + { + "label": "npm: check-python (venv)", + "type": "shell", + "command": "bash", + "args": ["-lc", "source .venv/bin/activate && npm run check-python"], + "problemMatcher": [], + "detail": "Activates the repo .venv first (avoids pyenv/shim Python) then runs: npm run check-python", + "windows": { + "command": "pwsh", + "args": [ + "-NoProfile", + "-ExecutionPolicy", + "Bypass", + "-Command", + ".\\.venv\\Scripts\\Activate.ps1; npm run check-python" + ] + } } ] } diff --git a/README.md b/README.md index 929bdef48892..e9dd52a538cd 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Python extension for Visual Studio Code -A [Visual Studio Code](https://code.visualstudio.com/) [extension](https://marketplace.visualstudio.com/VSCode) with rich support for the [Python language](https://www.python.org/) (for all [actively supported Python versions](https://devguide.python.org/versions/#supported-versions)), providing access points for extensions to seamlessly integrate and offer support for IntelliSense (Pylance), debugging (Python Debugger), formatting, linting, code navigation, refactoring, 
variable explorer, test explorer, and more! +A [Visual Studio Code](https://code.visualstudio.com/) [extension](https://marketplace.visualstudio.com/VSCode) with rich support for the [Python language](https://www.python.org/) (for all [actively supported Python versions](https://devguide.python.org/versions/#supported-versions)), providing access points for extensions to seamlessly integrate and offer support for IntelliSense (Pylance), debugging (Python Debugger), formatting, linting, code navigation, refactoring, variable explorer, test explorer, environment management (**NEW** Python Environments Extension). ## Support for [vscode.dev](https://vscode.dev/) @@ -13,7 +13,7 @@ The Python extension will automatically install the following extensions by defa - [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) – performant Python language support - [Python Debugger](https://marketplace.visualstudio.com/items?itemName=ms-python.debugpy) – seamless debug experience with debugpy -- [Python Environments](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-python-envs) – dedicated environment management (see below) +- **(NEW)** [Python Environments](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-python-envs) – dedicated environment management (see below) These extensions are optional dependencies, meaning the Python extension will remain fully functional if they fail to be installed. Any or all of these extensions can be [disabled](https://code.visualstudio.com/docs/editor/extension-marketplace#_disable-an-extension) or [uninstalled](https://code.visualstudio.com/docs/editor/extension-marketplace#_uninstall-an-extension) at the expense of some features. Extensions installed through the marketplace are subject to the [Marketplace Terms of Use](https://cdn.vsassets.io/v/M146_20190123.39/_content/Microsoft-Visual-Studio-Marketplace-Terms-of-Use.pdf). @@ -90,13 +90,13 @@ To see all available Python commands, open the Command Palette and type `Python` Learn more about the rich features of the Python extension: -- [IntelliSense](https://code.visualstudio.com/docs/python/editing#_autocomplete-and-intellisense): Edit your code with auto-completion, code navigation, syntax checking and more -- [Linting](https://code.visualstudio.com/docs/python/linting): Get additional code analysis with Pylint, Flake8 and more -- [Code formatting](https://code.visualstudio.com/docs/python/formatting): Format your code with black, autopep8 or yapf -- [Debugging](https://code.visualstudio.com/docs/python/debugging): Debug your Python scripts, web apps, remote or multi-threaded processes +- [IntelliSense](https://code.visualstudio.com/docs/python/editing#_autocomplete-and-intellisense): Edit your code with auto-completion, code navigation, syntax checking and more. +- [Linting](https://code.visualstudio.com/docs/python/linting): Get additional code analysis with Pylint, Flake8 and more. +- [Code formatting](https://code.visualstudio.com/docs/python/formatting): Format your code with black, autopep8 or yapf. +- [Debugging](https://code.visualstudio.com/docs/python/debugging): Debug your Python scripts, web apps, remote or multi-threaded processes. - [Testing](https://code.visualstudio.com/docs/python/unit-testing): Run and debug tests through the Test Explorer with unittest or pytest.
-- [Jupyter Notebooks](https://code.visualstudio.com/docs/python/jupyter-support): Create and edit Jupyter Notebooks, add and run code cells, render plots, visualize variables through the variable explorer, visualize dataframes with the data viewer, and more -- [Environments](https://code.visualstudio.com/docs/python/environments): Automatically activate and switch between virtualenv, venv, pipenv, conda and pyenv environments +- [Jupyter Notebooks](https://code.visualstudio.com/docs/python/jupyter-support): Create and edit Jupyter Notebooks, add and run code cells, render plots, visualize variables through the variable explorer, visualize dataframes with the data viewer, and more. +- [Environments](https://code.visualstudio.com/docs/python/environments): Automatically activate and switch between virtualenv, venv, pipenv, conda and pyenv environments. - [Refactoring](https://code.visualstudio.com/docs/python/editing#_refactoring): Restructure your Python code with variable extraction and method extraction. Additionally, there is componentized support to enable additional refactoring, such as import sorting, through extensions including [isort](https://marketplace.visualstudio.com/items?itemName=ms-python.isort) and [Ruff](https://marketplace.visualstudio.com/items?itemName=charliermarsh.ruff). @@ -121,6 +121,6 @@ The Microsoft Python Extension for Visual Studio Code collects usage data and sends it to Microsoft to help improve our products and services. Read our [privacy statement](https://privacy.microsoft.com/privacystatement) to -learn more. This extension respects the `telemetry.enableTelemetry` +learn more. This extension respects the `telemetry.telemetryLevel` setting which you can learn more about at https://code.visualstudio.com/docs/supporting/faq#_how-to-disable-telemetry-reporting. 
diff --git a/build/azure-pipeline.stable.yml b/build/azure-pipeline.stable.yml index ce67c69a3df4..237ba08dbc99 100644 --- a/build/azure-pipeline.stable.yml +++ b/build/azure-pipeline.stable.yml @@ -128,7 +128,7 @@ extends: project: 'Monaco' definition: 593 buildVersionToDownload: 'latestFromBranch' - branchName: 'refs/heads/release/2025.12' + branchName: 'refs/heads/release/2026.0' targetPath: '$(Build.SourcesDirectory)/python-env-tools/bin' artifactName: 'bin-$(buildTarget)' itemPattern: | diff --git a/build/azure-pipelines/pipeline.yml b/build/azure-pipelines/pipeline.yml index 46302aa6ff90..0796e38ca598 100644 --- a/build/azure-pipelines/pipeline.yml +++ b/build/azure-pipelines/pipeline.yml @@ -37,13 +37,13 @@ extends: testPlatforms: - name: Linux nodeVersions: - - 22.17.0 + - 22.21.1 - name: MacOS nodeVersions: - - 22.17.0 + - 22.21.1 - name: Windows nodeVersions: - - 22.17.0 + - 22.21.1 testSteps: - template: /build/azure-pipelines/templates/test-steps.yml@self parameters: diff --git a/package-lock.json b/package-lock.json index 9a52111abc5c..863c2a720678 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,15 +1,15 @@ { "name": "python", - "version": "2025.13.0-dev", + "version": "2026.1.0-dev", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "python", - "version": "2025.13.0-dev", + "version": "2026.1.0-dev", "license": "MIT", "dependencies": { - "@iarna/toml": "^2.2.5", + "@iarna/toml": "^3.0.0", "@vscode/extension-telemetry": "^0.8.4", "arch": "^2.1.0", "fs-extra": "^11.2.0", @@ -17,7 +17,7 @@ "iconv-lite": "^0.6.3", "inversify": "^6.0.2", "jsonc-parser": "^3.0.0", - "lodash": "^4.17.21", + "lodash": "^4.17.23", "minimatch": "^5.0.1", "named-js-regexp": "^1.3.3", "node-stream-zip": "^1.6.0", @@ -50,7 +50,7 @@ "@types/glob": "^7.2.0", "@types/lodash": "^4.14.104", "@types/mocha": "^9.1.0", - "@types/node": "^22.5.0", + "@types/node": "^22.19.1", "@types/semver": "^5.5.0", "@types/shortid": "^0.0.29", "@types/sinon": "^17.0.3", @@ -933,11 +933,10 @@ } }, "node_modules/@eslint/eslintrc/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, - "license": "MIT", "dependencies": { "argparse": "^2.0.1" }, @@ -1086,9 +1085,10 @@ "license": "BSD-3-Clause" }, "node_modules/@iarna/toml": { - "version": "2.2.5", - "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz", - "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-3.0.0.tgz", + "integrity": "sha512-td6ZUkz2oS3VeleBcN+m//Q6HlCFCPrnI0FZhrt/h4XqLEdOyYp2u21nd8MdsR+WJy5r9PTDaHTDDfhf4H4l6Q==", + "license": "ISC" }, "node_modules/@isaacs/cliui": { "version": "8.0.2", @@ -1894,12 +1894,13 @@ "dev": true }, "node_modules/@types/node": { - "version": "22.5.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.5.0.tgz", - "integrity": "sha512-DkFrJOe+rfdHTqqMg0bSNlGlQ85hSoh2TPzZyhHsXnMtligRWpxUySiyw8FY14ITt24HVCiQPWxS3KO/QlGmWg==", + "version": "22.19.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz", + "integrity": 
"sha512-1N9SBnWYOJTrNZCdh/yJE+t910Y128BoyY+zBLWhL3r0TYzlTmFdXrPwHL9DyFZmlEXNQQolTZh3KHV31QDhyA==", "dev": true, + "license": "MIT", "dependencies": { - "undici-types": "~6.19.2" + "undici-types": "~6.21.0" } }, "node_modules/@types/semver": { @@ -6196,11 +6197,10 @@ } }, "node_modules/eslint/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, - "license": "MIT", "dependencies": { "argparse": "^2.0.1" }, @@ -9162,9 +9162,9 @@ "dev": true }, "node_modules/js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "dependencies": { "argparse": "^1.0.7", @@ -9264,23 +9264,23 @@ } }, "node_modules/jsonwebtoken/node_modules/jwa": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", - "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.2.tgz", + "integrity": "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==", "dev": true, "dependencies": { - "buffer-equal-constant-time": "1.0.1", + "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "node_modules/jsonwebtoken/node_modules/jws": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", - "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.3.tgz", + "integrity": "sha512-byiJ0FLRdLdSVSReO/U4E7RoEyOCKnEnEPMjq3HxWtvzLsV08/i5RQKsFVNkCldrCaPr2vDNAOMsfs8T/Hze7g==", "dev": true, "dependencies": { - "jwa": "^1.4.1", + "jwa": "^1.4.2", "safe-buffer": "^5.0.1" } }, @@ -9316,23 +9316,23 @@ "dev": true }, "node_modules/jwa": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", - "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", + "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", "dev": true, "dependencies": { - "buffer-equal-constant-time": "1.0.1", + "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "node_modules/jws": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", - "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz", + "integrity": 
"sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==", "dev": true, "dependencies": { - "jwa": "^2.0.0", + "jwa": "^2.0.1", "safe-buffer": "^5.0.1" } }, @@ -9518,9 +9518,9 @@ } }, "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==" }, "node_modules/lodash.flattendeep": { "version": "4.4.0", @@ -10226,9 +10226,9 @@ } }, "node_modules/mocha/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "dependencies": { "argparse": "^2.0.1" @@ -10908,10 +10908,13 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -11803,12 +11806,12 @@ } }, "node_modules/qs": { - "version": "6.12.1", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.12.1.tgz", - "integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==", + "version": "6.14.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz", + "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==", "dev": true, "dependencies": { - "side-channel": "^1.0.6" + "side-channel": "^1.1.0" }, "engines": { "node": ">=0.6" @@ -12444,18 +12447,45 @@ "dev": true }, "node_modules/sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "version": "2.4.12", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.12.tgz", + "integrity": "sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==", "dev": true, "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" + "inherits": "^2.0.4", + "safe-buffer": "^5.2.1", + "to-buffer": "^1.2.0" }, "bin": { "sha.js": "bin.js" + }, + "engines": { + "node": ">= 0.10" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/sha.js/node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": 
"github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/shallow-clone": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", @@ -12505,15 +12535,69 @@ } }, "node_modules/side-channel": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", - "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "object-inspect": "^1.13.1" + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" }, "engines": { "node": ">= 0.4" @@ -13110,11 +13194,10 @@ } }, "node_modules/tar-fs": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.3.tgz", - "integrity": "sha512-090nwYJDmlhwFwEW3QQl+vaNnxsO2yVsd45eTKRBzSzu+hlb1w2K9inVq5b0ngXuLVqQ4ApvsUHHnu/zQNkWAg==", + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", + "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", "dev": true, - "license": "MIT", "optional": true, "dependencies": { "chownr": "^1.1.1", @@ -14008,10 +14091,11 @@ } }, "node_modules/undici-types": { - "version": "6.19.8", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", - "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", - "dev": 
true + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" }, "node_modules/unicode": { "version": "14.0.0", @@ -15819,9 +15903,9 @@ } }, "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "requires": { "argparse": "^2.0.1" @@ -15921,9 +16005,9 @@ "dev": true }, "@iarna/toml": { - "version": "2.2.5", - "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz", - "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-3.0.0.tgz", + "integrity": "sha512-td6ZUkz2oS3VeleBcN+m//Q6HlCFCPrnI0FZhrt/h4XqLEdOyYp2u21nd8MdsR+WJy5r9PTDaHTDDfhf4H4l6Q==" }, "@isaacs/cliui": { "version": "8.0.2", @@ -16588,12 +16672,12 @@ "dev": true }, "@types/node": { - "version": "22.5.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.5.0.tgz", - "integrity": "sha512-DkFrJOe+rfdHTqqMg0bSNlGlQ85hSoh2TPzZyhHsXnMtligRWpxUySiyw8FY14ITt24HVCiQPWxS3KO/QlGmWg==", + "version": "22.19.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz", + "integrity": "sha512-1N9SBnWYOJTrNZCdh/yJE+t910Y128BoyY+zBLWhL3r0TYzlTmFdXrPwHL9DyFZmlEXNQQolTZh3KHV31QDhyA==", "dev": true, "requires": { - "undici-types": "~6.19.2" + "undici-types": "~6.21.0" } }, "@types/semver": { @@ -19614,9 +19698,9 @@ "dev": true }, "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "requires": { "argparse": "^2.0.1" @@ -22012,9 +22096,9 @@ "dev": true }, "js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "requires": { "argparse": "^1.0.7", @@ -22089,23 +22173,23 @@ }, "dependencies": { "jwa": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", - "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.2.tgz", + "integrity": "sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==", "dev": true, "requires": { - "buffer-equal-constant-time": "1.0.1", + "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", 
"safe-buffer": "^5.0.1" } }, "jws": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", - "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.3.tgz", + "integrity": "sha512-byiJ0FLRdLdSVSReO/U4E7RoEyOCKnEnEPMjq3HxWtvzLsV08/i5RQKsFVNkCldrCaPr2vDNAOMsfs8T/Hze7g==", "dev": true, "requires": { - "jwa": "^1.4.1", + "jwa": "^1.4.2", "safe-buffer": "^5.0.1" } } @@ -22140,23 +22224,23 @@ "dev": true }, "jwa": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", - "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", + "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", "dev": true, "requires": { - "buffer-equal-constant-time": "1.0.1", + "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "jws": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", - "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz", + "integrity": "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==", "dev": true, "requires": { - "jwa": "^2.0.0", + "jwa": "^2.0.1", "safe-buffer": "^5.0.1" } }, @@ -22309,9 +22393,9 @@ } }, "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==" }, "lodash.flattendeep": { "version": "4.4.0", @@ -22813,9 +22897,9 @@ "dev": true }, "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "requires": { "argparse": "^2.0.1" @@ -23371,9 +23455,9 @@ "dev": true }, "object-inspect": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", "dev": true }, "object-is": { @@ -24045,12 +24129,12 @@ "dev": true }, "qs": { - "version": "6.12.1", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.12.1.tgz", - "integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==", + "version": "6.14.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz", + 
"integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==", "dev": true, "requires": { - "side-channel": "^1.0.6" + "side-channel": "^1.1.0" } }, "query-string": { @@ -24525,13 +24609,22 @@ "dev": true }, "sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "version": "2.4.12", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.12.tgz", + "integrity": "sha512-8LzC5+bvI45BjpfXU8V5fdU2mfeKiQe1D1gIMn7XUlF3OTUrpdJpPPH4EMAnF0DsHHdSZqCdSss5qCmJKuiO3w==", "dev": true, "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" + "inherits": "^2.0.4", + "safe-buffer": "^5.2.1", + "to-buffer": "^1.2.0" + }, + "dependencies": { + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true + } } }, "shallow-clone": { @@ -24573,15 +24666,51 @@ } }, "side-channel": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", - "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", "dev": true, "requires": { - "call-bind": "^1.0.7", "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "object-inspect": "^1.13.1" + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + } + }, + "side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dev": true, + "requires": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + } + }, + "side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dev": true, + "requires": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + } + }, + "side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dev": true, + "requires": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" } }, "signal-exit": { @@ -25012,9 +25141,9 @@ "dev": true }, "tar-fs": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.3.tgz", - "integrity": "sha512-090nwYJDmlhwFwEW3QQl+vaNnxsO2yVsd45eTKRBzSzu+hlb1w2K9inVq5b0ngXuLVqQ4ApvsUHHnu/zQNkWAg==", + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", + "integrity": 
"sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", "dev": true, "optional": true, "requires": { @@ -25689,9 +25818,9 @@ "dev": true }, "undici-types": { - "version": "6.19.8", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", - "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", "dev": true }, "unicode": { diff --git a/package.json b/package.json index e07b3fc7983e..bc9131276c59 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "python", "displayName": "Python", "description": "Python language support with extension access points for IntelliSense (Pylance), Debugging (Python Debugger), linting, formatting, refactoring, unit tests, and more.", - "version": "2025.13.0-dev", + "version": "2026.1.0-dev", "featureFlags": { "usingNewInterpreterStorage": true }, @@ -1244,6 +1244,13 @@ "when": "controllerId == 'python-tests'" } ], + "testing/item/gutter": [ + { + "command": "python.copyTestId", + "group": "navigation", + "when": "controllerId == 'python-tests'" + } + ], "commandPalette": [ { "category": "Python", @@ -1502,7 +1509,7 @@ "name": "get_python_environment_details", "displayName": "Get Python Environment Info", "userDescription": "%python.languageModelTools.get_python_environment_details.userDescription%", - "modelDescription": "This tool will retrieve the details of the Python Environment for the specified file or workspace. The details returned include the 1. Type of Environment (conda, venv, etec), 2. Version of Python, 3. List of all installed packages with their versions. ALWAYS call configure_python_environment before using this tool.", + "modelDescription": "This tool will retrieve the details of the Python Environment for the specified file or workspace. The details returned include the 1. Type of Python Environment (conda, venv, etc), 2. Version of Python, 3. List of all installed Python packages with their versions. ALWAYS call configure_python_environment before using this tool. IMPORTANT: This tool is only for Python environments (venv, virtualenv, conda, pipenv, poetry, pyenv, pixi, or any other Python environment manager). Do not use this tool for npm packages, system packages, Ruby gems, or any other non-Python dependencies.", "toolReferenceName": "getPythonEnvironmentInfo", "tags": [ "python", @@ -1527,7 +1534,7 @@ "name": "get_python_executable_details", "displayName": "Get Python Executable", "userDescription": "%python.languageModelTools.get_python_executable_details.userDescription%", - "modelDescription": "This tool will retrieve the details of the Python Environment for the specified file or workspace. ALWAYS use this tool before executing any Python command in the terminal. This tool returns the details of how to construct the fully qualified path and or command including details such as arguments required to run Python in a terminal. Note: Instead of executing `python --version` or `python -c 'import sys; print(sys.executable)'`, use this tool to get the Python executable path to replace the `python` command. E.g. instead of using `python -c 'import sys; print(sys.executable)'`, use this tool to build the command `conda run -n -c 'import sys; print(sys.executable)'`. 
ALWAYS call configure_python_environment before using this tool.", + "modelDescription": "This tool will retrieve the details of the Python Environment for the specified file or workspace. ALWAYS use this tool before executing any Python command in the terminal. This tool returns the details of how to construct the fully qualified path and or command including details such as arguments required to run Python in a terminal. Note: Instead of executing `python --version` or `python -c 'import sys; print(sys.executable)'`, use this tool to get the Python executable path to replace the `python` command. E.g. instead of using `python -c 'import sys; print(sys.executable)'`, use this tool to build the command `conda run -n <env-name> python -c 'import sys; print(sys.executable)'`. ALWAYS call configure_python_environment before using this tool. IMPORTANT: This tool is only for Python environments (venv, virtualenv, conda, pipenv, poetry, pyenv, pixi, or any other Python environment manager). Do not use this tool for npm packages, system packages, Ruby gems, or any other non-Python dependencies.", "toolReferenceName": "getPythonExecutableCommand", "tags": [ "python", @@ -1552,7 +1559,7 @@ "name": "install_python_packages", "displayName": "Install Python Package", "userDescription": "%python.languageModelTools.install_python_packages.userDescription%", - "modelDescription": "Installs Python packages in the given workspace. Use this tool to install packages in the user's chosen environment. ALWAYS call configure_python_environment before using this tool.", + "modelDescription": "Installs Python packages in the given workspace. Use this tool to install Python packages in the user's chosen Python environment. ALWAYS call configure_python_environment before using this tool. IMPORTANT: This tool should only be used to install Python packages using package managers like pip or conda (works with any Python environment: venv, virtualenv, pipenv, poetry, pyenv, pixi, conda, etc.). Do not use this tool to install npm packages, system packages (apt/brew/yum), Ruby gems, or any other non-Python dependencies.", "toolReferenceName": "installPythonPackage", "tags": [ "python", @@ -1571,7 +1578,7 @@ "items": { "type": "string" }, - "description": "The list of packages to install." + "description": "The list of Python packages to install." }, "resourcePath": { "type": "string", @@ -1586,7 +1593,7 @@ { "name": "configure_python_environment", "displayName": "Configure Python Environment", - "modelDescription": "This tool configures a Python environment in the given workspace. ALWAYS Use this tool to set up the user's chosen environment and ALWAYS call this tool before using any other Python related tools or running any Python command in the terminal.", + "modelDescription": "This tool configures a Python environment in the given workspace. ALWAYS Use this tool to set up the user's chosen environment and ALWAYS call this tool before using any other Python related tools or running any Python command in the terminal. IMPORTANT: This tool is only for Python environments (venv, virtualenv, conda, pipenv, poetry, pyenv, pixi, or any other Python environment manager).
Do not use this tool for npm packages, system packages, Ruby gems, or any other non-Python dependencies.", "userDescription": "%python.languageModelTools.configure_python_environment.userDescription%", "toolReferenceName": "configurePythonEnvironment", "tags": [ @@ -1688,6 +1695,9 @@ "lint-fix": "eslint --fix src build pythonExtensionApi gulpfile.js", "format-check": "prettier --check 'src/**/*.ts' 'build/**/*.js' '.github/**/*.yml' gulpfile.js", "format-fix": "prettier --write 'src/**/*.ts' 'build/**/*.js' '.github/**/*.yml' gulpfile.js", + "check-python": "npm run check-python:ruff && npm run check-python:pyright", + "check-python:ruff": "cd python_files && python -m pip install -U ruff && python -m ruff check . && python -m ruff format --check", + "check-python:pyright": "cd python_files && npx --yes pyright@1.1.308 .", "clean": "gulp clean", "addExtensionPackDependencies": "gulp addExtensionPackDependencies", "updateBuildNumber": "gulp updateBuildNumber", @@ -1695,7 +1705,7 @@ "webpack": "webpack" }, "dependencies": { - "@iarna/toml": "^2.2.5", + "@iarna/toml": "^3.0.0", "@vscode/extension-telemetry": "^0.8.4", "arch": "^2.1.0", "fs-extra": "^11.2.0", @@ -1703,7 +1713,7 @@ "iconv-lite": "^0.6.3", "inversify": "^6.0.2", "jsonc-parser": "^3.0.0", - "lodash": "^4.17.21", + "lodash": "^4.17.23", "minimatch": "^5.0.1", "named-js-regexp": "^1.3.3", "node-stream-zip": "^1.6.0", @@ -1736,7 +1746,7 @@ "@types/glob": "^7.2.0", "@types/lodash": "^4.14.104", "@types/mocha": "^9.1.0", - "@types/node": "^22.5.0", + "@types/node": "^22.19.1", "@types/semver": "^5.5.0", "@types/shortid": "^0.0.29", "@types/sinon": "^17.0.3", diff --git a/pythonExtensionApi/package.json b/pythonExtensionApi/package.json index e4e956ff6065..11e0445aa8da 100644 --- a/pythonExtensionApi/package.json +++ b/pythonExtensionApi/package.json @@ -13,7 +13,7 @@ "main": "./out/main.js", "types": "./out/main.d.ts", "engines": { - "node": ">=22.17.0", + "node": ">=22.21.1", "vscode": "^1.93.0" }, "license": "MIT", diff --git a/python_files/jedilsp_requirements/requirements.txt b/python_files/jedilsp_requirements/requirements.txt index 0fc5cd76810f..e2599e7bbce4 100644 --- a/python_files/jedilsp_requirements/requirements.txt +++ b/python_files/jedilsp_requirements/requirements.txt @@ -6,32 +6,32 @@ attrs==25.3.0 \ # via # cattrs # lsprotocol -cattrs==24.1.3 \ - --hash=sha256:981a6ef05875b5bb0c7fb68885546186d306f10f0f6718fe9b96c226e68821ff \ - --hash=sha256:adf957dddd26840f27ffbd060a6c4dd3b2192c5b7c2c0525ef1bd8131d8a83f5 +cattrs==25.2.0 \ + --hash=sha256:539d7eedee7d2f0706e4e109182ad096d608ba84633c32c75ef3458f1d11e8f1 \ + --hash=sha256:f46c918e955db0177be6aa559068390f71988e877c603ae2e56c71827165cc06 # via # jedi-language-server # lsprotocol # pygls -docstring-to-markdown==0.16 \ - --hash=sha256:097bf502fdf040b0d019688a7cc1abb89b98196801448721740e8aa3e5075627 \ - --hash=sha256:f92cc42357b2c932f70ca2ebc79f7805039a34011ad381c1b6ac3481e81596ce +docstring-to-markdown==0.17 \ + --hash=sha256:df72a112294c7492487c9da2451cae0faeee06e86008245c188c5761c9590ca3 \ + --hash=sha256:fd7d5094aa83943bf5f9e1a13701866b7c452eac19765380dead666e36d3711c # via jedi-language-server -exceptiongroup==1.2.2 \ - --hash=sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b \ - --hash=sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc +exceptiongroup==1.3.0 \ + --hash=sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10 \ + --hash=sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88 # via 
cattrs -importlib-metadata==8.6.1 \ - --hash=sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e \ - --hash=sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580 +importlib-metadata==8.7.0 \ + --hash=sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000 \ + --hash=sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd # via docstring-to-markdown jedi==0.19.2 \ --hash=sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0 \ --hash=sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9 # via jedi-language-server -jedi-language-server==0.45.0 \ - --hash=sha256:b44eb380f87c37935b91e4399f048dc935eb7d85829130fdbcecfdad61e1362b \ - --hash=sha256:f9ffd662877324ff28720c770197514184801b049a2d2c43190a7708b061f397 +jedi-language-server==0.45.1 \ + --hash=sha256:8c0c6b4eaeffdbb87be79e9897c9929ffeddf875dff7c1c36dd67768e294942b \ + --hash=sha256:a1fcfba8008f2640e921937fcf1933c3961d74249341eba8b3ef9a0c3f817102 # via -r python_files/jedilsp_requirements/requirements.in lsprotocol==2023.0.1 \ --hash=sha256:c75223c9e4af2f24272b14c6375787438279369236cd568f596d4951052a60f2 \ @@ -39,9 +39,9 @@ lsprotocol==2023.0.1 \ # via # jedi-language-server # pygls -parso==0.8.4 \ - --hash=sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18 \ - --hash=sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d +parso==0.8.5 \ + --hash=sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a \ + --hash=sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887 # via jedi pygls==1.3.1 \ --hash=sha256:140edceefa0da0e9b3c533547c892a42a7d2fd9217ae848c330c53d266a55018 \ @@ -49,14 +49,15 @@ pygls==1.3.1 \ # via # -r python_files/jedilsp_requirements/requirements.in # jedi-language-server -typing-extensions==4.13.2 \ - --hash=sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c \ - --hash=sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef +typing-extensions==4.15.0 \ + --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 # via # cattrs # docstring-to-markdown + # exceptiongroup # jedi-language-server -zipp==3.21.0 \ - --hash=sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4 \ - --hash=sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931 +zipp==3.23.0 \ + --hash=sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e \ + --hash=sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166 # via importlib-metadata diff --git a/python_files/python_server.py b/python_files/python_server.py index 1689d9b8f7f9..e7ee92794a21 100644 --- a/python_files/python_server.py +++ b/python_files/python_server.py @@ -5,6 +5,7 @@ import sys import traceback import uuid +from pathlib import Path from typing import Dict, List, Optional, Union STDIN = sys.stdin @@ -63,6 +64,9 @@ def custom_input(prompt=""): message_text = STDIN.buffer.read(content_length).decode() message_json = json.loads(message_text) return message_json["result"]["userInput"] + except EOFError: + # Input stream closed, exit gracefully + sys.exit(0) except Exception: print_log(traceback.format_exc()) @@ -73,7 +77,7 @@ def custom_input(prompt=""): def handle_response(request_id): - while not STDIN.closed: + while True: try: headers = get_headers() # Content-Length is the data size in bytes. 
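The switch from `while not STDIN.closed:` to `while True:` in these hunks matters because a Python file object's `closed` attribute only becomes true after `.close()` is called on it; it never flips when the writing end of the pipe goes away, so the old loop could spin forever once VS Code exited. The reliable EOF signal is `readline()` returning empty bytes, which the hunks below convert into an `EOFError` and a graceful `sys.exit(0)`. A minimal standalone sketch of that pattern (illustrative only, not the extension's code):

```python
import sys


def read_line_or_exit() -> bytes:
    """Read one line from stdin, exiting cleanly when the pipe is closed."""
    raw = sys.stdin.buffer.readline()
    # readline() returns b"" only at EOF; a blank line arrives as b"\n",
    # and sys.stdin.closed stays False either way.
    if raw == b"":
        sys.exit(0)  # writer closed the stream; stop instead of spinning
    return raw
```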
@@ -87,8 +91,10 @@ def handle_response(request_id): send_response(our_user_input, message_json["id"]) elif message_json["method"] == "exit": sys.exit(0) - - except Exception: # noqa: PERF203 + except EOFError: # noqa: PERF203 + # Input stream closed, exit gracefully + sys.exit(0) + except Exception: print_log(traceback.format_exc()) @@ -163,7 +169,11 @@ def get_value(self) -> str: def get_headers(): headers = {} while True: - line = STDIN.buffer.readline().decode().strip() + raw = STDIN.buffer.readline() + # Detect EOF: readline() returns empty bytes when input stream is closed + if raw == b"": + raise EOFError("EOF reached while reading headers") + line = raw.decode().strip() if not line: break name, value = line.split(":", 1) @@ -172,7 +182,17 @@ if __name__ == "__main__": - while not STDIN.closed: + # https://docs.python.org/3/tutorial/modules.html#the-module-search-path + # The directory containing the input script (or the current directory when no file is specified). + # Here we emulate the same behavior as when no file is specified. + input_script_dir = Path(__file__).parent + script_dir_str = str(input_script_dir) + if script_dir_str in sys.path: + sys.path.remove(script_dir_str) + while "" in sys.path: + sys.path.remove("") + sys.path.insert(0, "") + while True: try: headers = get_headers() # Content-Length is the data size in bytes. @@ -187,6 +207,8 @@ def get_headers(): check_valid_command(request_json) elif request_json["method"] == "exit": sys.exit(0) - - except Exception: # noqa: PERF203 + except EOFError: # noqa: PERF203 + # Input stream closed (VS Code terminated), exit gracefully + sys.exit(0) + except Exception: print_log(traceback.format_exc()) diff --git a/python_files/pythonrc.py b/python_files/pythonrc.py index 005b06bcdd15..3042ffb7a309 100644 --- a/python_files/pythonrc.py +++ b/python_files/pythonrc.py @@ -52,7 +52,9 @@ def __str__(self): result = "" # For non-windows allow recent_command history. if sys.platform != "win32": - result = "{command_executed}{command_line}{command_finished}{prompt_started}{prompt}{command_start}".format( + result = "{soh}{command_executed}{command_line}{command_finished}{prompt_started}{stx}{prompt}{soh}{command_start}{stx}".format( + soh="\001", + stx="\002", command_executed="\x1b]633;C\x07", command_line="\x1b]633;E;" + str(get_last_command()) + "\x07", command_finished="\x1b]633;D;" + str(exit_code) + "\x07", @@ -73,6 +75,9 @@ def __str__(self): return result + def __repr__(self): + return "" + if sys.platform != "win32" and (not is_wsl): sys.ps1 = PS1() diff --git a/python_files/tests/pytestadapter/expected_discovery_test_output.py b/python_files/tests/pytestadapter/expected_discovery_test_output.py index e00db5d660a3..b6f0779cf982 100644 --- a/python_files/tests/pytestadapter/expected_discovery_test_output.py +++ b/python_files/tests/pytestadapter/expected_discovery_test_output.py @@ -1,6 +1,11 @@ import os -from .helpers import TEST_DATA_PATH, find_test_line_number, get_absolute_test_id +from .helpers import ( + TEST_DATA_PATH, + find_class_line_number, + find_test_line_number, + get_absolute_test_id, +) # This file contains the expected output dictionaries for tests discovery and is used in test_discovery.py.
@@ -95,6 +100,7 @@ "unittest_pytest_same_file.py::TestExample", unit_pytest_same_file_path, ), + "lineno": find_class_line_number("TestExample", unit_pytest_same_file_path), }, { "name": "test_true_pytest", @@ -207,6 +213,7 @@ "unittest_folder/test_add.py::TestAddFunction", test_add_path, ), + "lineno": find_class_line_number("TestAddFunction", test_add_path), }, { "name": "TestDuplicateFunction", @@ -235,6 +242,9 @@ "unittest_folder/test_add.py::TestDuplicateFunction", test_add_path, ), + "lineno": find_class_line_number( + "TestDuplicateFunction", test_add_path + ), }, ], }, @@ -288,6 +298,9 @@ "unittest_folder/test_subtract.py::TestSubtractFunction", test_subtract_path, ), + "lineno": find_class_line_number( + "TestSubtractFunction", test_subtract_path + ), }, { "name": "TestDuplicateFunction", @@ -316,6 +329,9 @@ "unittest_folder/test_subtract.py::TestDuplicateFunction", test_subtract_path, ), + "lineno": find_class_line_number( + "TestDuplicateFunction", test_subtract_path + ), }, ], }, @@ -553,6 +569,7 @@ "parametrize_tests.py::TestClass", parameterize_tests_path, ), + "lineno": find_class_line_number("TestClass", parameterize_tests_path), "children": [ { "name": "test_adding", @@ -929,6 +946,7 @@ "test_multi_class_nest.py::TestFirstClass", TEST_MULTI_CLASS_NEST_PATH, ), + "lineno": find_class_line_number("TestFirstClass", TEST_MULTI_CLASS_NEST_PATH), "children": [ { "name": "TestSecondClass", @@ -938,6 +956,9 @@ "test_multi_class_nest.py::TestFirstClass::TestSecondClass", TEST_MULTI_CLASS_NEST_PATH, ), + "lineno": find_class_line_number( + "TestSecondClass", TEST_MULTI_CLASS_NEST_PATH + ), "children": [ { "name": "test_second", @@ -982,6 +1003,9 @@ "test_multi_class_nest.py::TestFirstClass::TestSecondClass2", TEST_MULTI_CLASS_NEST_PATH, ), + "lineno": find_class_line_number( + "TestSecondClass2", TEST_MULTI_CLASS_NEST_PATH + ), "children": [ { "name": "test_second2", @@ -1227,6 +1251,9 @@ "same_function_new_class_param.py::TestNotEmpty", TEST_DATA_PATH / "same_function_new_class_param.py", ), + "lineno": find_class_line_number( + "TestNotEmpty", TEST_DATA_PATH / "same_function_new_class_param.py" + ), }, { "name": "TestEmpty", @@ -1298,6 +1325,9 @@ "same_function_new_class_param.py::TestEmpty", TEST_DATA_PATH / "same_function_new_class_param.py", ), + "lineno": find_class_line_number( + "TestEmpty", TEST_DATA_PATH / "same_function_new_class_param.py" + ), }, ], } @@ -1371,6 +1401,9 @@ "test_param_span_class.py::TestClass1", TEST_DATA_PATH / "test_param_span_class.py", ), + "lineno": find_class_line_number( + "TestClass1", TEST_DATA_PATH / "test_param_span_class.py" + ), }, { "name": "TestClass2", @@ -1427,6 +1460,9 @@ "test_param_span_class.py::TestClass2", TEST_DATA_PATH / "test_param_span_class.py", ), + "lineno": find_class_line_number( + "TestClass2", TEST_DATA_PATH / "test_param_span_class.py" + ), }, ], } @@ -1503,6 +1539,7 @@ "pytest_describe_plugin/describe_only.py::describe_A", describe_only_path, ), + "lineno": find_class_line_number("describe_A", describe_only_path), } ], } @@ -1586,6 +1623,9 @@ "pytest_describe_plugin/nested_describe.py::describe_list::describe_append", nested_describe_path, ), + "lineno": find_class_line_number( + "describe_append", nested_describe_path + ), }, { "name": "describe_remove", @@ -1614,12 +1654,16 @@ "pytest_describe_plugin/nested_describe.py::describe_list::describe_remove", nested_describe_path, ), + "lineno": find_class_line_number( + "describe_remove", nested_describe_path + ), }, ], "id_": get_absolute_test_id( 
"pytest_describe_plugin/nested_describe.py::describe_list", nested_describe_path, ), + "lineno": find_class_line_number("describe_list", nested_describe_path), } ], } diff --git a/python_files/tests/pytestadapter/helpers.py b/python_files/tests/pytestadapter/helpers.py index 4c337585bece..03f1187149df 100644 --- a/python_files/tests/pytestadapter/helpers.py +++ b/python_files/tests/pytestadapter/helpers.py @@ -264,17 +264,34 @@ def runner_with_cwd_env( pipe_name = generate_random_pipe_name("pytest-discovery-test") if "COVERAGE_ENABLED" in env_add and "_TEST_VAR_UNITTEST" not in env_add: - process_args = [ - sys.executable, - "-m", - "pytest", - "-p", - "vscode_pytest", - "--cov=.", - "--cov-branch", - "-s", - *args, - ] + if "_PYTEST_MANUAL_PLUGIN_LOAD" in env_add: + # Test manual plugin loading scenario for issue #25590 + process_args = [ + sys.executable, + "-m", + "pytest", + "--disable-plugin-autoload", + "-p", + "pytest_cov.plugin", + "-p", + "vscode_pytest", + "--cov=.", + "--cov-branch", + "-s", + *args, + ] + else: + process_args = [ + sys.executable, + "-m", + "pytest", + "-p", + "vscode_pytest", + "--cov=.", + "--cov-branch", + "-s", + *args, + ] # Generate pipe name, pipe name specific per OS type. @@ -370,6 +387,28 @@ def find_test_line_number(test_name: str, test_file_path) -> str: raise ValueError(error_str) +def find_class_line_number(class_name: str, test_file_path) -> str: + """Function which finds the correct line number for a class definition. + + Args: + class_name: The name of the class to find the line number for. + test_file_path: The path to the test file where the class is located. + """ + # Look for the class definition line (or function for pytest-describe) + with open(test_file_path) as f: # noqa: PTH123 + for i, line in enumerate(f): + # Match "class ClassName" or "class ClassName(" or "class ClassName:" + # Also match "def ClassName(" for pytest-describe blocks + if ( + line.strip().startswith(f"class {class_name}") + or line.strip().startswith(f"class {class_name}(") + or line.strip().startswith(f"def {class_name}(") + ): + return str(i + 1) + error_str: str = f"Class {class_name!r} not found on any line in {test_file_path}" + raise ValueError(error_str) + + def get_absolute_test_id(test_id: str, test_path: pathlib.Path) -> str: """Get the absolute test id by joining the testPath with the test_id.""" split_id = test_id.split("::")[1:] diff --git a/python_files/tests/pytestadapter/test_coverage.py b/python_files/tests/pytestadapter/test_coverage.py index d2d276172a8d..f2387527698f 100644 --- a/python_files/tests/pytestadapter/test_coverage.py +++ b/python_files/tests/pytestadapter/test_coverage.py @@ -142,3 +142,23 @@ def test_coverage_w_omit_config(): assert results # assert one file is reported and one file (as specified in pyproject.toml) is omitted assert len(results) == 1 + + +def test_pytest_cov_manual_plugin_loading(): + """ + Test that pytest-cov is detected when loaded manually via -p pytest_cov.plugin. + + This test verifies the fix for issue #25590, where pytest-cov detection failed + when using --disable-plugin-autoload with -p pytest_cov.plugin. The plugin is + registered under its module name (pytest_cov.plugin) instead of entry point name + (pytest_cov) in this scenario. 
+ """ + args = ["--collect-only"] + env_add = {"COVERAGE_ENABLED": "True", "_PYTEST_MANUAL_PLUGIN_LOAD": "True"} + cov_folder_path = TEST_DATA_PATH / "coverage_gen" + + # Should NOT raise VSCodePytestError about pytest-cov not being installed + actual = runner_with_cwd_env(args, cov_folder_path, env_add) + assert actual is not None + # Verify discovery succeeded (status != "error") + assert actual[0].get("status") != "error" diff --git a/python_files/tests/pytestadapter/test_utils.py b/python_files/tests/pytestadapter/test_utils.py index ef0ed2daf4e9..70201db7d097 100644 --- a/python_files/tests/pytestadapter/test_utils.py +++ b/python_files/tests/pytestadapter/test_utils.py @@ -12,7 +12,7 @@ script_dir = pathlib.Path(__file__).parent.parent.parent sys.path.append(os.fspath(script_dir)) -from vscode_pytest import has_symlink_parent # noqa: E402 +from vscode_pytest import cached_fsdecode, has_symlink_parent # noqa: E402 def test_has_symlink_parent_with_symlink(): @@ -33,3 +33,25 @@ def test_has_symlink_parent_without_symlink(): folder_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" # Check that has_symlink_parent correctly identifies that there are no symbolic links assert not has_symlink_parent(folder_path) + + +def test_cached_fsdecode(): + """Test that cached_fsdecode correctly caches path-to-string conversions.""" + # Create a test path + test_path = TEST_DATA_PATH / "simple_pytest.py" + + # First call should compute and cache + result1 = cached_fsdecode(test_path) + assert result1 == os.fspath(test_path) + assert isinstance(result1, str) + + # Second call should return cached value (same object) + result2 = cached_fsdecode(test_path) + assert result2 == result1 + assert result2 is result1 # Should be the same object from cache + + # Different path should be cached independently + test_path2 = TEST_DATA_PATH / "parametrize_tests.py" + result3 = cached_fsdecode(test_path2) + assert result3 == os.fspath(test_path2) + assert result3 != result1 diff --git a/python_files/tests/test_python_server.py b/python_files/tests/test_python_server.py new file mode 100644 index 000000000000..ca542b8ea292 --- /dev/null +++ b/python_files/tests/test_python_server.py @@ -0,0 +1,162 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +"""Tests for python_server.py, specifically EOF handling to prevent infinite loops.""" + +import io +from unittest import mock + +import pytest + + +class TestGetHeaders: + """Tests for the get_headers function.""" + + def test_get_headers_normal(self): + """Test get_headers with valid headers.""" + # Arrange: Import the module + import python_server + + # Create a mock stdin with valid headers + mock_input = b"Content-Length: 100\r\nContent-Type: application/json\r\n\r\n" + mock_stdin = io.BytesIO(mock_input) + + # Act + with mock.patch.object(python_server, "STDIN", mock.Mock(buffer=mock_stdin)): + headers = python_server.get_headers() + + # Assert + assert headers == {"Content-Length": "100", "Content-Type": "application/json"} + + def test_get_headers_eof_raises_error(self): + """Test that get_headers raises EOFError when stdin is closed (EOF).""" + # Arrange: Import the module + import python_server + + # Create a mock stdin that returns empty bytes (EOF) + mock_stdin = io.BytesIO(b"") + + # Act & Assert + with mock.patch.object(python_server, "STDIN", mock.Mock(buffer=mock_stdin)), pytest.raises( + EOFError, match="EOF reached while reading headers" + ): + python_server.get_headers() + + def test_get_headers_eof_mid_headers_raises_error(self): + """Test that get_headers raises EOFError when EOF occurs mid-headers.""" + # Arrange: Import the module + import python_server + + # Create a mock stdin with partial headers then EOF + mock_input = b"Content-Length: 100\r\n" # No terminating empty line + mock_stdin = io.BytesIO(mock_input) + + # Act & Assert + with mock.patch.object(python_server, "STDIN", mock.Mock(buffer=mock_stdin)), pytest.raises( + EOFError, match="EOF reached while reading headers" + ): + python_server.get_headers() + + def test_get_headers_empty_line_terminates(self): + """Test that an empty line (not EOF) properly terminates header reading.""" + # Arrange: Import the module + import python_server + + # Create a mock stdin with headers followed by empty line + mock_input = b"Content-Length: 50\r\n\r\nsome body content" + mock_stdin = io.BytesIO(mock_input) + + # Act + with mock.patch.object(python_server, "STDIN", mock.Mock(buffer=mock_stdin)): + headers = python_server.get_headers() + + # Assert + assert headers == {"Content-Length": "50"} + + +class TestEOFHandling: + """Tests for EOF handling in various functions that use get_headers.""" + + def test_custom_input_exits_on_eof(self): + """Test that custom_input exits gracefully on EOF.""" + # Arrange: Import the module + import python_server + + # Create a mock stdin that returns empty bytes (EOF) + mock_stdin = io.BytesIO(b"") + mock_stdout = io.BytesIO() + + # Act & Assert + with mock.patch.object( + python_server, "STDIN", mock.Mock(buffer=mock_stdin) + ), mock.patch.object(python_server, "STDOUT", mock.Mock(buffer=mock_stdout)), pytest.raises( + SystemExit + ) as exc_info: + python_server.custom_input("prompt> ") + + # Should exit with code 0 (graceful exit) + assert exc_info.value.code == 0 + + def test_handle_response_exits_on_eof(self): + """Test that handle_response exits gracefully on EOF.""" + # Arrange: Import the module + import python_server + + # Create a mock stdin that returns empty bytes (EOF) + mock_stdin = io.BytesIO(b"") + + # Act & Assert + with mock.patch.object(python_server, "STDIN", mock.Mock(buffer=mock_stdin)), pytest.raises( + SystemExit + ) as exc_info: + python_server.handle_response("test-request-id") + + # Should exit with code 0 (graceful exit) + assert exc_info.value.code == 0 + + 
+class TestMainLoopEOFHandling: + """Tests that simulate the main loop EOF scenario.""" + + def test_main_loop_exits_on_eof(self): + """Test that the main loop pattern exits gracefully on EOF. + + This test verifies the fix for GitHub issue #25620 where the server + would spin at 100% CPU instead of exiting when VS Code closes. + """ + # Arrange: Import the module + import python_server + + # Create a mock stdin that returns empty bytes (EOF) + mock_stdin = io.BytesIO(b"") + + # Simulate what happens in the main loop + with mock.patch.object(python_server, "STDIN", mock.Mock(buffer=mock_stdin)): + try: + python_server.get_headers() + # If we get here without raising EOFError, the fix isn't working + pytest.fail("Expected EOFError to be raised on EOF") + except EOFError: + # This is the expected behavior - the fix is working + pass + + def test_readline_eof_vs_empty_line(self): + """Test that we correctly distinguish between EOF and empty line. + + EOF: readline() returns b'' (empty bytes) + Empty line: readline() returns b'\\r\\n' or b'\\n' (newline bytes) + """ + # Test EOF case + eof_stream = io.BytesIO(b"") + result = eof_stream.readline() + assert result == b"", "EOF should return empty bytes" + + # Test empty line case + empty_line_stream = io.BytesIO(b"\r\n") + result = empty_line_stream.readline() + assert result == b"\r\n", "Empty line should return newline bytes" + + # Test empty line with just newline + empty_line_stream2 = io.BytesIO(b"\n") + result = empty_line_stream2.readline() + assert result == b"\n", "Empty line should return newline bytes" diff --git a/python_files/tests/test_shell_integration.py b/python_files/tests/test_shell_integration.py index 574edfc056b4..7503a725b6d1 100644 --- a/python_files/tests/test_shell_integration.py +++ b/python_files/tests/test_shell_integration.py @@ -17,7 +17,7 @@ def test_decoration_success(): if sys.platform != "win32" and (not is_wsl): assert ( result - == "\x1b]633;C\x07\x1b]633;E;None\x07\x1b]633;D;0\x07\x1b]633;A\x07>>> \x1b]633;B\x07" + == "\x01\x1b]633;C\x07\x1b]633;E;None\x07\x1b]633;D;0\x07\x1b]633;A\x07\x02>>> \x01\x1b]633;B\x07\x02" ) else: pass @@ -32,7 +32,7 @@ def test_decoration_failure(): if sys.platform != "win32" and (not is_wsl): assert ( result - == "\x1b]633;C\x07\x1b]633;E;None\x07\x1b]633;D;1\x07\x1b]633;A\x07>>> \x1b]633;B\x07" + == "\x01\x1b]633;C\x07\x1b]633;E;None\x07\x1b]633;D;1\x07\x1b]633;A\x07\x02>>> \x01\x1b]633;B\x07\x02" ) else: pass diff --git a/python_files/tests/unittestadapter/.data/doctest_patched_module.py b/python_files/tests/unittestadapter/.data/doctest_patched_module.py new file mode 100644 index 000000000000..636c5320b6d6 --- /dev/null +++ b/python_files/tests/unittestadapter/.data/doctest_patched_module.py @@ -0,0 +1,17 @@ +""" +Patched doctest module. +This module's doctests will be patched to have proper IDs. + +>>> 2 + 2 +4 +""" + + +def example_function(): + """ + Example function with doctest. + + >>> example_function() + 'works' + """ + return "works" diff --git a/python_files/tests/unittestadapter/.data/doctest_standard.py b/python_files/tests/unittestadapter/.data/doctest_standard.py new file mode 100644 index 000000000000..52a10aa46a7f --- /dev/null +++ b/python_files/tests/unittestadapter/.data/doctest_standard.py @@ -0,0 +1,7 @@ +""" +Standard doctest module that should be blocked. +This has a simple doctest with short ID. 
+ +>>> 2 + 2 +4 +""" diff --git a/python_files/tests/unittestadapter/.data/test_doctest_patched.py b/python_files/tests/unittestadapter/.data/test_doctest_patched.py new file mode 100644 index 000000000000..3a719c7139ca --- /dev/null +++ b/python_files/tests/unittestadapter/.data/test_doctest_patched.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +"""Test file with patched doctest integration that should work.""" + +import unittest +import doctest +import sys +import doctest_patched_module + + +# Patch DocTestCase to modify test IDs to be compatible with the extension +original_init = doctest.DocTestCase.__init__ + + +def patched_init(self, test, optionflags=0, setUp=None, tearDown=None, checker=None): + """Patch to modify doctest names to have proper hierarchy.""" + if hasattr(test, 'name'): + # Get module name + module_hierarchy = test.name.split('.') + module_name = module_hierarchy[0] if module_hierarchy else 'unknown' + + # Reconstruct with proper formatting to have enough components + # Format: module.file.class.function + if test.filename.endswith('.py'): + file_base = test.filename.split('/')[-1].replace('.py', '') + test_name = test.name.split('.')[-1] if '.' in test.name else test.name + # Create a properly formatted ID with enough components + test.name = f"{module_name}.{file_base}._DocTests.{test_name}" + + # Call original init + original_init(self, test, optionflags, setUp, tearDown, checker) + + +# Apply the patch +doctest.DocTestCase.__init__ = patched_init + + +def load_tests(loader, tests, ignore): + """ + Standard hook for unittest to load tests. + This uses patched doctest to create compatible test IDs. + """ + tests.addTests(doctest.DocTestSuite(doctest_patched_module)) + return tests + + +# Clean up the patch after loading +def tearDownModule(): + """Restore original DocTestCase.__init__""" + doctest.DocTestCase.__init__ = original_init diff --git a/python_files/tests/unittestadapter/.data/test_doctest_standard.py b/python_files/tests/unittestadapter/.data/test_doctest_standard.py new file mode 100644 index 000000000000..f5dba1209b98 --- /dev/null +++ b/python_files/tests/unittestadapter/.data/test_doctest_standard.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +"""Test file with standard doctest integration that should be blocked.""" + +import unittest +import doctest +import doctest_standard + + +def load_tests(loader, tests, ignore): + """ + Standard hook for unittest to load tests. + This uses standard doctest without any patching. + """ + tests.addTests(doctest.DocTestSuite(doctest_standard)) + return tests diff --git a/python_files/tests/unittestadapter/expected_discovery_test_output.py b/python_files/tests/unittestadapter/expected_discovery_test_output.py index 9de0eff8238c..0901f21bfbc2 100644 --- a/python_files/tests/unittestadapter/expected_discovery_test_output.py +++ b/python_files/tests/unittestadapter/expected_discovery_test_output.py @@ -9,6 +9,25 @@ TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" +def find_class_line_number(class_name: str, test_file_path) -> str: + """Function which finds the correct line number for a class definition. + + Args: + class_name: The name of the class to find the line number for. + test_file_path: The path to the test file where the class is located. 
+ """ + # Look for the class definition line + with pathlib.Path(test_file_path).open() as f: + for i, line in enumerate(f): + # Match "class ClassName" or "class ClassName(" or "class ClassName:" + if line.strip().startswith(f"class {class_name}") or line.strip().startswith( + f"class {class_name}(" + ): + return str(i + 1) + error_str: str = f"Class {class_name!r} not found on any line in {test_file_path}" + raise ValueError(error_str) + + skip_unittest_folder_discovery_output = { "path": os.fspath(TEST_DATA_PATH / "unittest_skip"), "name": "unittest_skip", @@ -49,6 +68,10 @@ ], "id_": os.fspath(TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py") + "\\SimpleTest", + "lineno": find_class_line_number( + "SimpleTest", + TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py", + ), } ], "id_": os.fspath(TEST_DATA_PATH / "unittest_skip" / "unittest_skip_function.py"), @@ -114,6 +137,16 @@ }, ], "id_": complex_tree_file_path + "\\" + "TreeOne", + "lineno": find_class_line_number( + "TreeOne", + pathlib.PurePath( + TEST_DATA_PATH, + "utils_complex_tree", + "test_outer_folder", + "test_inner_folder", + "test_utils_complex_tree.py", + ), + ), } ], "id_": complex_tree_file_path, diff --git a/python_files/tests/unittestadapter/test_utils.py b/python_files/tests/unittestadapter/test_utils.py index 390b0779a44d..dc8a81175e70 100644 --- a/python_files/tests/unittestadapter/test_utils.py +++ b/python_files/tests/unittestadapter/test_utils.py @@ -291,3 +291,49 @@ def test_build_empty_tree() -> None: assert tests is not None assert tests.get("children") == [] assert not errors + + +def test_doctest_standard_blocked() -> None: + """Standard doctests with short IDs should be skipped with an error message.""" + start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "test_doctest_standard*" + + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern) + tests, errors = build_test_tree(suite, start_dir) + + # Should return a tree but with no test children (since doctests are skipped) + assert tests is not None + # Check that we got an error about doctests not being supported + assert len(errors) > 0 + assert "Skipping doctest as it is not supported for the extension" in errors[0] + + +def test_doctest_patched_works() -> None: + """Patched doctests with properly formatted IDs should be processed normally.""" + start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "test_doctest_patched*" + + loader = unittest.TestLoader() + suite = loader.discover(start_dir, pattern) + tests, errors = build_test_tree(suite, start_dir) + + # Should successfully build a tree with the patched doctest + assert tests is not None + + # The patched doctests should have proper IDs and be included + # We should find at least one test child (the doctests that were patched) + def count_tests(node): + """Recursively count test nodes.""" + if node.get("type_") == "test": + return 1 + count = 0 + for child in node.get("children", []): + count += count_tests(child) + return count + + test_count = count_tests(tests) + # We expect at least the module doctest and function doctest + assert test_count > 0, "Patched doctests should be included in the tree" + # Should not have doctest-related errors since they're properly formatted + assert not any("doctest" in str(e).lower() for e in errors) diff --git a/python_files/unittestadapter/execution.py b/python_files/unittestadapter/execution.py index a0a48c61470a..951289850884 100644 --- a/python_files/unittestadapter/execution.py +++ 
b/python_files/unittestadapter/execution.py @@ -284,10 +284,10 @@ def send_run_data(raw_data, test_run_pipe): run_test_ids_pipe = os.environ.get("RUN_TEST_IDS_PIPE") test_run_pipe = os.getenv("TEST_RUN_PIPE") if not run_test_ids_pipe: - print("Error[vscode-unittest]: RUN_TEST_IDS_PIPE env var is not set.") + print("Error[vscode-unittest]: RUN_TEST_IDS_PIPE env var is not set.", file=sys.stderr) raise VSCodeUnittestError("Error[vscode-unittest]: RUN_TEST_IDS_PIPE env var is not set.") if not test_run_pipe: - print("Error[vscode-unittest]: TEST_RUN_PIPE env var is not set.") + print("Error[vscode-unittest]: TEST_RUN_PIPE env var is not set.", file=sys.stderr) raise VSCodeUnittestError("Error[vscode-unittest]: TEST_RUN_PIPE env var is not set.") test_ids = [] cwd = pathlib.Path(start_dir).absolute() @@ -295,11 +295,10 @@ def send_run_data(raw_data, test_run_pipe): # Read the test ids from the file, attempt to delete file afterwords. ids_path = pathlib.Path(run_test_ids_pipe) test_ids = ids_path.read_text(encoding="utf-8").splitlines() - print("Received test ids from temp file.") try: ids_path.unlink() except Exception as e: - print("Error[vscode-pytest]: unable to delete temp file" + str(e)) + print(f"Error[vscode-unittest]: unable to delete temp file: {e}", file=sys.stderr) except Exception as e: # No test ids received from buffer, return error payload @@ -318,10 +317,6 @@ def send_run_data(raw_data, test_run_pipe): is_coverage_run = os.environ.get("COVERAGE_ENABLED") is not None include_branches = False if is_coverage_run: - print( - "COVERAGE_ENABLED env var set, starting coverage. workspace_root used as parent dir:", - workspace_root, - ) import coverage # insert "python_files/lib/python" into the path so packaging can be imported @@ -350,7 +345,6 @@ def send_run_data(raw_data, test_run_pipe): # If no error occurred, we will have test ids to run. if manage_py_path := os.environ.get("MANAGE_PY_PATH"): - print("MANAGE_PY_PATH env var set, running Django test suite.") args = argv[index + 1 :] or [] django_execution_runner(manage_py_path, test_ids, args) else: diff --git a/python_files/unittestadapter/pvsc_utils.py b/python_files/unittestadapter/pvsc_utils.py index 09a5ec9f3be5..d6920592a4d4 100644 --- a/python_files/unittestadapter/pvsc_utils.py +++ b/python_files/unittestadapter/pvsc_utils.py @@ -44,6 +44,7 @@ class TestItem(TestData): class TestNode(TestData): children: "List[TestNode | TestItem]" + lineno: NotRequired[str] # Optional field for class nodes class TestExecutionStatus(str, enum.Enum): @@ -101,6 +102,16 @@ def get_test_case(suite): yield from get_test_case(test) +def get_class_line(test_case: unittest.TestCase) -> Optional[str]: + """Get the line number where a test class is defined.""" + try: + test_class = test_case.__class__ + _sourcelines, lineno = inspect.getsourcelines(test_class) + return str(lineno) + except Exception: + return None + + def get_source_line(obj) -> str: """Get the line number of a test case start line.""" try: @@ -203,12 +214,6 @@ def build_test_tree( root = build_test_node(top_level_directory, directory_path.name, TestNodeTypeEnum.folder) for test_case in get_test_case(suite): - if isinstance(test_case, doctest.DocTestCase): - print( - "Skipping doctest as it is not supported for the extension. 
Test case: ", test_case - ) - error = ["Skipping doctest as it is not supported for the extension."] - continue test_id = test_case.id() if test_id.startswith("unittest.loader._FailedTest"): error.append(str(test_case._exception)) # type: ignore # noqa: SLF001 @@ -221,6 +226,14 @@ def build_test_tree( else: # Get the static test path components: filename, class name and function name. components = test_id.split(".") + # Check if this is a doctest with insufficient components that would cause unpacking to fail + if len(components) < 3 and isinstance(test_case, doctest.DocTestCase): + print( + "Skipping doctest as it is not supported for the extension. Test case: ", + test_case, + ) + error = ["Skipping doctest as it is not supported for the extension."] + continue *folders, filename, class_name, function_name = components py_filename = f"{filename}.py" @@ -247,6 +260,12 @@ def build_test_tree( class_name, file_path, TestNodeTypeEnum.class_, current_node ) + # Add line number to class node if not already present. + if "lineno" not in current_node: + class_lineno = get_class_line(test_case) + if class_lineno is not None: + current_node["lineno"] = class_lineno + # Get test line number. test_method = getattr(test_case, test_case._testMethodName) # noqa: SLF001 lineno = get_source_line(test_method) diff --git a/python_files/vscode_pytest/__init__.py b/python_files/vscode_pytest/__init__.py index 72eaa7a787d5..89565dab1264 100644 --- a/python_files/vscode_pytest/__init__.py +++ b/python_files/vscode_pytest/__init__.py @@ -10,12 +10,22 @@ import pathlib import sys import traceback -from typing import TYPE_CHECKING, Any, Dict, Generator, Literal, TypedDict +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generator, + Literal, + Protocol, + TypedDict, + cast, +) import pytest if TYPE_CHECKING: from pluggy import Result + from typing_extensions import NotRequired USES_PYTEST_DESCRIBE = False @@ -25,6 +35,13 @@ USES_PYTEST_DESCRIBE = True +class HasPathOrFspath(Protocol): + """Protocol defining objects that have either a path or fspath attribute.""" + + path: pathlib.Path | None = None + fspath: Any | None = None + + class TestData(TypedDict): """A general class that all test objects inherit from.""" @@ -45,6 +62,7 @@ class TestNode(TestData): """A general class that handles all test data which contains children.""" children: list[TestNode | TestItem | None] + lineno: NotRequired[str] # Optional field for class/function nodes class VSCodePytestError(Exception): @@ -57,14 +75,21 @@ def __init__(self, message): ERRORS = [] IS_DISCOVERY = False map_id_to_path = {} -collected_tests_so_far = [] +collected_tests_so_far = set() TEST_RUN_PIPE = os.getenv("TEST_RUN_PIPE") SYMLINK_PATH = None INCLUDE_BRANCHES = False +# Performance optimization caches for path resolution +_path_cache: dict[int, pathlib.Path] = {} # Cache node paths by object id +_path_to_str_cache: dict[pathlib.Path, str] = {} # Cache path-to-string conversions +_CACHED_CWD: pathlib.Path | None = None + def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001 - has_pytest_cov = early_config.pluginmanager.hasplugin("pytest_cov") + has_pytest_cov = early_config.pluginmanager.hasplugin( + "pytest_cov" + ) or early_config.pluginmanager.hasplugin("pytest_cov.plugin") has_cov_arg = any("--cov" in arg for arg in args) if has_cov_arg and not has_pytest_cov: raise VSCodePytestError( @@ -152,7 +177,7 @@ def pytest_exception_interact(node, call, report): report_value = "failure" node_id = get_absolute_test_id(node.nodeid, 
get_node_path(node)) if node_id not in collected_tests_so_far: - collected_tests_so_far.append(node_id) + collected_tests_so_far.add(node_id) item_result = create_test_outcome( node_id, report_value, @@ -277,7 +302,7 @@ def pytest_report_teststatus(report, config): # noqa: ARG001 # Calculate the absolute test id and use this as the ID moving forward. absolute_node_id = get_absolute_test_id(report.nodeid, node_path) if absolute_node_id not in collected_tests_so_far: - collected_tests_so_far.append(absolute_node_id) + collected_tests_so_far.add(absolute_node_id) item_result = create_test_outcome( absolute_node_id, report_value, @@ -311,7 +336,7 @@ def pytest_runtest_protocol(item, nextitem): # noqa: ARG001 report_value = "skipped" cwd = pathlib.Path.cwd() if absolute_node_id not in collected_tests_so_far: - collected_tests_so_far.append(absolute_node_id) + collected_tests_so_far.add(absolute_node_id) item_result = create_test_outcome( absolute_node_id, report_value, @@ -522,11 +547,131 @@ def pytest_sessionfinish(session, exitstatus): send_message(payload) +def construct_nested_folders( + file_nodes_dict: dict[str, TestNode], + session_node: TestNode, + session_children_dict: dict[str, TestNode], +) -> dict[str, TestNode]: + """Iterate through all files and construct them into nested folders. + + Keyword arguments: + file_nodes_dict -- Dictionary of all file nodes + session_node -- The session node that will be parent to the folder structure + session_children_dict -- Dictionary of session's children nodes indexed by ID + + Returns: + dict[str, TestNode] -- Updated session_children_dict with folder nodes added + """ + created_files_folders_dict: dict[str, TestNode] = {} + for file_node in file_nodes_dict.values(): + # Iterate through all the files that exist and construct them into nested folders. + root_folder_node: TestNode + try: + root_folder_node: TestNode = build_nested_folders( + file_node, created_files_folders_dict, session_node + ) + except ValueError: + # This exception is raised when the session node is not a parent of the file node. + print( + "[vscode-pytest]: Session path not a parent of test paths, adjusting session node to common parent." + ) + file_path_str: str = str(file_node["path"]) + session_path_str: str = str(session_node["path"]) + common_parent = os.path.commonpath([file_path_str, session_path_str]) + common_parent_path = pathlib.Path(common_parent) + print("[vscode-pytest]: Session node now set to: ", common_parent) + session_node["path"] = common_parent_path # pathlib.Path + session_node["id_"] = common_parent # str + session_node["name"] = common_parent_path.name # str + root_folder_node = build_nested_folders( + file_node, created_files_folders_dict, session_node + ) + # The final folder we get to is the highest folder in the path + # and therefore we add this as a child to the session. + root_id = root_folder_node.get("id_") + if root_id and root_id not in session_children_dict: + session_children_dict[root_id] = root_folder_node + + return session_children_dict + + +def process_parameterized_test( + test_case: pytest.Item, + test_node: TestItem, + function_nodes_dict: dict[str, TestNode], + file_nodes_dict: dict[str, TestNode], +) -> TestNode: + """Process a parameterized test case and create appropriate function nodes. 
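+    For example (illustrative), a collected test named "test_adding[3+5-8]" is
+    split into a function node "test_adding" with a child item named "[3+5-8]".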
+ + Keyword arguments: + test_case -- the parameterized pytest test case; must have callspec attribute + test_node -- the test node created from the test case + function_nodes_dict -- dictionary of function nodes indexed by ID + file_nodes_dict -- dictionary of file nodes indexed by path + + Returns: + TestNode -- the node to use for further processing (function node or original test node) + """ + function_name: str = "" + # parameterized test cases cut the repetitive part of the name off. + parent_part, parameterized_section = test_node["name"].split("[", 1) + test_node["name"] = "[" + parameterized_section + + first_split = test_case.nodeid.rsplit( + "::", 1 + ) # splits the parameterized test name from the rest of the nodeid + second_split = first_split[0].rsplit( + ".py", 1 + ) # splits the file path from the rest of the nodeid + + class_and_method = second_split[1] + "::" # This has "::" separator at both ends + # construct the parent id, so it is absolute path :: any class and method :: parent_part + parent_id = cached_fsdecode(get_node_path(test_case)) + class_and_method + parent_part + + try: + function_name = test_case.originalname # type: ignore + except AttributeError: # actual error has occurred + ERRORS.append( + f"unable to find original name for {test_case.name} with parameterization detected." + ) + raise VSCodePytestError( + "Unable to find original name for parameterized test case" + ) from None + + function_test_node = function_nodes_dict.get(parent_id) + if function_test_node is None: + function_test_node = create_parameterized_function_node( + function_name, get_node_path(test_case), parent_id + ) + function_nodes_dict[parent_id] = function_test_node + + if test_node not in function_test_node["children"]: + function_test_node["children"].append(test_node) + + # Check if the parent node of the function is file, if so create/add to this file node. + if isinstance(test_case.parent, pytest.File): + # calculate the parent path of the test case + parent_path = get_node_path(test_case.parent) + parent_path_key = cached_fsdecode(parent_path) + parent_test_case = file_nodes_dict.get(parent_path_key) + if parent_test_case is None: + parent_test_case = create_file_node(parent_path) + file_nodes_dict[parent_path_key] = parent_test_case + if function_test_node not in parent_test_case["children"]: + parent_test_case["children"].append(function_test_node) + + # Return the function node as the test node to handle subsequent nesting + return function_test_node + + def build_test_tree(session: pytest.Session) -> TestNode: """Builds a tree made up of testing nodes from the pytest session. Keyword arguments: - session -- the pytest session object. + session -- the pytest session object that contains test items. + + Returns: + TestNode -- The root node of the constructed test tree. """ session_node = create_session_node(session) session_children_dict: dict[str, TestNode] = {} @@ -542,54 +687,10 @@ def build_test_tree(session: pytest.Session) -> TestNode: for test_case in session.items: test_node = create_test_node(test_case) if hasattr(test_case, "callspec"): # This means it is a parameterized test. - function_name: str = "" - # parameterized test cases cut the repetitive part of the name off. 
- parent_part, parameterized_section = test_node["name"].split("[", 1) - test_node["name"] = "[" + parameterized_section - - first_split = test_case.nodeid.rsplit( - "::", 1 - ) # splits the parameterized test name from the rest of the nodeid - second_split = first_split[0].rsplit( - ".py", 1 - ) # splits the file path from the rest of the nodeid - - class_and_method = second_split[1] + "::" # This has "::" separator at both ends - # construct the parent id, so it is absolute path :: any class and method :: parent_part - parent_id = os.fspath(get_node_path(test_case)) + class_and_method + parent_part - # file, middle, param = test_case.nodeid.rsplit("::", 2) - # parent_id = test_case.nodeid.rsplit("::", 1)[0] + "::" + parent_part - # parent_path = os.fspath(get_node_path(test_case)) + "::" + parent_part - try: - function_name = test_case.originalname # type: ignore - function_test_node = function_nodes_dict[parent_id] - except AttributeError: # actual error has occurred - ERRORS.append( - f"unable to find original name for {test_case.name} with parameterization detected." - ) - raise VSCodePytestError( - "Unable to find original name for parameterized test case" - ) from None - except KeyError: - function_test_node: TestNode = create_parameterized_function_node( - function_name, get_node_path(test_case), parent_id - ) - function_nodes_dict[parent_id] = function_test_node - if test_node not in function_test_node["children"]: - function_test_node["children"].append(test_node) - # Check if the parent node of the function is file, if so create/add to this file node. - if isinstance(test_case.parent, pytest.File): - # calculate the parent path of the test case - parent_path = get_node_path(test_case.parent) - try: - parent_test_case = file_nodes_dict[os.fspath(parent_path)] - except KeyError: - parent_test_case = create_file_node(parent_path) - file_nodes_dict[os.fspath(parent_path)] = parent_test_case - if function_test_node not in parent_test_case["children"]: - parent_test_case["children"].append(function_test_node) - # If the parent is not a file, it is a class, add the function node as the test node to handle subsequent nesting. - test_node = function_test_node + # Process parameterized test and get the function node to use for further processing + test_node = process_parameterized_test( + test_case, test_node, function_nodes_dict, file_nodes_dict + ) if isinstance(test_case.parent, pytest.Class) or ( USES_PYTEST_DESCRIBE and isinstance(test_case.parent, DescribeBlock) ): @@ -600,9 +701,8 @@ def build_test_tree(session: pytest.Session) -> TestNode: USES_PYTEST_DESCRIBE and isinstance(case_iter, DescribeBlock) ): # While the given node is a class, create a class and nest the previous node as a child. - try: - test_class_node = class_nodes_dict[case_iter.nodeid] - except KeyError: + test_class_node = class_nodes_dict.get(case_iter.nodeid) + if test_class_node is None: test_class_node = create_class_node(case_iter) class_nodes_dict[case_iter.nodeid] = test_class_node # Check if the class already has the child node. This will occur if the test is parameterized. @@ -619,50 +719,35 @@ def build_test_tree(session: pytest.Session) -> TestNode: break parent_path = get_node_path(parent_module) # Create a file node that has the last class as a child. 
- try: - test_file_node: TestNode = file_nodes_dict[os.fspath(parent_path)] - except KeyError: + parent_path_key = cached_fsdecode(parent_path) + test_file_node = file_nodes_dict.get(parent_path_key) + if test_file_node is None: test_file_node = create_file_node(parent_path) - file_nodes_dict[os.fspath(parent_path)] = test_file_node + file_nodes_dict[parent_path_key] = test_file_node # Check if the class is already a child of the file node. if test_class_node is not None and test_class_node not in test_file_node["children"]: test_file_node["children"].append(test_class_node) elif not hasattr(test_case, "callspec"): # This includes test cases that are pytest functions or a doctests. - parent_path = get_node_path(test_case.parent) - try: - parent_test_case = file_nodes_dict[os.fspath(parent_path)] - except KeyError: + if test_case.parent is None: + ERRORS.append(f"Test case {test_case.name} has no parent") + continue + parent_path = get_node_path( + cast( + "pytest.Session | pytest.Item | pytest.File | pytest.Class | pytest.Module | HasPathOrFspath", + test_case.parent, + ) + ) + parent_path_key = cached_fsdecode(parent_path) + parent_test_case = file_nodes_dict.get(parent_path_key) + if parent_test_case is None: parent_test_case = create_file_node(parent_path) - file_nodes_dict[os.fspath(parent_path)] = parent_test_case + file_nodes_dict[parent_path_key] = parent_test_case parent_test_case["children"].append(test_node) - created_files_folders_dict: dict[str, TestNode] = {} - for file_node in file_nodes_dict.values(): - # Iterate through all the files that exist and construct them into nested folders. - root_folder_node: TestNode - try: - root_folder_node: TestNode = build_nested_folders( - file_node, created_files_folders_dict, session_node - ) - except ValueError: - # This exception is raised when the session node is not a parent of the file node. - print( - "[vscode-pytest]: Session path not a parent of test paths, adjusting session node to common parent." - ) - common_parent = os.path.commonpath([file_node["path"], get_node_path(session)]) - common_parent_path = pathlib.Path(common_parent) - print("[vscode-pytest]: Session node now set to: ", common_parent) - session_node["path"] = common_parent_path # pathlib.Path - session_node["id_"] = common_parent # str - session_node["name"] = common_parent_path.name # str - root_folder_node = build_nested_folders( - file_node, created_files_folders_dict, session_node - ) - # The final folder we get to is the highest folder in the path - # and therefore we add this as a child to the session. 
-        root_id = root_folder_node.get("id_")
-        if root_id and root_id not in session_children_dict:
-            session_children_dict[root_id] = root_folder_node
+    # Process all files and construct them into nested folders
+    session_children_dict = construct_nested_folders(
+        file_nodes_dict, session_node, session_children_dict
+    )
     session_node["children"] = list(session_children_dict.values())
     return session_node
 
@@ -698,11 +783,11 @@ def build_nested_folders(
     max_iter = 100
     while iterator_path != session_node_path:
         curr_folder_name = iterator_path.name
-        try:
-            curr_folder_node: TestNode = created_files_folders_dict[os.fspath(iterator_path)]
-        except KeyError:
-            curr_folder_node: TestNode = create_folder_node(curr_folder_name, iterator_path)
-            created_files_folders_dict[os.fspath(iterator_path)] = curr_folder_node
+        iterator_path_key = cached_fsdecode(iterator_path)
+        curr_folder_node = created_files_folders_dict.get(iterator_path_key)
+        if curr_folder_node is None:
+            curr_folder_node = create_folder_node(curr_folder_name, iterator_path)
+            created_files_folders_dict[iterator_path_key] = curr_folder_node
         if prev_folder_node not in curr_folder_node["children"]:
             curr_folder_node["children"].append(prev_folder_node)
         iterator_path = iterator_path.parent
@@ -763,12 +848,25 @@ def create_class_node(class_module: pytest.Class | DescribeBlock) -> TestNode:
     Keyword arguments:
     class_module -- the pytest object representing a class module.
     """
+    # Get line number for the class definition
+    class_line = ""
+    try:
+        if hasattr(class_module, "obj"):
+            import inspect
+
+            _, lineno = inspect.getsourcelines(class_module.obj)
+            class_line = str(lineno)
+    except (OSError, TypeError):
+        # If we can't get the source lines, leave lineno empty
+        pass
+
     return {
         "name": class_module.name,
         "path": get_node_path(class_module),
         "type_": "class",
         "children": [],
         "id_": get_absolute_test_id(class_module.nodeid, get_node_path(class_module)),
+        "lineno": class_line,
     }
 
 
@@ -851,12 +949,54 @@ class CoveragePayloadDict(Dict):
     error: str | None  # Currently unused need to check
 
 
-def get_node_path(node: Any) -> pathlib.Path:
+def cached_fsdecode(path: pathlib.Path) -> str:
+    """Convert path to string with caching for performance.
+
+    This function caches path-to-string conversions to avoid redundant
+    os.fspath() conversions during test tree building.
+
+    Parameters:
+        path: The pathlib.Path object to convert to string.
+
+    Returns:
+        str: The string representation of the path.
+    """
+    if path not in _path_to_str_cache:
+        _path_to_str_cache[path] = os.fspath(path)
+    return _path_to_str_cache[path]
+
+
+def get_node_path(
+    node: pytest.Session
+    | pytest.Item
+    | pytest.File
+    | pytest.Class
+    | pytest.Module
+    | HasPathOrFspath,
+) -> pathlib.Path:
     """A function that returns the path of a node given the switch to pathlib.Path.
 
     It also evaluates if the node is a symlink and returns the equivalent path.
+
+    Parameters:
+        node: A pytest object or any object that has a path or fspath attribute.
+              Do NOT pass a pathlib.Path object; if you already have a Path, use it directly.
+
+    Returns:
+        pathlib.Path: The resolved path for the node.
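+
+        Example (illustrative): for a test item whose "path" attribute is
+        pathlib.Path("tests/test_foo.py"), that Path is returned as-is; when
+        SYMLINK_PATH is set, the equivalent path under the symlink root is used.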
""" - node_path = getattr(node, "path", None) or pathlib.Path(node.fspath) + cache_key = id(node) + if cache_key in _path_cache: + return _path_cache[cache_key] + + node_path = getattr(node, "path", None) + if node_path is None: + fspath = getattr(node, "fspath", None) + node_path = pathlib.Path(fspath) if fspath is not None else None if not node_path: raise VSCodePytestError( @@ -868,22 +1004,34 @@ def get_node_path(node: Any) -> pathlib.Path: # Get relative between the cwd (resolved path) and the node path. try: # Check to see if the node path contains the symlink root already - common_path = os.path.commonpath([SYMLINK_PATH, node_path]) + # Convert Path objects to strings for os.path.commonpath + symlink_str: str = str(SYMLINK_PATH) + node_path_str: str = str(node_path) + common_path = os.path.commonpath([symlink_str, node_path_str]) if common_path == os.fsdecode(SYMLINK_PATH): # The node path is already relative to the SYMLINK_PATH root therefore return - return node_path + result = node_path else: # If the node path is not a symlink, then we need to calculate the equivalent symlink path # get the relative path between the cwd and the node path (as the node path is not a symlink). - rel_path = node_path.relative_to(pathlib.Path.cwd()) + # Use cached cwd to avoid repeated system calls + global _CACHED_CWD + if _CACHED_CWD is None: + _CACHED_CWD = pathlib.Path.cwd() + rel_path = node_path.relative_to(_CACHED_CWD) # combine the difference between the cwd and the node path with the symlink path - return pathlib.Path(SYMLINK_PATH, rel_path) + result = pathlib.Path(SYMLINK_PATH, rel_path) except Exception as e: raise VSCodePytestError( f"Error occurred while calculating symlink equivalent from node path: {e}" - f"\n SYMLINK_PATH: {SYMLINK_PATH}, \n node path: {node_path}, \n cwd: {pathlib.Path.cwd()}" + f"\n SYMLINK_PATH: {SYMLINK_PATH}, \n node path: {node_path}, \n cwd: {_CACHED_CWD if _CACHED_CWD else pathlib.Path.cwd()}" ) from e - return node_path + else: + result = node_path + + # Cache before returning + _path_cache[cache_key] = result + return result __writer = None diff --git a/python_files/vscode_pytest/run_pytest_script.py b/python_files/vscode_pytest/run_pytest_script.py index c0f5114b375c..50ab12a35423 100644 --- a/python_files/vscode_pytest/run_pytest_script.py +++ b/python_files/vscode_pytest/run_pytest_script.py @@ -51,20 +51,23 @@ def run_pytest(args): run_test_ids_pipe = os.environ.get("RUN_TEST_IDS_PIPE") if run_test_ids_pipe: + ids_path = pathlib.Path(run_test_ids_pipe) try: - # Read the test ids from the file, delete file, and run pytest. - ids_path = pathlib.Path(run_test_ids_pipe) + # Read the test ids from the file and run pytest. ids = ids_path.read_text(encoding="utf-8").splitlines() + except Exception as e: + print("Error[vscode-pytest]: unable to read testIds from temp file" + str(e)) + run_pytest(args) + else: + arg_array = ["-p", "vscode_pytest", *args, *ids] + print("Running pytest with args: " + str(arg_array)) + pytest.main(arg_array) + finally: + # Delete the test ids temp file. 
try: ids_path.unlink() except Exception as e: print("Error[vscode-pytest]: unable to delete temp file" + str(e)) - arg_array = ["-p", "vscode_pytest", *args, *ids] - print("Running pytest with args: " + str(arg_array)) - pytest.main(arg_array) - except Exception as e: - print("Error[vscode-pytest]: unable to read testIds from temp file" + str(e)) - run_pytest(args) else: print("Error[vscode-pytest]: RUN_TEST_IDS_PIPE env var is not set.") run_pytest(args) diff --git a/requirements.in b/requirements.in index ba2339b1e966..8bbc9a0f3728 100644 --- a/requirements.in +++ b/requirements.in @@ -4,7 +4,7 @@ # 2) uv pip compile --generate-hashes --upgrade requirements.in -o requirements.txt # Unittest test adapter -typing-extensions==4.14.1 +typing-extensions==4.15.0 # Fallback env creator for debian microvenv diff --git a/requirements.txt b/requirements.txt index 2e6b6ee07783..ae747359d4e2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,8 @@ # This file was autogenerated by uv via the following command: # uv pip compile --generate-hashes requirements.in -o requirements.txt -importlib-metadata==8.7.0 \ - --hash=sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000 \ - --hash=sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd +importlib-metadata==8.7.1 \ + --hash=sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb \ + --hash=sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151 # via -r requirements.in microvenv==2025.0 \ --hash=sha256:568155ec18af01c89f270d35d123ab803b09672b480c3702d15fd69e9cc5bd1e \ @@ -12,43 +12,53 @@ packaging==25.0 \ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via -r requirements.in -tomli==2.2.1 \ - --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \ - --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \ - --hash=sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c \ - --hash=sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b \ - --hash=sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8 \ - --hash=sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6 \ - --hash=sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77 \ - --hash=sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff \ - --hash=sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea \ - --hash=sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192 \ - --hash=sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249 \ - --hash=sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee \ - --hash=sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4 \ - --hash=sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98 \ - --hash=sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8 \ - --hash=sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4 \ - --hash=sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281 \ - --hash=sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744 \ - --hash=sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69 \ - --hash=sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13 \ - 
--hash=sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140 \ - --hash=sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e \ - --hash=sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e \ - --hash=sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc \ - --hash=sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff \ - --hash=sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec \ - --hash=sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2 \ - --hash=sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222 \ - --hash=sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106 \ - --hash=sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272 \ - --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \ - --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7 +tomli==2.3.0 \ + --hash=sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456 \ + --hash=sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845 \ + --hash=sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999 \ + --hash=sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0 \ + --hash=sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878 \ + --hash=sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf \ + --hash=sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3 \ + --hash=sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be \ + --hash=sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52 \ + --hash=sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b \ + --hash=sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67 \ + --hash=sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549 \ + --hash=sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba \ + --hash=sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22 \ + --hash=sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c \ + --hash=sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f \ + --hash=sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6 \ + --hash=sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba \ + --hash=sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45 \ + --hash=sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f \ + --hash=sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77 \ + --hash=sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606 \ + --hash=sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441 \ + --hash=sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0 \ + --hash=sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f \ + --hash=sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530 \ + --hash=sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05 \ + --hash=sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8 \ + --hash=sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005 \ + --hash=sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879 \ + 
--hash=sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae \ + --hash=sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc \ + --hash=sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b \ + --hash=sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b \ + --hash=sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e \ + --hash=sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf \ + --hash=sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac \ + --hash=sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8 \ + --hash=sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b \ + --hash=sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf \ + --hash=sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463 \ + --hash=sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876 # via -r requirements.in -typing-extensions==4.14.1 \ - --hash=sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36 \ - --hash=sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76 +typing-extensions==4.15.0 \ + --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 # via -r requirements.in zipp==3.21.0 \ --hash=sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4 \ diff --git a/src/client/chat/utils.ts b/src/client/chat/utils.ts index bddd26049668..84df2901341b 100644 --- a/src/client/chat/utils.ts +++ b/src/client/chat/utils.ts @@ -19,6 +19,7 @@ import { JUPYTER_EXTENSION_ID, NotebookCellScheme } from '../common/constants'; import { dirname, join } from 'path'; import { resolveEnvironment, useEnvExtension } from '../envExt/api.internal'; import { ErrorWithTelemetrySafeReason } from '../common/errors/errorUtils'; +import { getWorkspaceFolders } from '../common/vscodeApis/workspaceApis'; export interface IResourceReference { resourcePath?: string; @@ -26,14 +27,21 @@ export interface IResourceReference { export function resolveFilePath(filepath?: string): Uri | undefined { if (!filepath) { - return workspace.workspaceFolders ? workspace.workspaceFolders[0].uri : undefined; + const folders = getWorkspaceFolders() ?? []; + return folders.length > 0 ? folders[0].uri : undefined; } - // starts with a scheme - try { - return Uri.parse(filepath); - } catch (e) { - return Uri.file(filepath); + // Check if it's a URI with a scheme (contains "://") + // This handles schemes like "file://", "vscode-notebook://", etc. 
+ // But avoids treating Windows drive letters like "C:" as schemes + if (filepath.includes('://')) { + try { + return Uri.parse(filepath); + } catch { + return Uri.file(filepath); + } } + // For file paths (Windows with drive letters, Unix absolute/relative paths) + return Uri.file(filepath); } /** diff --git a/src/client/common/application/commands.ts b/src/client/common/application/commands.ts index 402025ee38db..b43dc0a1e4a4 100644 --- a/src/client/common/application/commands.ts +++ b/src/client/common/application/commands.ts @@ -87,7 +87,7 @@ export interface ICommandNameArgumentTypeMapping extends ICommandNameWithoutArgu ['jupyter.opennotebook']: [undefined | Uri, undefined | CommandSource]; ['jupyter.runallcells']: [Uri]; ['extension.open']: [string]; - ['workbench.action.openIssueReporter']: [{ extensionId: string; issueBody: string }]; + ['workbench.action.openIssueReporter']: [{ extensionId: string; issueBody: string; extensionData?: string }]; [Commands.GetSelectedInterpreterPath]: [{ workspaceFolder: string } | string[]]; [Commands.TriggerEnvironmentSelection]: [undefined | Uri]; [Commands.Start_Native_REPL]: [undefined | Uri]; diff --git a/src/client/common/application/commands/reportIssueCommand.ts b/src/client/common/application/commands/reportIssueCommand.ts index e5633f4a4389..9ae099e44b4f 100644 --- a/src/client/common/application/commands/reportIssueCommand.ts +++ b/src/client/common/application/commands/reportIssueCommand.ts @@ -121,7 +121,7 @@ export class ReportIssueCommandHandler implements IExtensionSingleActivationServ await this.commandManager.executeCommand('workbench.action.openIssueReporter', { extensionId: 'ms-python.python', issueBody: template, - data: userTemplate.format( + extensionData: userTemplate.format( pythonVersion, virtualEnvKind, languageServer, diff --git a/src/client/common/application/terminalManager.ts b/src/client/common/application/terminalManager.ts index 9d0536e85243..dc2603e84a56 100644 --- a/src/client/common/application/terminalManager.ts +++ b/src/client/common/application/terminalManager.ts @@ -38,6 +38,9 @@ export class TerminalManager implements ITerminalManager { public onDidEndTerminalShellExecution(handler: (e: TerminalShellExecutionEndEvent) => void): Disposable { return window.onDidEndTerminalShellExecution(handler); } + public onDidChangeTerminalState(handler: (e: Terminal) => void): Disposable { + return window.onDidChangeTerminalState(handler); + } } /** diff --git a/src/client/common/application/types.ts b/src/client/common/application/types.ts index 65a8833a691c..34a95fb604f0 100644 --- a/src/client/common/application/types.ts +++ b/src/client/common/application/types.ts @@ -939,6 +939,8 @@ export interface ITerminalManager { onDidChangeTerminalShellIntegration(handler: (e: TerminalShellIntegrationChangeEvent) => void): Disposable; onDidEndTerminalShellExecution(handler: (e: TerminalShellExecutionEndEvent) => void): Disposable; + + onDidChangeTerminalState(handler: (e: Terminal) => void): Disposable; } export const IDebugService = Symbol('IDebugManager'); diff --git a/src/client/common/process/rawProcessApis.ts b/src/client/common/process/rawProcessApis.ts index 5e3641328b69..864191851c91 100644 --- a/src/client/common/process/rawProcessApis.ts +++ b/src/client/common/process/rawProcessApis.ts @@ -58,7 +58,8 @@ export function shellExec( const shellOptions = getDefaultOptions(options, defaultEnv); if (!options.doNotLog) { const processLogger = new ProcessLogger(new WorkspaceService()); - processLogger.logProcess(command, 
undefined, shellOptions); + const loggingOptions = { ...shellOptions, encoding: shellOptions.encoding ?? undefined }; + processLogger.logProcess(command, undefined, loggingOptions); } return new Promise((resolve, reject) => { // eslint-disable-next-line @typescript-eslint/no-explicit-any diff --git a/src/client/common/terminal/service.ts b/src/client/common/terminal/service.ts index e92fbd3d494f..0dffd5615ae1 100644 --- a/src/client/common/terminal/service.ts +++ b/src/client/common/terminal/service.ts @@ -9,7 +9,7 @@ import { IServiceContainer } from '../../ioc/types'; import { captureTelemetry } from '../../telemetry'; import { EventName } from '../../telemetry/constants'; import { ITerminalAutoActivation } from '../../terminals/types'; -import { ITerminalManager } from '../application/types'; +import { IApplicationShell, ITerminalManager } from '../application/types'; import { _SCRIPTS_DIR } from '../process/internal/scripts/constants'; import { IConfigurationService, IDisposableRegistry } from '../types'; import { @@ -20,12 +20,9 @@ import { TerminalShellType, } from './types'; import { traceVerbose } from '../../logging'; -import { getConfiguration } from '../vscodeApis/workspaceApis'; +import { sleep } from '../utils/async'; import { useEnvExtension } from '../../envExt/api.internal'; import { ensureTerminalLegacy } from '../../envExt/api.legacy'; -import { sleep } from '../utils/async'; -import { isWindows } from '../utils/platform'; -import { getPythonMinorVersion } from '../../repl/replUtils'; @injectable() export class TerminalService implements ITerminalService, Disposable { @@ -36,8 +33,13 @@ export class TerminalService implements ITerminalService, Disposable { private terminalHelper: ITerminalHelper; private terminalActivator: ITerminalActivator; private terminalAutoActivator: ITerminalAutoActivation; + private applicationShell: IApplicationShell; private readonly executeCommandListeners: Set = new Set(); private _terminalFirstLaunched: boolean = true; + private pythonReplCommandQueue: string[] = []; + private isReplReady: boolean = false; + private replPromptListener?: Disposable; + private replShellTypeListener?: Disposable; public get onDidCloseTerminal(): Event { return this.terminalClosed.event.bind(this.terminalClosed); } @@ -51,11 +53,13 @@ export class TerminalService implements ITerminalService, Disposable { this.terminalHelper = this.serviceContainer.get(ITerminalHelper); this.terminalManager = this.serviceContainer.get(ITerminalManager); this.terminalAutoActivator = this.serviceContainer.get(ITerminalAutoActivation); + this.applicationShell = this.serviceContainer.get(IApplicationShell); this.terminalManager.onDidCloseTerminal(this.terminalCloseHandler, this, disposableRegistry); this.terminalActivator = this.serviceContainer.get(ITerminalActivator); } public dispose() { this.terminal?.dispose(); + this.disposeReplListener(); if (this.executeCommandListeners && this.executeCommandListeners.size > 0) { this.executeCommandListeners.forEach((d) => { @@ -84,7 +88,86 @@ export class TerminalService implements ITerminalService, Disposable { commandLine: string, isPythonShell: boolean, ): Promise { - const terminal = this.terminal!; + if (isPythonShell) { + if (this.isReplReady) { + this.terminal?.sendText(commandLine); + traceVerbose(`Python REPL sendText: ${commandLine}`); + } else { + // Queue command to run once REPL is ready. 
+                this.pythonReplCommandQueue.push(commandLine);
+                traceVerbose(`Python REPL queued command: ${commandLine}`);
+                this.startReplListener();
+            }
+            return undefined;
+        }
+
+        // Non-REPL code execution
+        return this.executeCommandInternal(commandLine);
+    }
+
+    private startReplListener(): void {
+        if (this.replPromptListener || this.replShellTypeListener) {
+            return;
+        }
+
+        this.replShellTypeListener = this.terminalManager.onDidChangeTerminalState((terminal) => {
+            if (this.terminal && terminal === this.terminal) {
+                if (terminal.state.shell === 'python') {
+                    traceVerbose('Python REPL ready from terminal shell api');
+                    this.onReplReady();
+                }
+            }
+        });
+
+        let terminalData = '';
+        this.replPromptListener = this.applicationShell.onDidWriteTerminalData((e) => {
+            if (this.terminal && e.terminal === this.terminal) {
+                terminalData += e.data;
+                if (/>>>\s*$/.test(terminalData)) {
+                    traceVerbose('Python REPL ready, from >>> prompt detection');
+                    this.onReplReady();
+                }
+            }
+        });
+    }
+
+    private onReplReady(): void {
+        if (this.isReplReady) {
+            return;
+        }
+        this.isReplReady = true;
+        this.flushReplQueue();
+        this.disposeReplListener();
+    }
+
+    private disposeReplListener(): void {
+        if (this.replPromptListener) {
+            this.replPromptListener.dispose();
+            this.replPromptListener = undefined;
+        }
+        if (this.replShellTypeListener) {
+            this.replShellTypeListener.dispose();
+            this.replShellTypeListener = undefined;
+        }
+    }
+
+    private flushReplQueue(): void {
+        while (this.pythonReplCommandQueue.length > 0) {
+            const commandLine = this.pythonReplCommandQueue.shift();
+            if (commandLine) {
+                traceVerbose(`Executing queued REPL command: ${commandLine}`);
+                this.terminal?.sendText(commandLine);
+            }
+        }
+    }
+
+    private async executeCommandInternal(commandLine: string): Promise<TerminalShellExecution | undefined> {
+        const terminal = this.terminal;
+        if (!terminal) {
+            traceVerbose('Terminal not available, cannot execute command');
+            return undefined;
+        }
+
         if (!this.options?.hideFromUser) {
             terminal.show(true);
         }
@@ -108,21 +191,7 @@
             await promise;
         }
 
-        const config = getConfiguration('python');
-        const pythonrcSetting = config.get('terminal.shellIntegration.enabled');
-
-        const minorVersion = this.options?.resource
-            ? await getPythonMinorVersion(
-                  this.options.resource,
-                  this.serviceContainer.get<IInterpreterService>(IInterpreterService),
-              )
-            : undefined;
-
-        if ((isPythonShell && !pythonrcSetting) || (isPythonShell && isWindows()) || (minorVersion ?? 0) >= 13) {
-            // If user has explicitly disabled SI for Python, use sendText for inside Terminal REPL.
- terminal.sendText(commandLine); - return undefined; - } else if (terminal.shellIntegration) { + if (terminal.shellIntegration) { const execution = terminal.shellIntegration.executeCommand(commandLine); traceVerbose(`Shell Integration is enabled, executeCommand: ${commandLine}`); return execution; @@ -151,6 +220,7 @@ export class TerminalService implements ITerminalService, Disposable { name: this.options?.title || 'Python', hideFromUser: this.options?.hideFromUser, }); + return; } else { this.terminalShellType = this.terminalHelper.identifyTerminalShell(this.terminal); this.terminal = this.terminalManager.createTerminal({ @@ -180,6 +250,9 @@ export class TerminalService implements ITerminalService, Disposable { if (terminal === this.terminal) { this.terminalClosed.fire(); this.terminal = undefined; + this.isReplReady = false; + this.disposeReplListener(); + this.pythonReplCommandQueue = []; } } diff --git a/src/client/common/utils/localize.ts b/src/client/common/utils/localize.ts index 067275ad732c..a084fc647025 100644 --- a/src/client/common/utils/localize.ts +++ b/src/client/common/utils/localize.ts @@ -516,3 +516,15 @@ export namespace CreateEnv { ); } } + +export namespace PythonLocator { + export const startupFailedNotification = l10n.t( + 'Python Locator failed to start. Python environment discovery may not work correctly.', + ); + export const windowsRuntimeMissing = l10n.t( + 'Missing Windows runtime dependencies detected. The Python Locator requires the Microsoft Visual C++ Redistributable. This is often missing on clean Windows installations.', + ); + export const windowsStartupFailed = l10n.t( + 'Python Locator failed to start on Windows. This might be due to missing system dependencies such as the Microsoft Visual C++ Redistributable.', + ); +} diff --git a/src/client/common/vscodeApis/windowApis.ts b/src/client/common/vscodeApis/windowApis.ts index fade0a028487..90a06e7ed75a 100644 --- a/src/client/common/vscodeApis/windowApis.ts +++ b/src/client/common/vscodeApis/windowApis.ts @@ -25,6 +25,7 @@ import { NotebookDocument, NotebookEditor, NotebookDocumentShowOptions, + Terminal, } from 'vscode'; import { createDeferred, Deferred } from '../utils/async'; import { Resource } from '../types'; @@ -124,6 +125,10 @@ export function onDidStartTerminalShellExecution(handler: (e: TerminalShellExecu return window.onDidStartTerminalShellExecution(handler); } +export function onDidChangeTerminalState(handler: (e: Terminal) => void): Disposable { + return window.onDidChangeTerminalState(handler); +} + export enum MultiStepAction { Back = 'Back', Cancel = 'Cancel', diff --git a/src/client/envExt/api.legacy.ts b/src/client/envExt/api.legacy.ts index d1a9f404d541..6f2e60774033 100644 --- a/src/client/envExt/api.legacy.ts +++ b/src/client/envExt/api.legacy.ts @@ -148,5 +148,24 @@ export async function ensureTerminalLegacy( const terminal = await api.createTerminal(pythonEnv, fixedOptions); return terminal; } + traceError('ensureTerminalLegacy - Did not return terminal successfully.'); + traceError( + 'ensureTerminalLegacy - pythonEnv:', + pythonEnv + ? `id=${pythonEnv.envId.id}, managerId=${pythonEnv.envId.managerId}, name=${pythonEnv.name}, version=${pythonEnv.version}, executable=${pythonEnv.execInfo.run.executable}` + : 'undefined', + ); + traceError( + 'ensureTerminalLegacy - project:', + project ? `name=${project.name}, uri=${project.uri.toString()}` : 'undefined', + ); + traceError( + 'ensureTerminalLegacy - options:', + options + ? 
`name=${options.name}, cwd=${options.cwd?.toString()}, hideFromUser=${options.hideFromUser}` + : 'undefined', + ); + traceError('ensureTerminalLegacy - resource:', resource?.toString() || 'undefined'); + throw new Error('Invalid arguments to create terminal'); } diff --git a/src/client/extension.ts b/src/client/extension.ts index af26a5657330..c3fb2a3ab3b0 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -45,6 +45,8 @@ import { buildProposedApi } from './proposedApi'; import { GLOBAL_PERSISTENT_KEYS } from './common/persistentState'; import { registerTools } from './chat'; import { IRecommendedEnvironmentService } from './interpreter/configuration/types'; +import { registerTypes as unitTestsRegisterTypes } from './testing/serviceRegistry'; +import { registerTestCommands } from './testing/main'; durations.codeLoadingTime = stopWatch.elapsedTime; @@ -121,6 +123,11 @@ async function activateUnsafe( // Note standard utils especially experiment and platform code are fundamental to the extension // and should be available before we activate anything else.Hence register them first. initializeStandard(ext); + + // Register test services and commands early to prevent race conditions. + unitTestsRegisterTypes(ext.legacyIOC.serviceManager); + registerTestCommands(activatedServiceContainer); + // We need to activate experiments before initializing components as objects are created or not created based on experiments. const experimentService = activatedServiceContainer.get(IExperimentService); // This guarantees that all experiment information has loaded & all telemetry will contain experiment info. diff --git a/src/client/extensionActivation.ts b/src/client/extensionActivation.ts index 8330d5010f7a..6e870e37ef3e 100644 --- a/src/client/extensionActivation.ts +++ b/src/client/extensionActivation.ts @@ -33,7 +33,6 @@ import { setExtensionInstallTelemetryProperties } from './telemetry/extensionIns import { registerTypes as tensorBoardRegisterTypes } from './tensorBoard/serviceRegistry'; import { registerTypes as commonRegisterTerminalTypes } from './terminals/serviceRegistry'; import { ICodeExecutionHelper, ICodeExecutionManager, ITerminalAutoActivation } from './terminals/types'; -import { registerTypes as unitTestsRegisterTypes } from './testing/serviceRegistry'; // components import * as pythonEnvironments from './pythonEnvironments'; @@ -144,7 +143,6 @@ async function activateLegacy(ext: ExtensionState, startupStopWatch: StopWatch): const { enableProposedApi } = applicationEnv.packageJson; serviceManager.addSingletonInstance(UseProposedApi, enableProposedApi); // Feature specific registrations. 
-    unitTestsRegisterTypes(serviceManager);
     installerRegisterTypes(serviceManager);
     commonRegisterTerminalTypes(serviceManager);
     debugConfigurationRegisterTypes(serviceManager);
diff --git a/src/client/interpreter/configuration/interpreterSelector/commands/setInterpreter.ts b/src/client/interpreter/configuration/interpreterSelector/commands/setInterpreter.ts
index 54440485da02..a629d1bc793c 100644
--- a/src/client/interpreter/configuration/interpreterSelector/commands/setInterpreter.ts
+++ b/src/client/interpreter/configuration/interpreterSelector/commands/setInterpreter.ts
@@ -554,6 +554,7 @@ export class SetInterpreterCommand extends BaseInterpreterSelectorCommand implem
             openLabel: InterpreterQuickPickList.browsePath.openButtonLabel,
             canSelectMany: false,
             title: InterpreterQuickPickList.browsePath.title,
+            defaultUri: state.workspace,
         });
         if (uris && uris.length > 0) {
             state.path = uris[0].fsPath;
diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts
index cb5ab63077c9..ea0d63cd7552 100644
--- a/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts
+++ b/src/client/pythonEnvironments/base/locators/common/nativePythonFinder.ts
@@ -15,18 +15,24 @@ import { noop } from '../../../../common/utils/misc';
 import { getConfiguration, getWorkspaceFolderPaths, isTrusted } from '../../../../common/vscodeApis/workspaceApis';
 import { CONDAPATH_SETTING_KEY } from '../../../common/environmentManagers/conda';
 import { VENVFOLDERS_SETTING_KEY, VENVPATH_SETTING_KEY } from '../lowLevel/customVirtualEnvLocator';
-import { createLogOutputChannel } from '../../../../common/vscodeApis/windowApis';
+import { createLogOutputChannel, showWarningMessage } from '../../../../common/vscodeApis/windowApis';
 import { sendNativeTelemetry, NativePythonTelemetry } from './nativePythonTelemetry';
 import { NativePythonEnvironmentKind } from './nativePythonUtils';
 import type { IExtensionContext } from '../../../../common/types';
 import { StopWatch } from '../../../../common/utils/stopWatch';
 import { untildify } from '../../../../common/helpers';
 import { traceError } from '../../../../logging';
+import { Common, PythonLocator } from '../../../../common/utils/localize';
+import { Commands } from '../../../../common/constants';
+import { executeCommand } from '../../../../common/vscodeApis/commandApis';
+import { getGlobalStorage, IPersistentStorage } from '../../../../common/persistentState';
 
 const PYTHON_ENV_TOOLS_PATH = isWindows()
     ? path.join(EXTENSION_ROOT_DIR, 'python-env-tools', 'bin', 'pet.exe')
     : path.join(EXTENSION_ROOT_DIR, 'python-env-tools', 'bin', 'pet');
 
+const DONT_SHOW_SPAWN_ERROR_AGAIN = 'DONT_SHOW_NATIVE_FINDER_SPAWN_ERROR_AGAIN';
+
 export interface NativeEnvInfo {
     displayName?: string;
     name?: string;
@@ -106,8 +112,13 @@
         timeToRefresh: 0,
     };
 
-    constructor(private readonly cacheDirectory?: Uri) {
+    private readonly suppressErrorNotification: IPersistentStorage<boolean>;
+
+    constructor(private readonly cacheDirectory?: Uri, private readonly context?: IExtensionContext) {
         super();
+        this.suppressErrorNotification = this.context
+            ? getGlobalStorage<boolean>(this.context, DONT_SHOW_SPAWN_ERROR_AGAIN, false)
+            : ({ get: () => false, set: async () => {} } as IPersistentStorage<boolean>);
         this.connection = this.start();
         void this.configure();
         this.firstRefreshResults = this.refreshFirstTime();
@@ -212,6 +223,30 @@
         proc.stderr.on('data', (data) => this.outputChannel.error(data.toString()));
         writable.pipe(proc.stdin, { end: false });
 
+        // Handle spawn errors (e.g., missing DLLs on Windows)
+        proc.on('error', (error) => {
+            this.outputChannel.error(`Python Locator process error: ${error.message}`);
+            this.outputChannel.error(`Error details: ${JSON.stringify(error)}`);
+            this.handleSpawnError(error.message);
+        });
+
+        // Handle immediate exits with error codes
+        let hasStarted = false;
+        setTimeout(() => {
+            hasStarted = true;
+        }, 1000);
+
+        proc.on('exit', (code, signal) => {
+            if (!hasStarted && code !== null && code !== 0) {
+                const errorMessage = `Python Locator process exited immediately with code ${code}`;
+                this.outputChannel.error(errorMessage);
+                if (signal) {
+                    this.outputChannel.error(`Exit signal: ${signal}`);
+                }
+                this.handleSpawnError(errorMessage);
+            }
+        });
+
         disposables.push({
             dispose: () => {
                 try {
@@ -397,6 +432,33 @@
     async getCondaInfo(): Promise<NativeCondaInfo> {
         return this.connection.sendRequest<NativeCondaInfo>('condaInfo');
     }
+
+    private async handleSpawnError(errorMessage: string): Promise<void> {
+        // Check if user has chosen to not see this error again
+        if (this.suppressErrorNotification.get()) {
+            return;
+        }
+
+        // Check for Windows runtime DLL issues
+        if (isWindows() && errorMessage.toLowerCase().includes('vcruntime')) {
+            this.outputChannel.error(PythonLocator.windowsRuntimeMissing);
+        } else if (isWindows()) {
+            this.outputChannel.error(PythonLocator.windowsStartupFailed);
+        }
+
+        // Show notification to user
+        const selection = await showWarningMessage(
+            PythonLocator.startupFailedNotification,
+            Common.openOutputPanel,
+            Common.doNotShowAgain,
+        );
+
+        if (selection === Common.openOutputPanel) {
+            await executeCommand(Commands.ViewOutput);
+        } else if (selection === Common.doNotShowAgain) {
+            await this.suppressErrorNotification.set(true);
+        }
+    }
 }
 
 type ConfigurationOptions = {
@@ -461,7 +523,7 @@ export function getNativePythonFinder(context?: IExtensionContext): NativePython
     }
     if (!_finder) {
         const cacheDirectory = context ?
getCacheDirectory(context) : undefined; - _finder = new NativePythonFinderImpl(cacheDirectory); + _finder = new NativePythonFinderImpl(cacheDirectory, context); if (context) { context.subscriptions.push(_finder); } diff --git a/src/client/pythonEnvironments/base/locators/common/nativePythonUtils.ts b/src/client/pythonEnvironments/base/locators/common/nativePythonUtils.ts index 86135924537f..716bdd444633 100644 --- a/src/client/pythonEnvironments/base/locators/common/nativePythonUtils.ts +++ b/src/client/pythonEnvironments/base/locators/common/nativePythonUtils.ts @@ -23,6 +23,7 @@ export enum NativePythonEnvironmentKind { VirtualEnvWrapper = 'VirtualEnvWrapper', WindowsStore = 'WindowsStore', WindowsRegistry = 'WindowsRegistry', + VenvUv = 'Uv', } const mapping = new Map([ @@ -36,6 +37,7 @@ const mapping = new Map([ [NativePythonEnvironmentKind.VirtualEnv, PythonEnvKind.VirtualEnv], [NativePythonEnvironmentKind.VirtualEnvWrapper, PythonEnvKind.VirtualEnvWrapper], [NativePythonEnvironmentKind.Venv, PythonEnvKind.Venv], + [NativePythonEnvironmentKind.VenvUv, PythonEnvKind.Venv], [NativePythonEnvironmentKind.WindowsRegistry, PythonEnvKind.System], [NativePythonEnvironmentKind.WindowsStore, PythonEnvKind.MicrosoftStore], [NativePythonEnvironmentKind.Homebrew, PythonEnvKind.System], diff --git a/src/client/startupTelemetry.ts b/src/client/startupTelemetry.ts index 5a2c12e2dd37..f7a2a6aea517 100644 --- a/src/client/startupTelemetry.ts +++ b/src/client/startupTelemetry.ts @@ -136,6 +136,7 @@ async function getActivationTelemetryProps( const usingGlobalInterpreter = interpreter ? isUsingGlobalInterpreterInWorkspace(interpreter.path, serviceContainer) : false; + const usingEnvironmentsExtension = useEnvExtension(); return { condaVersion, @@ -148,5 +149,6 @@ async function getActivationTelemetryProps( usingGlobalInterpreter, appName, isFirstSession, + usingEnvironmentsExtension, }; } diff --git a/src/client/telemetry/index.ts b/src/client/telemetry/index.ts index 581fcfed1f63..738c5f8a2776 100644 --- a/src/client/telemetry/index.ts +++ b/src/client/telemetry/index.ts @@ -404,6 +404,10 @@ export interface IEventNamePropertyMapping { * to approximately guess if it's the first session. 
*/ isFirstSession?: boolean; + /** + * If user has enabled the Python Environments extension integration + */ + usingEnvironmentsExtension?: boolean; }; /** * Telemetry event sent when substituting Environment variables to calculate value of variables diff --git a/src/client/telemetry/pylance.ts b/src/client/telemetry/pylance.ts index 3d1ba05779dd..63bd113893e2 100644 --- a/src/client/telemetry/pylance.ts +++ b/src/client/telemetry/pylance.ts @@ -467,6 +467,11 @@ "failed" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" } } */ +/* __GDPR__ + "language_server/copilot_hover" : { + "symbolName" : { "classification": "SystemMetaData", "purpose": "PerformanceAndHealth" } + } +*/ /** * Telemetry event sent when LSP server crashes */ diff --git a/src/client/terminals/codeExecution/codeExecutionManager.ts b/src/client/terminals/codeExecution/codeExecutionManager.ts index 30e5b7facd2d..48165adcd169 100644 --- a/src/client/terminals/codeExecution/codeExecutionManager.ts +++ b/src/client/terminals/codeExecution/codeExecutionManager.ts @@ -131,6 +131,11 @@ export class CodeExecutionManager implements ICodeExecutionManager { return; } + const fileAfterSave = await codeExecutionHelper.saveFileIfDirty(fileToExecute); + if (fileAfterSave) { + fileToExecute = fileAfterSave; + } + // Check on setting terminal.executeInFileDir const pythonSettings = this.configSettings.getSettings(file); let cwd = pythonSettings.terminal.executeInFileDir ? path.dirname(fileToExecute.fsPath) : undefined; @@ -139,11 +144,6 @@ export class CodeExecutionManager implements ICodeExecutionManager { const launchArgs = pythonSettings.terminal.launchArgs; const totalArgs = [...launchArgs, fileToExecute.fsPath.fileToCommandArgumentForPythonExt()]; - const fileAfterSave = await codeExecutionHelper.saveFileIfDirty(fileToExecute); - if (fileAfterSave) { - fileToExecute = fileAfterSave; - } - const show = this.shouldTerminalFocusOnStart(fileToExecute); let terminal: Terminal | undefined; if (dedicated) { diff --git a/src/client/testing/main.ts b/src/client/testing/main.ts index e794d5711f2a..eed4d70e852c 100644 --- a/src/client/testing/main.ts +++ b/src/client/testing/main.ts @@ -18,7 +18,7 @@ import { IDisposableRegistry, Product } from '../common/types'; import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { EventName } from '../telemetry/constants'; -import { captureTelemetry, sendTelemetryEvent } from '../telemetry/index'; +import { sendTelemetryEvent } from '../telemetry/index'; import { selectTestWorkspace } from './common/testUtils'; import { TestSettingsPropertyNames } from './configuration/types'; import { ITestConfigurationService, ITestsHelper } from './common/types'; @@ -42,6 +42,91 @@ export class TestingService implements ITestingService { } } +/** + * Registers command handlers but defers service resolution until the commands are actually invoked, + * allowing registration to happen before all services are fully initialized. 
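+ * Commands covered: python.configureTests, python.tests.copilotSetup, and python.copyTestId.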
+ */
+export function registerTestCommands(serviceContainer: IServiceContainer): void {
+    // Resolve only the essential services needed for command registration itself
+    const disposableRegistry = serviceContainer.get<IDisposableRegistry>(IDisposableRegistry);
+    const commandManager = serviceContainer.get<ICommandManager>(ICommandManager);
+
+    // Helper function to configure tests - services are resolved when invoked, not at registration time
+    const configureTestsHandler = async (resource?: Uri) => {
+        sendTelemetryEvent(EventName.UNITTEST_CONFIGURE);
+
+        // Resolve services lazily when the command is invoked
+        const workspaceService = serviceContainer.get<IWorkspaceService>(IWorkspaceService);
+
+        let wkspace: Uri | undefined;
+        if (resource) {
+            const wkspaceFolder = workspaceService.getWorkspaceFolder(resource);
+            wkspace = wkspaceFolder ? wkspaceFolder.uri : undefined;
+        } else {
+            const appShell = serviceContainer.get<IApplicationShell>(IApplicationShell);
+            wkspace = await selectTestWorkspace(appShell);
+        }
+        if (!wkspace) {
+            return;
+        }
+        const interpreterService = serviceContainer.get<IInterpreterService>(IInterpreterService);
+        const cmdManager = serviceContainer.get<ICommandManager>(ICommandManager);
+        if (!(await interpreterService.getActiveInterpreter(wkspace))) {
+            cmdManager.executeCommand(constants.Commands.TriggerEnvironmentSelection, wkspace);
+            return;
+        }
+        const configurationService = serviceContainer.get<ITestConfigurationService>(ITestConfigurationService);
+        await configurationService.promptToEnableAndConfigureTestFramework(wkspace);
+    };
+
+    disposableRegistry.push(
+        // Command: python.configureTests - prompts user to configure test framework
+        commandManager.registerCommand(
+            constants.Commands.Tests_Configure,
+            (_, _cmdSource: constants.CommandSource = constants.CommandSource.commandPalette, resource?: Uri) => {
+                // Invoke configuration handler (errors are ignored as this can be called from multiple places)
+                configureTestsHandler(resource).ignoreErrors();
+                traceVerbose('Testing: Trigger refresh after config change');
+                // Refresh test data if test controller is available (resolved lazily)
+                if (tests && !!tests.createTestController) {
+                    const testController = serviceContainer.get<ITestController>(ITestController);
+                    testController?.refreshTestData(resource, { forceRefresh: true });
+                }
+            },
+        ),
+        // Command: python.tests.copilotSetup - Copilot integration for test setup
+        commandManager.registerCommand(constants.Commands.Tests_CopilotSetup, (resource?: Uri):
+            | { message: string; command: Command }
+            | undefined => {
+            // Resolve services lazily when the command is invoked
+            const workspaceService = serviceContainer.get<IWorkspaceService>(IWorkspaceService);
+            const wkspaceFolder =
+                workspaceService.getWorkspaceFolder(resource) || workspaceService.workspaceFolders?.at(0);
+            if (!wkspaceFolder) {
+                return undefined;
+            }
+
+            const configurationService = serviceContainer.get<ITestConfigurationService>(ITestConfigurationService);
+            if (configurationService.hasConfiguredTests(wkspaceFolder.uri)) {
+                return undefined;
+            }
+
+            return {
+                message: Testing.copilotSetupMessage,
+                command: {
+                    title: Testing.configureTests,
+                    command: constants.Commands.Tests_Configure,
+                    arguments: [undefined, constants.CommandSource.ui, resource],
+                },
+            };
+        }),
+        // Command: python.copyTestId - copies test ID to clipboard
+        commandManager.registerCommand(constants.Commands.CopyTestId, async (testItem: TestItem) => {
+            writeTestIdToClipboard(testItem);
+        }),
+    );
+}
+
 @injectable()
 export class UnitTestManagementService implements
IExtensionActivationService { this.activatedOnce = true; this.registerHandlers(); - this.registerCommands(); if (!!tests.testResults) { await this.updateTestUIButtons(); @@ -130,73 +214,6 @@ export class UnitTestManagementService implements IExtensionActivationService { await Promise.all(changedWorkspaces.map((u) => this.testController?.refreshTestData(u))); } - @captureTelemetry(EventName.UNITTEST_CONFIGURE, undefined, false) - private async configureTests(resource?: Uri) { - let wkspace: Uri | undefined; - if (resource) { - const wkspaceFolder = this.workspaceService.getWorkspaceFolder(resource); - wkspace = wkspaceFolder ? wkspaceFolder.uri : undefined; - } else { - const appShell = this.serviceContainer.get(IApplicationShell); - wkspace = await selectTestWorkspace(appShell); - } - if (!wkspace) { - return; - } - const interpreterService = this.serviceContainer.get(IInterpreterService); - const commandManager = this.serviceContainer.get(ICommandManager); - if (!(await interpreterService.getActiveInterpreter(wkspace))) { - commandManager.executeCommand(constants.Commands.TriggerEnvironmentSelection, wkspace); - return; - } - const configurationService = this.serviceContainer.get(ITestConfigurationService); - await configurationService.promptToEnableAndConfigureTestFramework(wkspace!); - } - - private registerCommands(): void { - const commandManager = this.serviceContainer.get(ICommandManager); - this.disposableRegistry.push( - commandManager.registerCommand( - constants.Commands.Tests_Configure, - (_, _cmdSource: constants.CommandSource = constants.CommandSource.commandPalette, resource?: Uri) => { - // Ignore the exceptions returned. - // This command will be invoked from other places of the extension. - this.configureTests(resource).ignoreErrors(); - traceVerbose('Testing: Trigger refresh after config change'); - this.testController?.refreshTestData(resource, { forceRefresh: true }); - }, - ), - commandManager.registerCommand(constants.Commands.Tests_CopilotSetup, (resource?: Uri): - | { message: string; command: Command } - | undefined => { - const wkspaceFolder = - this.workspaceService.getWorkspaceFolder(resource) || this.workspaceService.workspaceFolders?.at(0); - if (!wkspaceFolder) { - return undefined; - } - - const configurationService = this.serviceContainer.get( - ITestConfigurationService, - ); - if (configurationService.hasConfiguredTests(wkspaceFolder.uri)) { - return undefined; - } - - return { - message: Testing.copilotSetupMessage, - command: { - title: Testing.configureTests, - command: constants.Commands.Tests_Configure, - arguments: [undefined, constants.CommandSource.ui, resource], - }, - }; - }), - commandManager.registerCommand(constants.Commands.CopyTestId, async (testItem: TestItem) => { - writeTestIdToClipboard(testItem); - }), - ); - } - private registerHandlers() { const interpreterService = this.serviceContainer.get(IInterpreterService); this.disposableRegistry.push( diff --git a/src/client/testing/testController/common/discoveryHelpers.ts b/src/client/testing/testController/common/discoveryHelpers.ts new file mode 100644 index 000000000000..e170ad576ae8 --- /dev/null +++ b/src/client/testing/testController/common/discoveryHelpers.ts @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
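+// Usage sketch (illustrative only, not part of this change): a test adapter would call
+// setupDiscoveryPipe() to obtain the pipe name, attach the handlers returned by
+// createProcessHandlers() to the spawned discovery process, and invoke
+// cleanupOnCancellation() if the run's cancellation token fires.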
+import { CancellationToken, CancellationTokenSource, Disposable, Uri } from 'vscode';
+import { Deferred } from '../../../common/utils/async';
+import { traceError, traceInfo, traceVerbose } from '../../../logging';
+import { createDiscoveryErrorPayload, fixLogLinesNoTrailing, startDiscoveryNamedPipe } from './utils';
+import { DiscoveredTestPayload, ITestResultResolver } from './types';
+
+/**
+ * Test provider type for logging purposes.
+ */
+export type TestProvider = 'pytest' | 'unittest';
+
+/**
+ * Sets up the discovery named pipe and wires up cancellation.
+ * @param resultResolver The resolver to handle discovered test data
+ * @param token Optional cancellation token from the caller
+ * @param uri Workspace URI for logging
+ * @returns Object containing the pipe name, cancellation source, and disposable for the external token handler
+ */
+export async function setupDiscoveryPipe(
+    resultResolver: ITestResultResolver | undefined,
+    token: CancellationToken | undefined,
+    uri: Uri,
+): Promise<{ pipeName: string; cancellation: CancellationTokenSource; tokenDisposable: Disposable | undefined }> {
+    const discoveryPipeCancellation = new CancellationTokenSource();
+
+    // Wire up cancellation from external token and store the disposable
+    const tokenDisposable = token?.onCancellationRequested(() => {
+        traceInfo(`Test discovery cancelled.`);
+        discoveryPipeCancellation.cancel();
+    });
+
+    // Start the named pipe with the discovery listener
+    const discoveryPipeName = await startDiscoveryNamedPipe((data: DiscoveredTestPayload) => {
+        if (!token?.isCancellationRequested) {
+            resultResolver?.resolveDiscovery(data);
+        }
+    }, discoveryPipeCancellation.token);
+
+    traceVerbose(`Created discovery pipe: ${discoveryPipeName} for workspace ${uri.fsPath}`);
+
+    return {
+        pipeName: discoveryPipeName,
+        cancellation: discoveryPipeCancellation,
+        tokenDisposable,
+    };
+}
+
+/**
+ * Creates standard process event handlers for test discovery subprocess.
+ * Handles stdout/stderr logging and error reporting on process exit.
+ *
+ * @param testProvider - The test framework being used ('pytest' or 'unittest')
+ * @param uri - The workspace URI
+ * @param cwd - The current working directory
+ * @param resultResolver - Resolver for test discovery results
+ * @param deferredTillExecClose - Deferred to resolve when process closes
+ * @param allowedSuccessCodes - Additional exit codes to treat as success (e.g., pytest exit code 5 for no tests found)
+ */
+export function createProcessHandlers(
+    testProvider: TestProvider,
+    uri: Uri,
+    cwd: string,
+    resultResolver: ITestResultResolver | undefined,
+    deferredTillExecClose: Deferred<void>,
+    allowedSuccessCodes: number[] = [],
+): {
+    onStdout: (data: any) => void;
+    onStderr: (data: any) => void;
+    onExit: (code: number | null, signal: NodeJS.Signals | null) => void;
+    onClose: (code: number | null, signal: NodeJS.Signals | null) => void;
+} {
+    const isSuccessCode = (code: number | null): boolean => {
+        return code === 0 || (code !== null && allowedSuccessCodes.includes(code));
+    };
+
+    return {
+        onStdout: (data: any) => {
+            const out = fixLogLinesNoTrailing(data.toString());
+            traceInfo(out);
+        },
+        onStderr: (data: any) => {
+            const out = fixLogLinesNoTrailing(data.toString());
+            traceError(out);
+        },
+        onExit: (code: number | null, _signal: NodeJS.Signals | null) => {
+            // The 'exit' event fires when the process terminates, but streams may still be open.
+            // Only log verbose success message here; error handling happens in onClose.
+            if (isSuccessCode(code)) {
+                traceVerbose(`${testProvider} discovery subprocess exited successfully for workspace ${uri.fsPath}`);
+            }
+        },
+        onClose: (code: number | null, signal: NodeJS.Signals | null) => {
+            // We resolve the deferred here to ensure all output has been captured.
+            if (!isSuccessCode(code)) {
+                traceError(
+                    `${testProvider} discovery failed with exit code ${code} and signal ${signal} for workspace ${uri.fsPath}. Creating error payload.`,
+                );
+                resultResolver?.resolveDiscovery(createDiscoveryErrorPayload(code, signal, cwd));
+            } else {
+                traceVerbose(`${testProvider} discovery subprocess streams closed for workspace ${uri.fsPath}`);
+            }
+            deferredTillExecClose?.resolve();
+        },
+    };
+}
+
+/**
+ * Handles cleanup when test discovery is cancelled.
+ * Kills the subprocess (if running), resolves the completion deferred, and cancels the discovery pipe.
+ *
+ * @param testProvider - The test framework being used ('pytest' or 'unittest')
+ * @param proc - The process to kill
+ * @param processCompletion - Deferred to resolve
+ * @param pipeCancellation - Cancellation token source to cancel
+ * @param uri - The workspace URI
+ */
+export function cleanupOnCancellation(
+    testProvider: TestProvider,
+    proc: { kill: () => void } | undefined,
+    processCompletion: Deferred<void>,
+    pipeCancellation: CancellationTokenSource,
+    uri: Uri,
+): void {
+    traceInfo(`Test discovery cancelled, killing ${testProvider} subprocess for workspace ${uri.fsPath}`);
+    if (proc) {
+        traceVerbose(`Killing ${testProvider} subprocess for workspace ${uri.fsPath}`);
+        proc.kill();
+    } else {
+        traceVerbose(`No ${testProvider} subprocess to kill for workspace ${uri.fsPath} (proc is undefined)`);
+    }
+    traceVerbose(`Resolving process completion deferred for ${testProvider} discovery in workspace ${uri.fsPath}`);
+    processCompletion.resolve();
+    traceVerbose(`Cancelling discovery pipe for ${testProvider} discovery in workspace ${uri.fsPath}`);
+    pipeCancellation.cancel();
+}
diff --git a/src/client/testing/testController/common/resultResolver.ts b/src/client/testing/testController/common/resultResolver.ts
index 82856627e0c9..959d08fee1a9 100644
--- a/src/client/testing/testController/common/resultResolver.ts
+++ b/src/client/testing/testController/common/resultResolver.ts
@@ -1,339 +1,106 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
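+// Summary of this refactor: PythonResultResolver delegates to shared stateless handlers for
+// discovery, execution, and coverage, and keeps per-workspace ID state in a TestItemIndex;
+// the map getters below are retained only for backward compatibility.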
-import {
-    CancellationToken,
-    TestController,
-    TestItem,
-    Uri,
-    TestMessage,
-    Location,
-    TestRun,
-    MarkdownString,
-    TestCoverageCount,
-    FileCoverage,
-    FileCoverageDetail,
-    StatementCoverage,
-    Range,
-} from 'vscode';
-import * as util from 'util';
-import {
-    CoveragePayload,
-    DiscoveredTestPayload,
-    ExecutionTestPayload,
-    FileCoverageMetrics,
-    ITestResultResolver,
-} from './types';
+import { CancellationToken, TestController, TestItem, Uri, TestRun, FileCoverageDetail } from 'vscode';
+import { CoveragePayload, DiscoveredTestPayload, ExecutionTestPayload, ITestResultResolver } from './types';
 import { TestProvider } from '../../types';
-import { traceError, traceVerbose } from '../../../logging';
-import { Testing } from '../../../common/utils/localize';
-import { clearAllChildren, createErrorTestItem, getTestCaseNodes } from './testItemUtilities';
+import { traceInfo } from '../../../logging';
 import { sendTelemetryEvent } from '../../../telemetry';
 import { EventName } from '../../../telemetry/constants';
-import { splitLines } from '../../../common/stringUtils';
-import { buildErrorNodeOptions, populateTestTree, splitTestNameWithRegex } from './utils';
+import { TestItemIndex } from './testItemIndex';
+import { TestDiscoveryHandler } from './testDiscoveryHandler';
+import { TestExecutionHandler } from './testExecutionHandler';
+import { TestCoverageHandler } from './testCoverageHandler';
 
 export class PythonResultResolver implements ITestResultResolver {
     testController: TestController;
 
     testProvider: TestProvider;
 
-    public runIdToTestItem: Map<string, TestItem>;
+    private testItemIndex: TestItemIndex;
 
-    public runIdToVSid: Map<string, string>;
-
-    public vsIdToRunId: Map<string, string>;
-
-    public subTestStats: Map<string, { passed: number; failed: number }> = new Map();
+    // Shared singleton handlers
+    private static discoveryHandler: TestDiscoveryHandler = new TestDiscoveryHandler();
+    private static executionHandler: TestExecutionHandler = new TestExecutionHandler();
+    private static coverageHandler: TestCoverageHandler = new TestCoverageHandler();
 
     public detailedCoverageMap = new Map<string, FileCoverageDetail[]>();
 
     constructor(testController: TestController, testProvider: TestProvider, private workspaceUri: Uri) {
         this.testController = testController;
         this.testProvider = testProvider;
-
-        this.runIdToTestItem = new Map<string, TestItem>();
-        this.runIdToVSid = new Map<string, string>();
-        this.vsIdToRunId = new Map<string, string>();
+        // Initialize a new TestItemIndex which will be used to track test items in this workspace
+        this.testItemIndex = new TestItemIndex();
     }
 
-    public resolveDiscovery(payload: DiscoveredTestPayload, token?: CancellationToken): void {
-        if (!payload) {
-            // No test data is available
-        } else {
-            this._resolveDiscovery(payload as DiscoveredTestPayload, token);
-        }
+    // Expose for backward compatibility (WorkspaceTestAdapter accesses these)
+    public get runIdToTestItem(): Map<string, TestItem> {
+        return this.testItemIndex.runIdToTestItemMap;
     }
 
-    public _resolveDiscovery(payload: DiscoveredTestPayload, token?: CancellationToken): void {
-        const workspacePath = this.workspaceUri.fsPath;
-        const rawTestData = payload as DiscoveredTestPayload;
-        // Check if there were any errors in the discovery process.
-        if (rawTestData.status === 'error') {
-            const testingErrorConst =
-                this.testProvider === 'pytest' ? Testing.errorPytestDiscovery : Testing.errorUnittestDiscovery;
-            const { error } = rawTestData;
-            traceError(testingErrorConst, 'for workspace: ', workspacePath, '\r\n', error?.join('\r\n\r\n') ?? '');
-
-            let errorNode = this.testController.items.get(`DiscoveryError:${workspacePath}`);
-            const message = util.format(
-                `${testingErrorConst} ${Testing.seePythonOutput}\r\n`,
-                error?.join('\r\n\r\n') ?? '',
-            );
-
-            if (errorNode === undefined) {
-                const options = buildErrorNodeOptions(this.workspaceUri, message, this.testProvider);
-                errorNode = createErrorTestItem(this.testController, options);
-                this.testController.items.add(errorNode);
-            }
-            const errorNodeLabel: MarkdownString = new MarkdownString(
-                `[Show output](command:python.viewOutput) to view error logs`,
-            );
-            errorNodeLabel.isTrusted = true;
-            errorNode.error = errorNodeLabel;
-        } else {
-            // remove error node only if no errors exist.
-            this.testController.items.delete(`DiscoveryError:${workspacePath}`);
-        }
-        if (rawTestData.tests || rawTestData.tests === null) {
-            // if any tests exist, they should be populated in the test tree, regardless of whether there were errors or not.
-            // parse and insert test data.
+    public get runIdToVSid(): Map<string, string> {
+        return this.testItemIndex.runIdToVSidMap;
+    }
 
-            // If the test root for this folder exists: Workspace refresh, update its children.
-            // Otherwise, it is a freshly discovered workspace, and we need to create a new test root and populate the test tree.
-            populateTestTree(this.testController, rawTestData.tests, undefined, this, token);
-        }
+    public get vsIdToRunId(): Map<string, string> {
+        return this.testItemIndex.vsIdToRunIdMap;
+    }
 
+    public resolveDiscovery(payload: DiscoveredTestPayload, token?: CancellationToken): void {
+        PythonResultResolver.discoveryHandler.processDiscovery(
+            payload,
+            this.testController,
+            this.testItemIndex,
+            this.workspaceUri,
+            this.testProvider,
+            token,
+        );
         sendTelemetryEvent(EventName.UNITTEST_DISCOVERY_DONE, undefined, {
             tool: this.testProvider,
             failed: false,
         });
     }
 
+    public _resolveDiscovery(payload: DiscoveredTestPayload, token?: CancellationToken): void {
+        // Delegate to the public method for backward compatibility
+        this.resolveDiscovery(payload, token);
+    }
+
     public resolveExecution(payload: ExecutionTestPayload | CoveragePayload, runInstance: TestRun): void {
         if ('coverage' in payload) {
             // coverage data is sent once per connection
-            traceVerbose('Coverage data received.');
-            this._resolveCoverage(payload as CoveragePayload, runInstance);
+            traceInfo('Coverage data received, processing...');
+            this.detailedCoverageMap = PythonResultResolver.coverageHandler.processCoverage(
+                payload as CoveragePayload,
+                runInstance,
+            );
+            traceInfo('Coverage data processing complete.');
         } else {
-            this._resolveExecution(payload as ExecutionTestPayload, runInstance);
-        }
-    }
-
-    public _resolveCoverage(payload: CoveragePayload, runInstance: TestRun): void {
-        if (payload.result === undefined) {
-            return;
-        }
-        for (const [key, value] of Object.entries(payload.result)) {
-            const fileNameStr = key;
-            const fileCoverageMetrics: FileCoverageMetrics = value;
-            const linesCovered = fileCoverageMetrics.lines_covered ? fileCoverageMetrics.lines_covered : []; // undefined if no lines covered
-            const linesMissed = fileCoverageMetrics.lines_missed ?
fileCoverageMetrics.lines_missed : []; // undefined if no lines missed - const executedBranches = fileCoverageMetrics.executed_branches; - const totalBranches = fileCoverageMetrics.total_branches; - - const lineCoverageCount = new TestCoverageCount( - linesCovered.length, - linesCovered.length + linesMissed.length, + PythonResultResolver.executionHandler.processExecution( + payload as ExecutionTestPayload, + runInstance, + this.testItemIndex, + this.testController, ); - let fileCoverage: FileCoverage; - const uri = Uri.file(fileNameStr); - if (totalBranches === -1) { - // branch coverage was not enabled and should not be displayed - fileCoverage = new FileCoverage(uri, lineCoverageCount); - } else { - const branchCoverageCount = new TestCoverageCount(executedBranches, totalBranches); - fileCoverage = new FileCoverage(uri, lineCoverageCount, branchCoverageCount); - } - runInstance.addCoverage(fileCoverage); - - // create detailed coverage array for each file (only line coverage on detailed, not branch) - const detailedCoverageArray: FileCoverageDetail[] = []; - // go through all covered lines, create new StatementCoverage, and add to detailedCoverageArray - for (const line of linesCovered) { - // line is 1-indexed, so we need to subtract 1 to get the 0-indexed line number - // true value means line is covered - const statementCoverage = new StatementCoverage( - true, - new Range(line - 1, 0, line - 1, Number.MAX_SAFE_INTEGER), - ); - detailedCoverageArray.push(statementCoverage); - } - for (const line of linesMissed) { - // line is 1-indexed, so we need to subtract 1 to get the 0-indexed line number - // false value means line is NOT covered - const statementCoverage = new StatementCoverage( - false, - new Range(line - 1, 0, line - 1, Number.MAX_SAFE_INTEGER), - ); - detailedCoverageArray.push(statementCoverage); - } - - this.detailedCoverageMap.set(uri.fsPath, detailedCoverageArray); } } public _resolveExecution(payload: ExecutionTestPayload, runInstance: TestRun): void { - const rawTestExecData = payload as ExecutionTestPayload; - if (rawTestExecData !== undefined && rawTestExecData.result !== undefined) { - // Map which holds the subtest information for each test item. - - // iterate through payload and update the UI accordingly. - for (const keyTemp of Object.keys(rawTestExecData.result)) { - const testCases: TestItem[] = []; - - // grab leaf level test items - this.testController.items.forEach((i) => { - const tempArr: TestItem[] = getTestCaseNodes(i); - testCases.push(...tempArr); - }); - const testItem = rawTestExecData.result[keyTemp]; - - if (testItem.outcome === 'error') { - const rawTraceback = testItem.traceback ?? ''; - const traceback = splitLines(rawTraceback, { - trim: false, - removeEmptyEntries: true, - }).join('\r\n'); - const text = `${testItem.test} failed with error: ${ - testItem.message ?? testItem.outcome - }\r\n${traceback}`; - const message = new TestMessage(text); - - const grabVSid = this.runIdToVSid.get(keyTemp); - // search through freshly built array of testItem to find the failed test and update UI. - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - if (indiItem.range) { - message.location = new Location(indiItem.uri, indiItem.range); - } - runInstance.errored(indiItem, message); - } - } - }); - } else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') { - const rawTraceback = testItem.traceback ?? 
''; - const traceback = splitLines(rawTraceback, { - trim: false, - removeEmptyEntries: true, - }).join('\r\n'); - - const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}`; - const message = new TestMessage(text); + // Delegate to the public method for backward compatibility + this.resolveExecution(payload, runInstance); + } - // note that keyTemp is a runId for unittest library... - const grabVSid = this.runIdToVSid.get(keyTemp); - // search through freshly built array of testItem to find the failed test and update UI. - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - if (indiItem.range) { - message.location = new Location(indiItem.uri, indiItem.range); - } - runInstance.failed(indiItem, message); - } - } - }); - } else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') { - const grabTestItem = this.runIdToTestItem.get(keyTemp); - const grabVSid = this.runIdToVSid.get(keyTemp); - if (grabTestItem !== undefined) { - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - runInstance.passed(grabTestItem); - } - } - }); - } - } else if (testItem.outcome === 'skipped') { - const grabTestItem = this.runIdToTestItem.get(keyTemp); - const grabVSid = this.runIdToVSid.get(keyTemp); - if (grabTestItem !== undefined) { - testCases.forEach((indiItem) => { - if (indiItem.id === grabVSid) { - if (indiItem.uri) { - runInstance.skipped(grabTestItem); - } - } - }); - } - } else if (testItem.outcome === 'subtest-failure') { - // split on [] or () based on how the subtest is setup. - const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); - const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); - const data = testItem; - // find the subtest's parent test item - if (parentTestItem) { - const subtestStats = this.subTestStats.get(parentTestCaseId); - if (subtestStats) { - subtestStats.failed += 1; - } else { - this.subTestStats.set(parentTestCaseId, { - failed: 1, - passed: 0, - }); - // clear since subtest items don't persist between runs - clearAllChildren(parentTestItem); - } - const subTestItem = this.testController?.createTestItem( - subtestId, - subtestId, - parentTestItem.uri, - ); - // create a new test item for the subtest - if (subTestItem) { - const traceback = data.traceback ?? ''; - const text = `${data.subtest} failed: ${ - testItem.message ?? testItem.outcome - }\r\n${traceback}`; - parentTestItem.children.add(subTestItem); - runInstance.started(subTestItem); - const message = new TestMessage(text); - if (parentTestItem.uri && parentTestItem.range) { - message.location = new Location(parentTestItem.uri, parentTestItem.range); - } - runInstance.failed(subTestItem, message); - } else { - throw new Error('Unable to create new child node for subtest'); - } - } else { - throw new Error('Parent test item not found'); - } - } else if (testItem.outcome === 'subtest-success') { - // split on [] or () based on how the subtest is setup. 
-                    const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
-                    const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
+    public _resolveCoverage(payload: CoveragePayload, runInstance: TestRun): void {
+        // Delegate to the public method for backward compatibility
+        this.resolveExecution(payload, runInstance);
+    }
 
-                    // find the subtest's parent test item
-                    if (parentTestItem) {
-                        const subtestStats = this.subTestStats.get(parentTestCaseId);
-                        if (subtestStats) {
-                            subtestStats.passed += 1;
-                        } else {
-                            this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 });
-                            // clear since subtest items don't persist between runs
-                            clearAllChildren(parentTestItem);
-                        }
-                        const subTestItem = this.testController?.createTestItem(
-                            subtestId,
-                            subtestId,
-                            parentTestItem.uri,
-                        );
-                        // create a new test item for the subtest
-                        if (subTestItem) {
-                            parentTestItem.children.add(subTestItem);
-                            runInstance.started(subTestItem);
-                            runInstance.passed(subTestItem);
-                        } else {
-                            throw new Error('Unable to create new child node for subtest');
-                        }
-                    } else {
-                        throw new Error('Parent test item not found');
-                    }
-                }
-            }
-        }
+    /**
+     * Clean up stale test item references from the cache maps.
+     * Validates cached items and removes any that are no longer in the test tree.
+     * Delegates to TestItemIndex.
+     */
+    public cleanupStaleReferences(): void {
+        this.testItemIndex.cleanupStaleReferences(this.testController);
+    }
 }
diff --git a/src/client/testing/testController/common/testCoverageHandler.ts b/src/client/testing/testController/common/testCoverageHandler.ts
new file mode 100644
index 000000000000..81ec80579730
--- /dev/null
+++ b/src/client/testing/testController/common/testCoverageHandler.ts
@@ -0,0 +1,93 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+import { TestRun, Uri, TestCoverageCount, FileCoverage, FileCoverageDetail, StatementCoverage, Range } from 'vscode';
+import { CoveragePayload, FileCoverageMetrics } from './types';
+
+/**
+ * Stateless handler for processing coverage payloads and creating coverage objects.
+ * This handler is shared across all workspaces and contains no instance state.
+ */
+export class TestCoverageHandler {
+    /**
+     * Process coverage payload
+     * Pure function - returns coverage data without storing it
+     */
+    public processCoverage(payload: CoveragePayload, runInstance: TestRun): Map<string, FileCoverageDetail[]> {
+        const detailedCoverageMap = new Map<string, FileCoverageDetail[]>();
+
+        if (payload.result === undefined) {
+            return detailedCoverageMap;
+        }
+
+        for (const [key, value] of Object.entries(payload.result)) {
+            const fileNameStr = key;
+            const fileCoverageMetrics: FileCoverageMetrics = value;
+
+            // Create FileCoverage object and add to run instance
+            const fileCoverage = this.createFileCoverage(Uri.file(fileNameStr), fileCoverageMetrics);
+            runInstance.addCoverage(fileCoverage);
+
+            // Create detailed coverage array for this file
+            const detailedCoverage = this.createDetailedCoverage(
+                fileCoverageMetrics.lines_covered ?? [],
+                fileCoverageMetrics.lines_missed ?? [],
+            );
+            detailedCoverageMap.set(Uri.file(fileNameStr).fsPath, detailedCoverage);
+        }
+
+        return detailedCoverageMap;
+    }
+
+    /**
+     * Create FileCoverage object from metrics
+     */
+    private createFileCoverage(uri: Uri, metrics: FileCoverageMetrics): FileCoverage {
+        const linesCovered = metrics.lines_covered ?? [];
+        const linesMissed = metrics.lines_missed ??
[]; + const executedBranches = metrics.executed_branches; + const totalBranches = metrics.total_branches; + + const lineCoverageCount = new TestCoverageCount(linesCovered.length, linesCovered.length + linesMissed.length); + + if (totalBranches === -1) { + // branch coverage was not enabled and should not be displayed + return new FileCoverage(uri, lineCoverageCount); + } else { + const branchCoverageCount = new TestCoverageCount(executedBranches, totalBranches); + return new FileCoverage(uri, lineCoverageCount, branchCoverageCount); + } + } + + /** + * Create detailed coverage array for a file + * Only line coverage on detailed, not branch coverage + */ + private createDetailedCoverage(linesCovered: number[], linesMissed: number[]): FileCoverageDetail[] { + const detailedCoverageArray: FileCoverageDetail[] = []; + + // Add covered lines + for (const line of linesCovered) { + // line is 1-indexed, so we need to subtract 1 to get the 0-indexed line number + // true value means line is covered + const statementCoverage = new StatementCoverage( + true, + new Range(line - 1, 0, line - 1, Number.MAX_SAFE_INTEGER), + ); + detailedCoverageArray.push(statementCoverage); + } + + // Add missed lines + for (const line of linesMissed) { + // line is 1-indexed, so we need to subtract 1 to get the 0-indexed line number + // false value means line is NOT covered + const statementCoverage = new StatementCoverage( + false, + new Range(line - 1, 0, line - 1, Number.MAX_SAFE_INTEGER), + ); + detailedCoverageArray.push(statementCoverage); + } + + return detailedCoverageArray; + } +} diff --git a/src/client/testing/testController/common/testDiscoveryHandler.ts b/src/client/testing/testController/common/testDiscoveryHandler.ts new file mode 100644 index 000000000000..50f4fa71406a --- /dev/null +++ b/src/client/testing/testController/common/testDiscoveryHandler.ts @@ -0,0 +1,104 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { CancellationToken, TestController, Uri, MarkdownString } from 'vscode'; +import * as util from 'util'; +import { DiscoveredTestPayload } from './types'; +import { TestProvider } from '../../types'; +import { traceError } from '../../../logging'; +import { Testing } from '../../../common/utils/localize'; +import { createErrorTestItem } from './testItemUtilities'; +import { buildErrorNodeOptions, populateTestTree } from './utils'; +import { TestItemIndex } from './testItemIndex'; + +/** + * Stateless handler for processing discovery payloads and building/updating the TestItem tree. + * This handler is shared across all workspaces and contains no instance state. + */ +export class TestDiscoveryHandler { + /** + * Process discovery payload and update test tree + * Pure function - no instance state used + */ + public processDiscovery( + payload: DiscoveredTestPayload, + testController: TestController, + testItemIndex: TestItemIndex, + workspaceUri: Uri, + testProvider: TestProvider, + token?: CancellationToken, + ): void { + if (!payload) { + // No test data is available + return; + } + + const workspacePath = workspaceUri.fsPath; + const rawTestData = payload as DiscoveredTestPayload; + + // Check if there were any errors in the discovery process. + if (rawTestData.status === 'error') { + this.createErrorNode(testController, workspaceUri, rawTestData.error, testProvider); + } else { + // remove error node only if no errors exist. 
+ testController.items.delete(`DiscoveryError:${workspacePath}`); + } + + if (rawTestData.tests || rawTestData.tests === null) { + // if any tests exist, they should be populated in the test tree, regardless of whether there were errors or not. + // parse and insert test data. + + // Clear existing mappings before rebuilding test tree + testItemIndex.clear(); + + // If the test root for this folder exists: Workspace refresh, update its children. + // Otherwise, it is a freshly discovered workspace, and we need to create a new test root and populate the test tree. + // Note: populateTestTree will call testItemIndex.registerTestItem() for each discovered test + populateTestTree( + testController, + rawTestData.tests, + undefined, + { + runIdToTestItem: testItemIndex.runIdToTestItemMap, + runIdToVSid: testItemIndex.runIdToVSidMap, + vsIdToRunId: testItemIndex.vsIdToRunIdMap, + } as any, + token, + ); + } + } + + /** + * Create an error node for discovery failures + */ + public createErrorNode( + testController: TestController, + workspaceUri: Uri, + error: string[] | undefined, + testProvider: TestProvider, + ): void { + const workspacePath = workspaceUri.fsPath; + const testingErrorConst = + testProvider === 'pytest' ? Testing.errorPytestDiscovery : Testing.errorUnittestDiscovery; + + traceError(testingErrorConst, 'for workspace: ', workspacePath, '\r\n', error?.join('\r\n\r\n') ?? ''); + + let errorNode = testController.items.get(`DiscoveryError:${workspacePath}`); + const message = util.format( + `${testingErrorConst} ${Testing.seePythonOutput}\r\n`, + error?.join('\r\n\r\n') ?? '', + ); + + if (errorNode === undefined) { + const options = buildErrorNodeOptions(workspaceUri, message, testProvider); + errorNode = createErrorTestItem(testController, options); + testController.items.add(errorNode); + } + + const errorNodeLabel: MarkdownString = new MarkdownString( + `[Show output](command:python.viewOutput) to view error logs`, + ); + errorNodeLabel.isTrusted = true; + errorNode.error = errorNodeLabel; + } +} diff --git a/src/client/testing/testController/common/testExecutionHandler.ts b/src/client/testing/testController/common/testExecutionHandler.ts new file mode 100644 index 000000000000..127e6980ae46 --- /dev/null +++ b/src/client/testing/testController/common/testExecutionHandler.ts @@ -0,0 +1,231 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { TestController, TestRun, TestMessage, Location } from 'vscode'; +import { ExecutionTestPayload } from './types'; +import { TestItemIndex } from './testItemIndex'; +import { splitLines } from '../../../common/stringUtils'; +import { splitTestNameWithRegex } from './utils'; +import { clearAllChildren } from './testItemUtilities'; + +/** + * Stateless handler for processing execution payloads and updating TestRun instances. + * This handler is shared across all workspaces and contains no instance state. 
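+ * Outcomes handled: error, failure, passed-unexpected, success, expected-failure, skipped,
+ * subtest-failure, and subtest-success.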
+ */ +export class TestExecutionHandler { + /** + * Process execution payload and update test run + * Pure function - no instance state used + */ + public processExecution( + payload: ExecutionTestPayload, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + const rawTestExecData = payload as ExecutionTestPayload; + + if (rawTestExecData !== undefined && rawTestExecData.result !== undefined) { + for (const keyTemp of Object.keys(rawTestExecData.result)) { + const testItem = rawTestExecData.result[keyTemp]; + + // Delegate to specific outcome handlers + this.handleTestOutcome(keyTemp, testItem, runInstance, testItemIndex, testController); + } + } + } + + /** + * Handle a single test result based on outcome + */ + private handleTestOutcome( + runId: string, + testItem: any, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + if (testItem.outcome === 'error') { + this.handleTestError(runId, testItem, runInstance, testItemIndex, testController); + } else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') { + this.handleTestFailure(runId, testItem, runInstance, testItemIndex, testController); + } else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') { + this.handleTestSuccess(runId, runInstance, testItemIndex, testController); + } else if (testItem.outcome === 'skipped') { + this.handleTestSkipped(runId, runInstance, testItemIndex, testController); + } else if (testItem.outcome === 'subtest-failure') { + this.handleSubtestFailure(runId, testItem, runInstance, testItemIndex, testController); + } else if (testItem.outcome === 'subtest-success') { + this.handleSubtestSuccess(runId, runInstance, testItemIndex, testController); + } + } + + /** + * Handle test items that errored during execution + */ + private handleTestError( + runId: string, + testItem: any, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + const rawTraceback = testItem.traceback ?? ''; + const traceback = splitLines(rawTraceback, { + trim: false, + removeEmptyEntries: true, + }).join('\r\n'); + const text = `${testItem.test} failed with error: ${testItem.message ?? testItem.outcome}\r\n${traceback}`; + const message = new TestMessage(text); + + const foundItem = testItemIndex.getTestItem(runId, testController); + + if (foundItem?.uri) { + if (foundItem.range) { + message.location = new Location(foundItem.uri, foundItem.range); + } + runInstance.errored(foundItem, message); + } + } + + /** + * Handle test items that failed during execution + */ + private handleTestFailure( + runId: string, + testItem: any, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + const rawTraceback = testItem.traceback ?? ''; + const traceback = splitLines(rawTraceback, { + trim: false, + removeEmptyEntries: true, + }).join('\r\n'); + + const text = `${testItem.test} failed: ${testItem.message ?? 
testItem.outcome}\r\n${traceback}`; + const message = new TestMessage(text); + + const foundItem = testItemIndex.getTestItem(runId, testController); + + if (foundItem?.uri) { + if (foundItem.range) { + message.location = new Location(foundItem.uri, foundItem.range); + } + runInstance.failed(foundItem, message); + } + } + + /** + * Handle test items that passed during execution + */ + private handleTestSuccess( + runId: string, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + const foundItem = testItemIndex.getTestItem(runId, testController); + + if (foundItem !== undefined && foundItem.uri) { + runInstance.passed(foundItem); + } + } + + /** + * Handle test items that were skipped during execution + */ + private handleTestSkipped( + runId: string, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + const foundItem = testItemIndex.getTestItem(runId, testController); + + if (foundItem !== undefined && foundItem.uri) { + runInstance.skipped(foundItem); + } + } + + /** + * Handle subtest failures + */ + private handleSubtestFailure( + runId: string, + testItem: any, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + const [parentTestCaseId, subtestId] = splitTestNameWithRegex(runId); + const parentTestItem = testItemIndex.getTestItem(parentTestCaseId, testController); + + if (parentTestItem) { + const stats = testItemIndex.getSubtestStats(parentTestCaseId); + if (stats) { + stats.failed += 1; + } else { + testItemIndex.setSubtestStats(parentTestCaseId, { + failed: 1, + passed: 0, + }); + clearAllChildren(parentTestItem); + } + + const subTestItem = testController?.createTestItem(subtestId, subtestId, parentTestItem.uri); + + if (subTestItem) { + const traceback = testItem.traceback ?? ''; + const text = `${testItem.subtest} failed: ${testItem.message ?? 
testItem.outcome}\r\n${traceback}`; + parentTestItem.children.add(subTestItem); + runInstance.started(subTestItem); + const message = new TestMessage(text); + if (parentTestItem.uri && parentTestItem.range) { + message.location = new Location(parentTestItem.uri, parentTestItem.range); + } + runInstance.failed(subTestItem, message); + } else { + throw new Error('Unable to create new child node for subtest'); + } + } else { + throw new Error('Parent test item not found'); + } + } + + /** + * Handle subtest successes + */ + private handleSubtestSuccess( + runId: string, + runInstance: TestRun, + testItemIndex: TestItemIndex, + testController: TestController, + ): void { + const [parentTestCaseId, subtestId] = splitTestNameWithRegex(runId); + const parentTestItem = testItemIndex.getTestItem(parentTestCaseId, testController); + + if (parentTestItem) { + const stats = testItemIndex.getSubtestStats(parentTestCaseId); + if (stats) { + stats.passed += 1; + } else { + testItemIndex.setSubtestStats(parentTestCaseId, { failed: 0, passed: 1 }); + clearAllChildren(parentTestItem); + } + + const subTestItem = testController?.createTestItem(subtestId, subtestId, parentTestItem.uri); + + if (subTestItem) { + parentTestItem.children.add(subTestItem); + runInstance.started(subTestItem); + runInstance.passed(subTestItem); + } else { + throw new Error('Unable to create new child node for subtest'); + } + } else { + throw new Error('Parent test item not found'); + } + } +} diff --git a/src/client/testing/testController/common/testItemIndex.ts b/src/client/testing/testController/common/testItemIndex.ts new file mode 100644 index 000000000000..448903eae7d5 --- /dev/null +++ b/src/client/testing/testController/common/testItemIndex.ts @@ -0,0 +1,225 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { TestController, TestItem } from 'vscode'; +import { traceError, traceVerbose } from '../../../logging'; +import { getTestCaseNodes } from './testItemUtilities'; + +export interface SubtestStats { + passed: number; + failed: number; +} + +/** + * Maintains persistent ID mappings between Python test IDs and VS Code TestItems. + * This is a stateful component that bridges discovery and execution phases. 
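+ *
+ * Sketch of the intended call pattern (hypothetical IDs, for illustration only):
+ *
+ *     const index = new TestItemIndex();
+ *     index.registerTestItem('tests/test_a.py::test_one', vsCodeId, item); // discovery
+ *     const found = index.getTestItem('tests/test_a.py::test_one', controller); // execution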
+ *
+ * Lifecycle:
+ * - Created: When PythonResultResolver is instantiated (during workspace activation)
+ * - Populated: During discovery - each discovered test registers its mappings
+ * - Queried: During execution - to look up TestItems by Python run ID
+ * - Cleared: When discovery runs again (fresh start) or workspace is disposed
+ * - Cleaned: Periodically to remove stale references to deleted tests
+ */
+export class TestItemIndex {
+    // THE STATE - these maps persist across discovery and execution
+    private runIdToTestItem: Map<string, TestItem>;
+    private runIdToVSid: Map<string, string>;
+    private vsIdToRunId: Map<string, string>;
+    private subtestStatsMap: Map<string, SubtestStats>;
+
+    constructor() {
+        this.runIdToTestItem = new Map();
+        this.runIdToVSid = new Map();
+        this.vsIdToRunId = new Map();
+        this.subtestStatsMap = new Map();
+    }
+
+    /**
+     * Register a test item with its Python run ID and VS Code ID
+     * Called during DISCOVERY to populate the index
+     */
+    public registerTestItem(runId: string, vsId: string, testItem: TestItem): void {
+        this.runIdToTestItem.set(runId, testItem);
+        this.runIdToVSid.set(runId, vsId);
+        this.vsIdToRunId.set(vsId, runId);
+    }
+
+    /**
+     * Get TestItem by Python run ID (with validation and fallback strategies)
+     * Called during EXECUTION to look up tests
+     *
+     * Uses a three-tier approach:
+     * 1. Direct O(1) lookup in runIdToTestItem map
+     * 2. If stale, try vsId mapping and search by VS Code ID
+     * 3. Last resort: full tree search
+     */
+    public getTestItem(runId: string, testController: TestController): TestItem | undefined {
+        // Try direct O(1) lookup first
+        const directItem = this.runIdToTestItem.get(runId);
+        if (directItem) {
+            // Validate the item is still in the test tree
+            if (this.isTestItemValid(directItem, testController)) {
+                return directItem;
+            } else {
+                // Clean up stale reference
+                this.runIdToTestItem.delete(runId);
+            }
+        }
+
+        // Try vsId mapping as fallback
+        const vsId = this.runIdToVSid.get(runId);
+        if (vsId) {
+            // Search by VS Code ID in the controller
+            let foundItem: TestItem | undefined;
+            testController.items.forEach((item) => {
+                if (item.id === vsId) {
+                    foundItem = item;
+                    return;
+                }
+                if (!foundItem) {
+                    item.children.forEach((child) => {
+                        if (child.id === vsId) {
+                            foundItem = child;
+                        }
+                    });
+                }
+            });
+
+            if (foundItem) {
+                // Cache for future lookups
+                this.runIdToTestItem.set(runId, foundItem);
+                return foundItem;
+            } else {
+                // Clean up stale mapping
+                this.runIdToVSid.delete(runId);
+                this.vsIdToRunId.delete(vsId);
+            }
+        }
+
+        // Last resort: full tree search
+        traceError(`Falling back to tree search for test: ${runId}`);
+        const testCases = this.collectAllTestCases(testController);
+        return testCases.find((item) => item.id === vsId);
+    }
+
+    /**
+     * Get Python run ID from VS Code ID
+     * Called by WorkspaceTestAdapter.executeTests() to convert selected tests to Python IDs
+     */
+    public getRunId(vsId: string): string | undefined {
+        return this.vsIdToRunId.get(vsId);
+    }
+
+    /**
+     * Get VS Code ID from Python run ID
+     */
+    public getVSId(runId: string): string | undefined {
+        return this.runIdToVSid.get(runId);
+    }
+
+    /**
+     * Check if a TestItem reference is still valid in the tree
+     *
+     * Time Complexity: O(depth) where depth is the maximum nesting level of the test tree.
+     * In most cases this is O(1) to O(3) since test trees are typically shallow.
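+     * For example, an item nested as folder > file > class > test (a hypothetical
+     * layout) walks at most three parents before reaching a root.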
+     */
+    public isTestItemValid(testItem: TestItem, testController: TestController): boolean {
+        // Simple validation: check if the item's parent chain leads back to the controller
+        let current: TestItem | undefined = testItem;
+        while (current?.parent) {
+            current = current.parent;
+        }
+
+        // If we reached a root item, check if it's in the controller
+        if (current) {
+            return testController.items.get(current.id) === current;
+        }
+
+        // If no parent chain, check if it's directly in the controller
+        return testController.items.get(testItem.id) === testItem;
+    }
+
+    /**
+     * Get subtest statistics for a parent test case
+     * Returns undefined if no stats exist yet for this parent
+     */
+    public getSubtestStats(parentId: string): SubtestStats | undefined {
+        return this.subtestStatsMap.get(parentId);
+    }
+
+    /**
+     * Set subtest statistics for a parent test case
+     */
+    public setSubtestStats(parentId: string, stats: SubtestStats): void {
+        this.subtestStatsMap.set(parentId, stats);
+    }
+
+    /**
+     * Remove all mappings
+     * Called at the start of discovery to ensure clean state
+     */
+    public clear(): void {
+        this.runIdToTestItem.clear();
+        this.runIdToVSid.clear();
+        this.vsIdToRunId.clear();
+        this.subtestStatsMap.clear();
+    }
+
+    /**
+     * Clean up stale references that no longer exist in the test tree
+     * Called after test tree modifications
+     */
+    public cleanupStaleReferences(testController: TestController): void {
+        const staleRunIds: string[] = [];
+
+        // Check all runId->TestItem mappings
+        this.runIdToTestItem.forEach((testItem, runId) => {
+            if (!this.isTestItemValid(testItem, testController)) {
+                staleRunIds.push(runId);
+            }
+        });
+
+        // Remove stale entries
+        staleRunIds.forEach((runId) => {
+            const vsId = this.runIdToVSid.get(runId);
+            this.runIdToTestItem.delete(runId);
+            this.runIdToVSid.delete(runId);
+            if (vsId) {
+                this.vsIdToRunId.delete(vsId);
+            }
+        });
+
+        if (staleRunIds.length > 0) {
+            traceVerbose(`Cleaned up ${staleRunIds.length} stale test item references`);
+        }
+    }
+
+    /**
+     * Collect all test case items from the test controller tree.
+     * Note: This performs full tree traversal - use cached lookups when possible.
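+     * Cost is O(n) in the number of tree nodes, which is why getTestItem() only
+     * falls back to it after both map lookups fail.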
+     */
+    private collectAllTestCases(testController: TestController): TestItem[] {
+        const testCases: TestItem[] = [];
+
+        testController.items.forEach((i) => {
+            const tempArr: TestItem[] = getTestCaseNodes(i);
+            testCases.push(...tempArr);
+        });
+
+        return testCases;
+    }
+
+    // Expose maps for backward compatibility (read-only access)
+    public get runIdToTestItemMap(): Map<string, TestItem> {
+        return this.runIdToTestItem;
+    }
+
+    public get runIdToVSidMap(): Map<string, string> {
+        return this.runIdToVSid;
+    }
+
+    public get vsIdToRunIdMap(): Map<string, string> {
+        return this.vsIdToRunId;
+    }
+}
diff --git a/src/client/testing/testController/common/types.ts b/src/client/testing/testController/common/types.ts
index b4d95af6c30e..6121b3e24442 100644
--- a/src/client/testing/testController/common/types.ts
+++ b/src/client/testing/testController/common/types.ts
@@ -155,11 +155,9 @@ export interface ITestResultResolver {
     _resolveCoverage(payload: CoveragePayload, runInstance: TestRun): void;
 }
 export interface ITestDiscoveryAdapter {
-    // ** first line old method signature, second line new method signature
-    discoverTests(uri: Uri): Promise<DiscoveredTestPayload>;
     discoverTests(
         uri: Uri,
-        executionFactory?: IPythonExecutionFactory,
+        executionFactory: IPythonExecutionFactory,
         token?: CancellationToken,
         interpreter?: PythonEnvironment,
     ): Promise<DiscoveredTestPayload>;
@@ -167,21 +165,19 @@ export interface ITestDiscoveryAdapter {
 
 // interface for execution/runner adapter
 export interface ITestExecutionAdapter {
-    // ** first line old method signature, second line new method signature
-    runTests(uri: Uri, testIds: string[], profileKind?: boolean | TestRunProfileKind): Promise<void>;
     runTests(
         uri: Uri,
         testIds: string[],
-        profileKind?: boolean | TestRunProfileKind,
-        runInstance?: TestRun,
-        executionFactory?: IPythonExecutionFactory,
+        profileKind: boolean | TestRunProfileKind | undefined,
+        runInstance: TestRun,
+        executionFactory: IPythonExecutionFactory,
         debugLauncher?: ITestDebugLauncher,
         interpreter?: PythonEnvironment,
     ): Promise<void>;
 }
 
 // Same types as in python_files/unittestadapter/utils.py
-export type DiscoveredTestType = 'folder' | 'file' | 'class' | 'test';
+export type DiscoveredTestType = 'folder' | 'file' | 'class' | 'function' | 'test';
 
 export type DiscoveredTestCommon = {
     path: string;
@@ -198,6 +194,7 @@ export type DiscoveredTestItem = DiscoveredTestCommon & {
 
 export type DiscoveredTestNode = DiscoveredTestCommon & {
     children: (DiscoveredTestNode | DiscoveredTestItem)[];
+    lineno?: number | string;
 };
 
 export type DiscoveredTestPayload = {
diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts
index 0bbf0e449dcd..606865e5ad7e 100644
--- a/src/client/testing/testController/common/utils.ts
+++ b/src/client/testing/testController/common/utils.ts
@@ -257,6 +257,21 @@ export function populateTestTree(
                 node.canResolveChildren = true;
                 node.tags = [RunTestTag, DebugTestTag];
 
+                // Set range for class nodes (and other nodes) if lineno is available
+                let range: Range | undefined;
+                if ('lineno' in child && child.lineno) {
+                    if (Number(child.lineno) === 0) {
+                        range = new Range(new Position(0, 0), new Position(0, 0));
+                    } else {
+                        range = new Range(
+                            new Position(Number(child.lineno) - 1, 0),
+                            new Position(Number(child.lineno), 0),
+                        );
+                    }
+                    node.range = range;
+                }
+
                 testRoot!.children.add(node);
             }
             populateTestTree(testController, child, node, resultResolver, token);
diff --git a/src/client/testing/testController/controller.ts b/src/client/testing/testController/controller.ts
index aefc97117da5..8c8ce422e3c1 100644
--- a/src/client/testing/testController/controller.ts
+++ b/src/client/testing/testController/controller.ts
@@ -29,7 +29,7 @@ import { IConfigurationService, IDisposableRegistry, Resource } from '../../common/types';
 import { DelayedTrigger, IDelayedTrigger } from '../../common/utils/delayTrigger';
 import { noop } from '../../common/utils/misc';
 import { IInterpreterService } from '../../interpreter/contracts';
-import { traceError, traceInfo, traceVerbose } from '../../logging';
+import { traceError, traceVerbose } from '../../logging';
 import { IEventNamePropertyMapping, sendTelemetryEvent } from '../../telemetry';
 import { EventName } from '../../telemetry/constants';
 import { PYTEST_PROVIDER, UNITTEST_PROVIDER } from '../common/constants';
@@ -253,101 +253,121 @@ export class PythonTestController implements ITestController, IExtensionSingleActivationService {
     private async refreshTestDataInternal(uri?: Resource): Promise<void> {
         this.refreshingStartedEvent.fire();
-        if (uri) {
-            const settings = this.configSettings.getSettings(uri);
-            const workspace = this.workspaceService.getWorkspaceFolder(uri);
-            traceInfo(`Discover tests for workspace name: ${workspace?.name} - uri: ${uri.fsPath}`);
-            // Ensure we send test telemetry if it gets disabled again
-            this.sendTestDisabledTelemetry = true;
-            // ** experiment to roll out NEW test discovery mechanism
-            if (settings.testing.pytestEnabled) {
-                if (workspace && workspace.uri) {
-                    const testAdapter = this.testAdapters.get(workspace.uri);
-                    if (testAdapter) {
-                        const testProviderInAdapter = testAdapter.getTestProvider();
-                        if (testProviderInAdapter !== 'pytest') {
-                            traceError('Test provider in adapter is not pytest. Please reload window.');
-                            this.surfaceErrorNode(
-                                workspace.uri,
-                                'Test provider types are not aligned, please reload your VS Code window.',
-                                'pytest',
-                            );
-                            return Promise.resolve();
-                        }
-                        await testAdapter.discoverTests(
-                            this.testController,
-                            this.refreshCancellation.token,
-                            this.pythonExecFactory,
-                            await this.interpreterService.getActiveInterpreter(workspace.uri),
-                        );
-                    } else {
-                        traceError('Unable to find test adapter for workspace.');
-                    }
-                } else {
-                    traceError('Unable to find workspace for given file');
-                }
-            } else if (settings.testing.unittestEnabled) {
-                if (workspace && workspace.uri) {
-                    const testAdapter = this.testAdapters.get(workspace.uri);
-                    if (testAdapter) {
-                        const testProviderInAdapter = testAdapter.getTestProvider();
-                        if (testProviderInAdapter !== 'unittest') {
-                            traceError('Test provider in adapter is not unittest. Please reload window.');
-                            this.surfaceErrorNode(
-                                workspace.uri,
-                                'Test provider types are not aligned, please reload your VS Code window.',
-                                'unittest',
-                            );
-                            return Promise.resolve();
-                        }
-                        await testAdapter.discoverTests(
-                            this.testController,
-                            this.refreshCancellation.token,
-                            this.pythonExecFactory,
-                            await this.interpreterService.getActiveInterpreter(workspace.uri),
-                        );
-                    } else {
-                        traceError('Unable to find test adapter for workspace.');
-                    }
-                } else {
-                    traceError('Unable to find workspace for given file');
-                }
+        try {
+            if (uri) {
+                await this.refreshSingleWorkspace(uri);
             } else {
-                if (this.sendTestDisabledTelemetry) {
-                    this.sendTestDisabledTelemetry = false;
-                    sendTelemetryEvent(EventName.UNITTEST_DISABLED);
-                }
-                // If we are here we may have to remove an existing node from the tree
-                // This handles the case where user removes test settings. Which should remove the
-                // tests for that particular case from the tree view
-                if (workspace) {
-                    const toDelete: string[] = [];
-                    this.testController.items.forEach((i: TestItem) => {
-                        const w = this.workspaceService.getWorkspaceFolder(i.uri);
-                        if (w?.uri.fsPath === workspace.uri.fsPath) {
-                            toDelete.push(i.id);
-                        }
-                    });
-                    toDelete.forEach((i) => this.testController.items.delete(i));
-                }
+                await this.refreshAllWorkspaces();
             }
+        } finally {
+            this.refreshingCompletedEvent.fire();
+        }
+    }
+
+    /**
+     * Discovers tests for a single workspace.
+     */
+    private async refreshSingleWorkspace(uri: Uri): Promise<void> {
+        const workspace = this.workspaceService.getWorkspaceFolder(uri);
+        if (!workspace?.uri) {
+            traceError('Unable to find workspace for given file');
+            return;
+        }
+
+        const settings = this.configSettings.getSettings(uri);
+        traceVerbose(`Discover tests for workspace name: ${workspace.name} - uri: ${uri.fsPath}`);
+
+        // Ensure we send test telemetry if it gets disabled again
+        this.sendTestDisabledTelemetry = true;
+
+        if (settings.testing.pytestEnabled) {
+            await this.discoverTestsForProvider(workspace.uri, 'pytest');
+        } else if (settings.testing.unittestEnabled) {
+            await this.discoverTestsForProvider(workspace.uri, 'unittest');
         } else {
-            traceVerbose('Testing: Refreshing all test data');
-            const workspaces: readonly WorkspaceFolder[] = this.workspaceService.workspaceFolders || [];
-            await Promise.all(
-                workspaces.map(async (workspace) => {
-                    if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) {
-                        this.commandManager
-                            .executeCommand(constants.Commands.TriggerEnvironmentSelection, workspace.uri)
-                            .then(noop, noop);
-                        return;
-                    }
-                    await this.refreshTestDataInternal(workspace.uri);
-                }),
+            await this.handleNoTestProviderEnabled(workspace);
+        }
+    }
+
+    /**
+     * Discovers tests for all workspaces in the workspace folders.
+     */
+    private async refreshAllWorkspaces(): Promise<void> {
+        traceVerbose('Testing: Refreshing all test data');
+        const workspaces: readonly WorkspaceFolder[] = this.workspaceService.workspaceFolders || [];
+
+        await Promise.all(
+            workspaces.map(async (workspace) => {
+                if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) {
+                    this.commandManager
+                        .executeCommand(constants.Commands.TriggerEnvironmentSelection, workspace.uri)
+                        .then(noop, noop);
+                    return;
+                }
+                await this.refreshSingleWorkspace(workspace.uri);
+            }),
+        );
+    }
+
+    /**
+     * Discovers tests for a specific test provider (pytest or unittest).
+     * Validates that the adapter's provider matches the expected provider.
+     */
+    private async discoverTestsForProvider(workspaceUri: Uri, expectedProvider: TestProvider): Promise<void> {
+        const testAdapter = this.testAdapters.get(workspaceUri);
+
+        if (!testAdapter) {
+            traceError('Unable to find test adapter for workspace.');
+            return;
+        }
+
+        const actualProvider = testAdapter.getTestProvider();
+        if (actualProvider !== expectedProvider) {
+            traceError(`Test provider in adapter is not ${expectedProvider}. Please reload window.`);
+            this.surfaceErrorNode(
+                workspaceUri,
+                'Test provider types are not aligned, please reload your VS Code window.',
+                expectedProvider,
             );
+            return;
         }
-        this.refreshingCompletedEvent.fire();
-        return Promise.resolve();
+
+        await testAdapter.discoverTests(
+            this.testController,
+            this.pythonExecFactory,
+            this.refreshCancellation.token,
+            await this.interpreterService.getActiveInterpreter(workspaceUri),
+        );
+    }
+
+    /**
+     * Handles the case when no test provider is enabled.
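+     * (e.g., when both pytestEnabled and unittestEnabled are false in the workspace settings).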
+     * Sends telemetry and removes test items for the workspace from the tree.
+     */
+    private async handleNoTestProviderEnabled(workspace: WorkspaceFolder): Promise<void> {
+        if (this.sendTestDisabledTelemetry) {
+            this.sendTestDisabledTelemetry = false;
+            sendTelemetryEvent(EventName.UNITTEST_DISABLED);
+        }
+
+        this.removeTestItemsForWorkspace(workspace);
+    }
+
+    /**
+     * Removes all test items belonging to a specific workspace from the test controller.
+     * This is used when test discovery is disabled for a workspace.
+     */
+    private removeTestItemsForWorkspace(workspace: WorkspaceFolder): void {
+        const itemsToDelete: string[] = [];
+
+        this.testController.items.forEach((testItem: TestItem) => {
+            const itemWorkspace = this.workspaceService.getWorkspaceFolder(testItem.uri);
+            if (itemWorkspace?.uri.fsPath === workspace.uri.fsPath) {
+                itemsToDelete.push(testItem.id);
+            }
+        });
+
+        itemsToDelete.forEach((id) => this.testController.items.delete(id));
+    }
 
     private async resolveChildren(item: TestItem | undefined): Promise<void> {
@@ -378,21 +398,13 @@ export class PythonTestController implements ITestController, IExtensionSingleActivationService {
     }
 
     private async runTests(request: TestRunRequest, token: CancellationToken): Promise<void> {
-        const workspaces: WorkspaceFolder[] = [];
-        if (request.include) {
-            uniq(request.include.map((r) => this.workspaceService.getWorkspaceFolder(r.uri))).forEach((w) => {
-                if (w) {
-                    workspaces.push(w);
-                }
-            });
-        } else {
-            (this.workspaceService.workspaceFolders || []).forEach((w) => workspaces.push(w));
-        }
+        const workspaces = this.getWorkspacesForTestRun(request);
         const runInstance = this.testController.createTestRun(
             request,
             `Running Tests for Workspace(s): ${workspaces.map((w) => w.uri.fsPath).join(';')}`,
             true,
         );
+
         const dispose = token.onCancellationRequested(() => {
             runInstance.appendOutput(`\nRun instance cancelled.\r\n`);
             runInstance.end();
@@ -402,87 +414,9 @@ export class PythonTestController implements ITestController, IExtensionSingleActivationService {
 
         try {
             await Promise.all(
-                workspaces.map(async (workspace) => {
-                    if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) {
-                        this.commandManager
-                            .executeCommand(constants.Commands.TriggerEnvironmentSelection, workspace.uri)
-                            .then(noop, noop);
-                        return undefined;
-                    }
-                    const testItems: TestItem[] = [];
-                    // If the run request includes test items then collect only items that belong to
-                    // `workspace`. If there are no items in the run request then just run the `workspace`
-                    // root test node. Include will be `undefined` in the "run all" scenario.
-                    (request.include ?? this.testController.items).forEach((i: TestItem) => {
-                        const w = this.workspaceService.getWorkspaceFolder(i.uri);
-                        if (w?.uri.fsPath === workspace.uri.fsPath) {
-                            testItems.push(i);
-                        }
-                    });
-
-                    const settings = this.configSettings.getSettings(workspace.uri);
-                    if (testItems.length > 0) {
-                        const testAdapter =
-                            this.testAdapters.get(workspace.uri) ||
-                            (this.testAdapters.values().next().value as WorkspaceTestAdapter);
-
-                        // no profile will have TestRunProfileKind.Coverage if rewrite isn't enabled
-                        if (request.profile?.kind && request.profile?.kind === TestRunProfileKind.Coverage) {
-                            request.profile.loadDetailedCoverage = (
-                                _testRun: TestRun,
-                                fileCoverage,
-                                _token,
-                            ): Thenable<FileCoverageDetail[]> => {
-                                const details = testAdapter.resultResolver.detailedCoverageMap.get(
-                                    fileCoverage.uri.fsPath,
-                                );
-                                if (details === undefined) {
-                                    // given file has no detailed coverage data
-                                    return Promise.resolve([]);
-                                }
-                                return Promise.resolve(details);
-                            };
-                        }
-
-                        if (settings.testing.pytestEnabled) {
-                            sendTelemetryEvent(EventName.UNITTEST_RUN, undefined, {
-                                tool: 'pytest',
-                                debugging: request.profile?.kind === TestRunProfileKind.Debug,
-                            });
-                            return testAdapter.executeTests(
-                                this.testController,
-                                runInstance,
-                                testItems,
-                                token,
-                                request.profile?.kind,
-                                this.pythonExecFactory,
-                                this.debugLauncher,
-                                await this.interpreterService.getActiveInterpreter(workspace.uri),
-                            );
-                        }
-                        if (settings.testing.unittestEnabled) {
-                            sendTelemetryEvent(EventName.UNITTEST_RUN, undefined, {
-                                tool: 'unittest',
-                                debugging: request.profile?.kind === TestRunProfileKind.Debug,
-                            });
-                            // ** experiment to roll out NEW test discovery mechanism
-                            return testAdapter.executeTests(
-                                this.testController,
-                                runInstance,
-                                testItems,
-                                token,
-                                request.profile?.kind,
-                                this.pythonExecFactory,
-                                this.debugLauncher,
-                                await this.interpreterService.getActiveInterpreter(workspace.uri),
-                            );
-                        }
-                    }
-                    if (!settings.testing.pytestEnabled && !settings.testing.unittestEnabled) {
-                        unconfiguredWorkspaces.push(workspace);
-                    }
-                    return Promise.resolve();
-                }),
+                workspaces.map((workspace) =>
+                    this.runTestsForWorkspace(workspace, request, runInstance, token, unconfiguredWorkspaces),
+                ),
             );
         } finally {
             traceVerbose('Finished running tests, ending runInstance.');
@@ -495,6 +429,146 @@ export class PythonTestController implements ITestController, IExtensionSingleActivationService {
         }
     }
 
+    /**
+     * Gets the list of workspaces to run tests for based on the test run request.
+     */
+    private getWorkspacesForTestRun(request: TestRunRequest): WorkspaceFolder[] {
+        if (request.include) {
+            const workspaces: WorkspaceFolder[] = [];
+            uniq(request.include.map((r) => this.workspaceService.getWorkspaceFolder(r.uri))).forEach((w) => {
+                if (w) {
+                    workspaces.push(w);
+                }
+            });
+            return workspaces;
+        }
+        return Array.from(this.workspaceService.workspaceFolders || []);
+    }
+
+    /**
+     * Runs tests for a single workspace.
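+     * Flow sketch: resolve the interpreter, collect this workspace's test items,
+     * wire up coverage loading if requested, then dispatch to pytest or unittest.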
+     */
+    private async runTestsForWorkspace(
+        workspace: WorkspaceFolder,
+        request: TestRunRequest,
+        runInstance: TestRun,
+        token: CancellationToken,
+        unconfiguredWorkspaces: WorkspaceFolder[],
+    ): Promise<void> {
+        if (!(await this.interpreterService.getActiveInterpreter(workspace.uri))) {
+            this.commandManager
+                .executeCommand(constants.Commands.TriggerEnvironmentSelection, workspace.uri)
+                .then(noop, noop);
+            return;
+        }
+
+        const testItems = this.getTestItemsForWorkspace(workspace, request);
+        const settings = this.configSettings.getSettings(workspace.uri);
+
+        if (testItems.length === 0) {
+            if (!settings.testing.pytestEnabled && !settings.testing.unittestEnabled) {
+                unconfiguredWorkspaces.push(workspace);
+            }
+            return;
+        }
+
+        const testAdapter =
+            this.testAdapters.get(workspace.uri) || (this.testAdapters.values().next().value as WorkspaceTestAdapter);
+
+        this.setupCoverageIfNeeded(request, testAdapter);
+
+        if (settings.testing.pytestEnabled) {
+            await this.executeTestsForProvider(
+                workspace,
+                testAdapter,
+                testItems,
+                runInstance,
+                request,
+                token,
+                'pytest',
+            );
+        } else if (settings.testing.unittestEnabled) {
+            await this.executeTestsForProvider(
+                workspace,
+                testAdapter,
+                testItems,
+                runInstance,
+                request,
+                token,
+                'unittest',
+            );
+        } else {
+            unconfiguredWorkspaces.push(workspace);
+        }
+    }
+
+    /**
+     * Gets test items that belong to a specific workspace from the run request.
+     */
+    private getTestItemsForWorkspace(workspace: WorkspaceFolder, request: TestRunRequest): TestItem[] {
+        const testItems: TestItem[] = [];
+        // If the run request includes test items then collect only items that belong to
+        // `workspace`. If there are no items in the run request then just run the `workspace`
+        // root test node. Include will be `undefined` in the "run all" scenario.
+        (request.include ?? this.testController.items).forEach((i: TestItem) => {
+            const w = this.workspaceService.getWorkspaceFolder(i.uri);
+            if (w?.uri.fsPath === workspace.uri.fsPath) {
+                testItems.push(i);
+            }
+        });
+        return testItems;
+    }
+
+    /**
+     * Sets up detailed coverage loading if the run profile is for coverage.
+     */
+    private setupCoverageIfNeeded(request: TestRunRequest, testAdapter: WorkspaceTestAdapter): void {
+        // no profile will have TestRunProfileKind.Coverage if rewrite isn't enabled
+        if (request.profile?.kind && request.profile?.kind === TestRunProfileKind.Coverage) {
+            request.profile.loadDetailedCoverage = (
+                _testRun: TestRun,
+                fileCoverage,
+                _token,
+            ): Thenable<FileCoverageDetail[]> => {
+                const details = testAdapter.resultResolver.detailedCoverageMap.get(fileCoverage.uri.fsPath);
+                if (details === undefined) {
+                    // given file has no detailed coverage data
+                    return Promise.resolve([]);
+                }
+                return Promise.resolve(details);
+            };
+        }
+    }
+
+    /**
+     * Executes tests using the test adapter for a specific test provider.
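+     * Both providers share this path; only the telemetry `tool` field differs
+     * ('pytest' vs 'unittest').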
+     */
+    private async executeTestsForProvider(
+        workspace: WorkspaceFolder,
+        testAdapter: WorkspaceTestAdapter,
+        testItems: TestItem[],
+        runInstance: TestRun,
+        request: TestRunRequest,
+        token: CancellationToken,
+        provider: TestProvider,
+    ): Promise<void> {
+        sendTelemetryEvent(EventName.UNITTEST_RUN, undefined, {
+            tool: provider,
+            debugging: request.profile?.kind === TestRunProfileKind.Debug,
+        });
+
+        await testAdapter.executeTests(
+            this.testController,
+            runInstance,
+            testItems,
+            this.pythonExecFactory,
+            token,
+            request.profile?.kind,
+            this.debugLauncher,
+            await this.interpreterService.getActiveInterpreter(workspace.uri),
+        );
+    }
+
     private invalidateTests(uri: Uri) {
         this.testController.items.forEach((root) => {
             const item = getNodeByUri(root, uri);
diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts
index 04258ddbddf2..7ad69c71fa0e 100644
--- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts
+++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts
@@ -1,8 +1,7 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 import * as path from 'path';
-import { CancellationToken, CancellationTokenSource, Uri } from 'vscode';
-import * as fs from 'fs';
+import { CancellationToken, Disposable, Uri } from 'vscode';
 import { ChildProcess } from 'child_process';
 import {
     ExecutionFactoryCreateWithEnvironmentOptions,
@@ -10,24 +9,37 @@ import {
     SpawnOptions,
 } from '../../../common/process/types';
 import { IConfigurationService } from '../../../common/types';
-import { createDeferred, Deferred } from '../../../common/utils/async';
+import { Deferred } from '../../../common/utils/async';
 import { EXTENSION_ROOT_DIR } from '../../../constants';
-import { traceError, traceInfo, traceVerbose, traceWarn } from '../../../logging';
-import { DiscoveredTestPayload, ITestDiscoveryAdapter, ITestResultResolver } from '../common/types';
-import {
-    createDiscoveryErrorPayload,
-    createTestingDeferred,
-    fixLogLinesNoTrailing,
-    startDiscoveryNamedPipe,
-    addValueIfKeyNotExist,
-    hasSymlinkParent,
-} from '../common/utils';
+import { traceError, traceInfo, traceVerbose } from '../../../logging';
+import { ITestDiscoveryAdapter, ITestResultResolver } from '../common/types';
+import { createTestingDeferred } from '../common/utils';
 import { IEnvironmentVariablesProvider } from '../../../common/variables/types';
 import { PythonEnvironment } from '../../../pythonEnvironments/info';
 import { useEnvExtension, getEnvironment, runInBackground } from '../../../envExt/api.internal';
+import { buildPytestEnv as configureSubprocessEnv, handleSymlinkAndRootDir } from './pytestHelpers';
+import { cleanupOnCancellation, createProcessHandlers, setupDiscoveryPipe } from '../common/discoveryHelpers';
+
+/**
+ * Configures the subprocess environment for pytest discovery.
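+ * The returned env prepends the extension's python_files directory to PYTHONPATH
+ * and sets TEST_RUN_PIPE to the discovery pipe name.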
+ * @param envVarsService Service to retrieve environment variables
+ * @param uri Workspace URI
+ * @param discoveryPipeName Name of the discovery pipe to pass to the subprocess
+ * @returns Configured environment variables for the subprocess
+ */
+async function configureDiscoveryEnv(
+    envVarsService: IEnvironmentVariablesProvider | undefined,
+    uri: Uri,
+    discoveryPipeName: string,
+): Promise<{ [key: string]: string | undefined }> {
+    const fullPluginPath = path.join(EXTENSION_ROOT_DIR, 'python_files');
+    const envVars = await envVarsService?.getEnvironmentVariables(uri);
+    const mutableEnv = configureSubprocessEnv(envVars, fullPluginPath, discoveryPipeName);
+    return mutableEnv;
+}
 
 /**
- * Wrapper class for unittest test discovery. This is where we call `runTestCommand`. #this seems incorrectly copied
+ * Wrapper class for pytest test discovery. This is where we call the pytest subprocess.
  */
 export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
     constructor(
@@ -38,189 +50,157 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
 
     async discoverTests(
         uri: Uri,
-        executionFactory?: IPythonExecutionFactory,
+        executionFactory: IPythonExecutionFactory,
         token?: CancellationToken,
         interpreter?: PythonEnvironment,
     ): Promise<void> {
-        const cSource = new CancellationTokenSource();
-        const deferredReturn = createDeferred();
-
-        token?.onCancellationRequested(() => {
-            traceInfo(`Test discovery cancelled.`);
-            cSource.cancel();
-            deferredReturn.resolve();
-        });
-
-        const name = await startDiscoveryNamedPipe((data: DiscoveredTestPayload) => {
-            // if the token is cancelled, we don't want process the data
-            if (!token?.isCancellationRequested) {
-                this.resultResolver?.resolveDiscovery(data);
-            }
-        }, cSource.token);
-
-        this.runPytestDiscovery(uri, name, cSource, executionFactory, interpreter, token).then(() => {
-            deferredReturn.resolve();
-        });
+        // Setup discovery pipe and cancellation
+        const {
+            pipeName: discoveryPipeName,
+            cancellation: discoveryPipeCancellation,
+            tokenDisposable,
+        } = await setupDiscoveryPipe(this.resultResolver, token, uri);
+
+        // Setup process handlers deferred (used by both execution paths)
+        const deferredTillExecClose: Deferred<void> = createTestingDeferred();
 
-        return deferredReturn.promise;
-    }
+        // Collect all disposables related to discovery to handle cleanup in finally block
+        const disposables: Disposable[] = [];
+        if (tokenDisposable) {
+            disposables.push(tokenDisposable);
+        }
 
-    async runPytestDiscovery(
-        uri: Uri,
-        discoveryPipeName: string,
-        cSource: CancellationTokenSource,
-        executionFactory?: IPythonExecutionFactory,
-        interpreter?: PythonEnvironment,
-        token?: CancellationToken,
-    ): Promise<void> {
-        const relativePathToPytest = 'python_files';
-        const fullPluginPath = path.join(EXTENSION_ROOT_DIR, relativePathToPytest);
-        const settings = this.configSettings.getSettings(uri);
-        let { pytestArgs } = settings.testing;
-        const cwd = settings.testing.cwd && settings.testing.cwd.length > 0 ? settings.testing.cwd : uri.fsPath;
-
-        // check for symbolic path
-        const stats = await fs.promises.lstat(cwd);
-        const resolvedPath = await fs.promises.realpath(cwd);
-        let isSymbolicLink = false;
-        if (stats.isSymbolicLink()) {
-            isSymbolicLink = true;
-            traceWarn('The cwd is a symbolic link.');
-        } else if (resolvedPath !== cwd) {
-            traceWarn(
-                'The cwd resolves to a different path, checking if it has a symbolic link somewhere in its path.',
+        try {
+            // Build pytest command and arguments
+            const settings = this.configSettings.getSettings(uri);
+            let { pytestArgs } = settings.testing;
+            const cwd = settings.testing.cwd && settings.testing.cwd.length > 0 ? settings.testing.cwd : uri.fsPath;
+            pytestArgs = await handleSymlinkAndRootDir(cwd, pytestArgs);
+            const commandArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs);
+            traceVerbose(
+                `Running pytest discovery with command: ${commandArgs.join(' ')} for workspace ${uri.fsPath}.`,
             );
-            isSymbolicLink = await hasSymlinkParent(cwd);
-        }
-        if (isSymbolicLink) {
-            traceWarn("Symlink found, adding '--rootdir' to pytestArgs only if it doesn't already exist. cwd: ", cwd);
-            pytestArgs = addValueIfKeyNotExist(pytestArgs, '--rootdir', cwd);
-        }
-        // if user has provided `--rootdir` then use that, otherwise add `cwd`
-        // root dir is required so pytest can find the relative paths and for symlinks
-        addValueIfKeyNotExist(pytestArgs, '--rootdir', cwd);
-
-        // get and edit env vars
-        const mutableEnv = {
-            ...(await this.envVarsService?.getEnvironmentVariables(uri)),
-        };
-        // get python path from mutable env, it contains process.env as well
-        const pythonPathParts: string[] = mutableEnv.PYTHONPATH?.split(path.delimiter) ?? [];
-        const pythonPathCommand = [fullPluginPath, ...pythonPathParts].join(path.delimiter);
-        mutableEnv.PYTHONPATH = pythonPathCommand;
-        mutableEnv.TEST_RUN_PIPE = discoveryPipeName;
-        traceInfo(
-            `Environment variables set for pytest discovery: PYTHONPATH=${mutableEnv.PYTHONPATH}, TEST_RUN_PIPE=${mutableEnv.TEST_RUN_PIPE}`,
-        );
-
-        // delete UUID following entire discovery finishing.
-        const execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs);
-        traceVerbose(`Running pytest discovery with command: ${execArgs.join(' ')} for workspace ${uri.fsPath}.`);
-
-        if (useEnvExtension()) {
-            const pythonEnv = await getEnvironment(uri);
-            if (pythonEnv) {
-                const deferredTillExecClose: Deferred<void> = createTestingDeferred();
+
+            // Configure subprocess environment
+            const mutableEnv = await configureDiscoveryEnv(this.envVarsService, uri, discoveryPipeName);
+
+            // Setup process handlers (shared by both execution paths)
+            const handlers = createProcessHandlers('pytest', uri, cwd, this.resultResolver, deferredTillExecClose, [5]);
+
+            // Execute using environment extension if available
+            if (useEnvExtension()) {
+                traceInfo(`Using environment extension for pytest discovery in workspace ${uri.fsPath}`);
+                const pythonEnv = await getEnvironment(uri);
+                if (!pythonEnv) {
+                    traceError(
+                        `Python environment not found for workspace ${uri.fsPath}.
Cannot proceed with test discovery.`, + ); + deferredTillExecClose.resolve(); + return; + } + traceVerbose(`Using Python environment: ${JSON.stringify(pythonEnv)}`); const proc = await runInBackground(pythonEnv, { cwd, - args: execArgs, + args: commandArgs, env: (mutableEnv as unknown) as { [key: string]: string }, }); - token?.onCancellationRequested(() => { - traceInfo(`Test discovery cancelled, killing pytest subprocess for workspace ${uri.fsPath}`); - proc.kill(); - deferredTillExecClose.resolve(); - cSource.cancel(); - }); - proc.stdout.on('data', (data) => { - const out = fixLogLinesNoTrailing(data.toString()); - traceInfo(out); - }); - proc.stderr.on('data', (data) => { - const out = fixLogLinesNoTrailing(data.toString()); - traceError(out); + traceInfo(`Started pytest discovery subprocess (environment extension) for workspace ${uri.fsPath}`); + + // Wire up cancellation and process events + const envExtCancellationHandler = token?.onCancellationRequested(() => { + cleanupOnCancellation('pytest', proc, deferredTillExecClose, discoveryPipeCancellation, uri); }); + if (envExtCancellationHandler) { + disposables.push(envExtCancellationHandler); + } + proc.stdout.on('data', handlers.onStdout); + proc.stderr.on('data', handlers.onStderr); proc.onExit((code, signal) => { - if (code !== 0) { - traceError( - `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal} on workspace ${uri.fsPath}`, - ); - this.resultResolver?.resolveDiscovery(createDiscoveryErrorPayload(code, signal, cwd)); - } - deferredTillExecClose.resolve(); + handlers.onExit(code, signal); + handlers.onClose(code, signal); }); + await deferredTillExecClose.promise; - } else { - traceError(`Python Environment not found for: ${uri.fsPath}`); + traceInfo(`Pytest discovery completed for workspace ${uri.fsPath}`); + return; } - return; - } - - const spawnOptions: SpawnOptions = { - cwd, - throwOnStdErr: true, - env: mutableEnv, - token, - }; - // Create the Python environment in which to execute the command. - const creationOptions: ExecutionFactoryCreateWithEnvironmentOptions = { - allowEnvironmentFetchExceptions: false, - resource: uri, - interpreter, - }; - const execService = await executionFactory?.createActivatedEnvironment(creationOptions); + // Execute using execution factory (fallback path) + traceInfo(`Using execution factory for pytest discovery in workspace ${uri.fsPath}`); + const creationOptions: ExecutionFactoryCreateWithEnvironmentOptions = { + allowEnvironmentFetchExceptions: false, + resource: uri, + interpreter, + }; + const execService = await executionFactory.createActivatedEnvironment(creationOptions); + if (!execService) { + traceError( + `Failed to create execution service for workspace ${uri.fsPath}. 
Cannot proceed with test discovery.`,
+                );
+                deferredTillExecClose.resolve();
+                return;
+            }
+            const execInfo = await execService.getExecutablePath();
+            traceVerbose(`Using Python executable: ${execInfo} for workspace ${uri.fsPath}`);
 
-        const execInfo = await execService?.getExecutablePath();
-        traceVerbose(`Executable path for pytest discovery: ${execInfo}.`);
+            // Check for cancellation before spawning process
+            if (token?.isCancellationRequested) {
+                traceInfo(`Pytest discovery cancelled before spawning process for workspace ${uri.fsPath}`);
+                deferredTillExecClose.resolve();
+                return;
+            }
 
-        const deferredTillExecClose: Deferred<void> = createTestingDeferred();
+            const spawnOptions: SpawnOptions = {
+                cwd,
+                throwOnStdErr: true,
+                env: mutableEnv,
+                token,
+            };
+
+            let resultProc: ChildProcess | undefined;
+
+            // Set up cancellation handler after all early return checks
+            const cancellationHandler = token?.onCancellationRequested(() => {
+                traceInfo(`Cancellation requested during pytest discovery for workspace ${uri.fsPath}`);
+                cleanupOnCancellation('pytest', resultProc, deferredTillExecClose, discoveryPipeCancellation, uri);
+            });
+            if (cancellationHandler) {
+                disposables.push(cancellationHandler);
+            }
 
-        let resultProc: ChildProcess | undefined;
+            try {
+                const result = execService.execObservable(commandArgs, spawnOptions);
+                resultProc = result?.proc;
 
-        token?.onCancellationRequested(() => {
-            traceInfo(`Test discovery cancelled, killing pytest subprocess for workspace ${uri.fsPath}`);
-            // if the resultProc exists just call kill on it which will handle resolving the ExecClose deferred, otherwise resolve the deferred here.
-            if (resultProc) {
-                resultProc?.kill();
-            } else {
+                if (!resultProc) {
+                    traceError(`Failed to spawn pytest discovery subprocess for workspace ${uri.fsPath}`);
+                    deferredTillExecClose.resolve();
+                    return;
+                }
+                traceInfo(`Started pytest discovery subprocess (execution factory) for workspace ${uri.fsPath}`);
+            } catch (error) {
+                traceError(`Error spawning pytest discovery subprocess for workspace ${uri.fsPath}: ${error}`);
                 deferredTillExecClose.resolve();
-                cSource.cancel();
+                throw error;
             }
-        });
-        const result = execService?.execObservable(execArgs, spawnOptions);
-        resultProc = result?.proc;
-
-        // Take all output from the subprocess and add it to the test output channel. This will be the pytest output.
-        // Displays output to user and ensure the subprocess doesn't run into buffer overflow.
-
-        result?.proc?.stdout?.on('data', (data) => {
-            const out = fixLogLinesNoTrailing(data.toString());
-            traceInfo(out);
-        });
-        result?.proc?.stderr?.on('data', (data) => {
-            const out = fixLogLinesNoTrailing(data.toString());
-            traceError(out);
-        });
-        result?.proc?.on('exit', (code, signal) => {
-            if (code !== 0) {
-                traceError(
-                    `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal} on workspace ${uri.fsPath}.`,
-                );
-            }
-        });
-        result?.proc?.on('close', (code, signal) => {
-            // pytest exits with code of 5 when 0 tests are found- this is not a failure for discovery.
-            if (code !== 0 && code !== 5) {
-                traceError(
-                    `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal} on workspace ${uri.fsPath}. Creating and sending error discovery payload`,
-                );
-                this.resultResolver?.resolveDiscovery(createDiscoveryErrorPayload(code, signal, cwd));
-            }
-            // due to the sync reading of the output.
-            deferredTillExecClose?.resolve();
-        });
-        await deferredTillExecClose.promise;
+            resultProc.stdout?.on('data', handlers.onStdout);
+            resultProc.stderr?.on('data', handlers.onStderr);
+            resultProc.on('exit', handlers.onExit);
+            resultProc.on('close', handlers.onClose);
+
+            traceVerbose(`Waiting for pytest discovery subprocess to complete for workspace ${uri.fsPath}`);
+            await deferredTillExecClose.promise;
+            traceInfo(`Pytest discovery completed for workspace ${uri.fsPath}`);
+        } catch (error) {
+            traceError(`Error during pytest discovery for workspace ${uri.fsPath}: ${error}`);
+            deferredTillExecClose.resolve();
+            throw error;
+        } finally {
+            // Dispose all cancellation handlers and event subscriptions
+            disposables.forEach((d) => d.dispose());
+            // Dispose the discovery pipe cancellation token
+            discoveryPipeCancellation.dispose();
+        }
     }
 }
diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts
index 053c497c56e0..3b2f9f7de33a 100644
--- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts
+++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts
@@ -32,9 +32,9 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
     async runTests(
         uri: Uri,
         testIds: string[],
-        profileKind?: TestRunProfileKind,
-        runInstance?: TestRun,
-        executionFactory?: IPythonExecutionFactory,
+        profileKind: boolean | TestRunProfileKind | undefined,
+        runInstance: TestRun,
+        executionFactory: IPythonExecutionFactory,
         debugLauncher?: ITestDebugLauncher,
         interpreter?: PythonEnvironment,
     ): Promise<void> {
@@ -49,14 +49,14 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
             }
         };
         const cSource = new CancellationTokenSource();
-        runInstance?.token.onCancellationRequested(() => cSource.cancel());
+        runInstance.token.onCancellationRequested(() => cSource.cancel());
 
         const name = await utils.startRunResultNamedPipe(
             dataReceivedCallback, // callback to handle data received
             deferredTillServerClose, // deferred to resolve when server closes
             cSource.token, // token to cancel
         );
-        runInstance?.token.onCancellationRequested(() => {
+        runInstance.token.onCancellationRequested(() => {
             traceInfo(`Test run cancelled, resolving 'TillServerClose' deferred for ${uri.fsPath}.`);
         });
 
@@ -82,9 +82,9 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
         testIds: string[],
         resultNamedPipeName: string,
         serverCancel: CancellationTokenSource,
-        runInstance?: TestRun,
-        profileKind?: TestRunProfileKind,
-        executionFactory?: IPythonExecutionFactory,
+        runInstance: TestRun,
+        profileKind: boolean | TestRunProfileKind | undefined,
+        executionFactory: IPythonExecutionFactory,
         debugLauncher?: ITestDebugLauncher,
         interpreter?: PythonEnvironment,
     ): Promise<void> {
@@ -114,7 +114,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
             interpreter,
         };
         // need to check what will happen if the exec service is NOT defined and is null
-        const execService = await executionFactory?.createActivatedEnvironment(creationOptions);
+        const execService = await executionFactory.createActivatedEnvironment(creationOptions);
 
         const execInfo = await execService?.getExecutablePath();
         traceVerbose(`Executable path for pytest execution: ${execInfo}.`);
 
@@ -144,14 +144,14 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
             cwd,
             throwOnStdErr: true,
             env: mutableEnv,
-            token: runInstance?.token,
+            token: runInstance.token,
         };
 
         if (debugBool) {
             const launchOptions: LaunchOptions = {
                 cwd,
                 args: testArgs,
-                token: runInstance?.token,
+                token: runInstance.token,
                 testProvider: PYTEST_PROVIDER,
                 runTestIdsPort: testIdsFileName,
                 pytestPort: resultNamedPipeName,
@@ -181,7 +181,7 @@
                     args: runArgs,
                     env: (mutableEnv as unknown) as { [key: string]: string },
                 });
-                runInstance?.token.onCancellationRequested(() => {
+                runInstance.token.onCancellationRequested(() => {
                     traceInfo(`Test run cancelled, killing pytest subprocess for workspace ${uri.fsPath}`);
                     proc.kill();
                     deferredTillExecClose.resolve();
@@ -189,11 +189,11 @@
                 });
                 proc.stdout.on('data', (data) => {
                     const out = utils.fixLogLinesNoTrailing(data.toString());
-                    runInstance?.appendOutput(out);
+                    runInstance.appendOutput(out);
                 });
                 proc.stderr.on('data', (data) => {
                     const out = utils.fixLogLinesNoTrailing(data.toString());
-                    runInstance?.appendOutput(out);
+                    runInstance.appendOutput(out);
                 });
                 proc.onExit((code, signal) => {
                     if (code !== 0) {
@@ -218,7 +218,7 @@
 
             let resultProc: ChildProcess | undefined;
 
-            runInstance?.token.onCancellationRequested(() => {
+            runInstance.token.onCancellationRequested(() => {
                 traceInfo(`Test run cancelled, killing pytest subprocess for workspace ${uri.fsPath}`);
                 // if the resultProc exists just call kill on it which will handle resolving the ExecClose deferred, otherwise resolve the deferred here.
                 if (resultProc) {
@@ -235,11 +235,11 @@
             // Displays output to user and ensure the subprocess doesn't run into buffer overflow.
             result?.proc?.stdout?.on('data', (data) => {
                 const out = utils.fixLogLinesNoTrailing(data.toString());
-                runInstance?.appendOutput(out);
+                runInstance.appendOutput(out);
             });
             result?.proc?.stderr?.on('data', (data) => {
                 const out = utils.fixLogLinesNoTrailing(data.toString());
-                runInstance?.appendOutput(out);
+                runInstance.appendOutput(out);
             });
             result?.proc?.on('exit', (code, signal) => {
                 if (code !== 0) {
diff --git a/src/client/testing/testController/pytest/pytestHelpers.ts b/src/client/testing/testController/pytest/pytestHelpers.ts
new file mode 100644
index 000000000000..c6e748fb85a7
--- /dev/null
+++ b/src/client/testing/testController/pytest/pytestHelpers.ts
@@ -0,0 +1,58 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+import * as path from 'path';
+import * as fs from 'fs';
+import { traceInfo, traceWarn } from '../../../logging';
+import { addValueIfKeyNotExist, hasSymlinkParent } from '../common/utils';
+
+/**
+ * Checks if the current working directory contains a symlink and ensures --rootdir is set in pytest args.
+ * This is required for pytest to correctly resolve relative paths in symlinked directories.
+ */
+export async function handleSymlinkAndRootDir(cwd: string, pytestArgs: string[]): Promise<string[]> {
+    const stats = await fs.promises.lstat(cwd);
+    const resolvedPath = await fs.promises.realpath(cwd);
+    let isSymbolicLink = false;
+    if (stats.isSymbolicLink()) {
+        isSymbolicLink = true;
+        traceWarn(`Working directory is a symbolic link: ${cwd} -> ${resolvedPath}`);
+    } else if (resolvedPath !== cwd) {
+        traceWarn(
+            `Working directory resolves to different path: ${cwd} -> ${resolvedPath}.
Checking for symlinks in parent directories.`, + ); + isSymbolicLink = await hasSymlinkParent(cwd); + } + if (isSymbolicLink) { + traceWarn( + `Symlink detected in path. Adding '--rootdir=${cwd}' to pytest args to ensure correct path resolution.`, + ); + pytestArgs = addValueIfKeyNotExist(pytestArgs, '--rootdir', cwd); + } + // if user has provided `--rootdir` then use that, otherwise add `cwd` + // root dir is required so pytest can find the relative paths and for symlinks + pytestArgs = addValueIfKeyNotExist(pytestArgs, '--rootdir', cwd); + return pytestArgs; +} + +/** + * Builds the environment variables required for pytest discovery. + * Sets PYTHONPATH to include the plugin path and TEST_RUN_PIPE for communication. + */ +export function buildPytestEnv( + envVars: { [key: string]: string | undefined } | undefined, + fullPluginPath: string, + discoveryPipeName: string, +): { [key: string]: string | undefined } { + const mutableEnv = { + ...envVars, + }; + // get python path from mutable env, it contains process.env as well + const pythonPathParts: string[] = mutableEnv.PYTHONPATH?.split(path.delimiter) ?? []; + const pythonPathCommand = [fullPluginPath, ...pythonPathParts].join(path.delimiter); + mutableEnv.PYTHONPATH = pythonPathCommand; + mutableEnv.TEST_RUN_PIPE = discoveryPipeName; + traceInfo( + `Environment variables set for pytest discovery: PYTHONPATH=${mutableEnv.PYTHONPATH}, TEST_RUN_PIPE=${mutableEnv.TEST_RUN_PIPE}`, + ); + return mutableEnv; +} diff --git a/src/client/testing/testController/unittest/testDiscoveryAdapter.ts b/src/client/testing/testController/unittest/testDiscoveryAdapter.ts index 23d70568687f..7c986e95a449 100644 --- a/src/client/testing/testController/unittest/testDiscoveryAdapter.ts +++ b/src/client/testing/testController/unittest/testDiscoveryAdapter.ts @@ -1,33 +1,43 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-import * as path from 'path';
-import { CancellationTokenSource, Uri } from 'vscode';
-import { CancellationToken } from 'vscode-jsonrpc';
+import { CancellationToken, Disposable, Uri } from 'vscode';
 import { ChildProcess } from 'child_process';
 import { IConfigurationService } from '../../../common/types';
 import { EXTENSION_ROOT_DIR } from '../../../constants';
-import {
-    DiscoveredTestPayload,
-    ITestDiscoveryAdapter,
-    ITestResultResolver,
-    TestCommandOptions,
-    TestDiscoveryCommand,
-} from '../common/types';
-import { createDeferred } from '../../../common/utils/async';
-import { EnvironmentVariables, IEnvironmentVariablesProvider } from '../../../common/variables/types';
+import { ITestDiscoveryAdapter, ITestResultResolver } from '../common/types';
+import { IEnvironmentVariablesProvider } from '../../../common/variables/types';
 import {
     ExecutionFactoryCreateWithEnvironmentOptions,
-    ExecutionResult,
     IPythonExecutionFactory,
     SpawnOptions,
 } from '../../../common/process/types';
-import { createDiscoveryErrorPayload, fixLogLinesNoTrailing, startDiscoveryNamedPipe } from '../common/utils';
-import { traceError, traceInfo, traceLog, traceVerbose } from '../../../logging';
+import { traceError, traceInfo, traceVerbose } from '../../../logging';
 import { getEnvironment, runInBackground, useEnvExtension } from '../../../envExt/api.internal';
+import { PythonEnvironment } from '../../../pythonEnvironments/info';
+import { createTestingDeferred } from '../common/utils';
+import { buildDiscoveryCommand, buildUnittestEnv as configureSubprocessEnv } from './unittestHelpers';
+import { cleanupOnCancellation, createProcessHandlers, setupDiscoveryPipe } from '../common/discoveryHelpers';
 
 /**
- * Wrapper class for unittest test discovery. This is where we call `runTestCommand`.
+ * Configures the subprocess environment for unittest discovery.
+ * @param envVarsService Service to retrieve environment variables
+ * @param uri Workspace URI
+ * @param discoveryPipeName Name of the discovery pipe to pass to the subprocess
+ * @returns Configured environment variables for the subprocess
+ */
+async function configureDiscoveryEnv(
+    envVarsService: IEnvironmentVariablesProvider | undefined,
+    uri: Uri,
+    discoveryPipeName: string,
+): Promise<{ [key: string]: string | undefined }> {
+    const envVars = await envVarsService?.getEnvironmentVariables(uri);
+    const mutableEnv = configureSubprocessEnv(envVars, discoveryPipeName);
+    return mutableEnv;
+}
+
+/**
+ * Wrapper class for unittest test discovery.
  */
 export class UnittestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
     constructor(
@@ -36,181 +46,156 @@ export class UnittestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
         private readonly envVarsService?: IEnvironmentVariablesProvider,
     ) {}
 
-    public async discoverTests(
+    async discoverTests(
         uri: Uri,
-        executionFactory?: IPythonExecutionFactory,
+        executionFactory: IPythonExecutionFactory,
         token?: CancellationToken,
+        interpreter?: PythonEnvironment,
     ): Promise<DiscoveredTestPayload> {
-        const settings = this.configSettings.getSettings(uri);
-        const { unittestArgs } = settings.testing;
-        const cwd = settings.testing.cwd && settings.testing.cwd.length > 0 ? settings.testing.cwd : uri.fsPath;
-
-        const cSource = new CancellationTokenSource();
-        // Create a deferred to return to the caller
-        const deferredReturn = createDeferred();
-
-        token?.onCancellationRequested(() => {
-            traceInfo(`Test discovery cancelled.`);
-            cSource.cancel();
-            deferredReturn.resolve();
-        });
-
-        const name = await startDiscoveryNamedPipe((data: DiscoveredTestPayload) => {
-            if (!token?.isCancellationRequested) {
-                this.resultResolver?.resolveDiscovery(data);
-            }
-        }, cSource.token);
-
-        // set up env with the pipe name
-        let env: EnvironmentVariables | undefined = await this.envVarsService?.getEnvironmentVariables(uri);
-        if (env === undefined) {
-            env = {} as EnvironmentVariables;
+        // Setup discovery pipe and cancellation
+        const {
+            pipeName: discoveryPipeName,
+            cancellation: discoveryPipeCancellation,
+            tokenDisposable,
+        } = await setupDiscoveryPipe(this.resultResolver, token, uri);
+
+        // Setup process handlers deferred (used by both execution paths)
+        const deferredTillExecClose = createTestingDeferred();
+
+        // Collect all disposables for cleanup in finally block
+        const disposables: Disposable[] = [];
+        if (tokenDisposable) {
+            disposables.push(tokenDisposable);
         }
-        env.TEST_RUN_PIPE = name;
-
-        const command = buildDiscoveryCommand(unittestArgs);
-        const options: TestCommandOptions = {
-            workspaceFolder: uri,
-            command,
-            cwd,
-            token,
-        };
-
-        this.runDiscovery(uri, options, name, cwd, cSource, executionFactory).then(() => {
-            deferredReturn.resolve();
-        });
-
-        return deferredReturn.promise;
-    }
-
-    async runDiscovery(
-        uri: Uri,
-        options: TestCommandOptions,
-        testRunPipeName: string,
-        cwd: string,
-        cSource: CancellationTokenSource,
-        executionFactory?: IPythonExecutionFactory,
-    ): Promise<void> {
-        // get and edit env vars
-        const mutableEnv = {
-            ...(await this.envVarsService?.getEnvironmentVariables(uri)),
-        };
-        mutableEnv.TEST_RUN_PIPE = testRunPipeName;
-        const args = [options.command.script].concat(options.command.args);
-
-        if (options.outChannel) {
-            options.outChannel.appendLine(`python ${args.join(' ')}`);
-        }
-
-        if (useEnvExtension()) {
-            const pythonEnv = await getEnvironment(uri);
-            if (pythonEnv) {
-                const deferredTillExecClose = createDeferred();
+        try {
+            // Build unittest command and arguments
+            const settings = this.configSettings.getSettings(uri);
+            const { unittestArgs } = settings.testing;
+            const cwd = settings.testing.cwd && settings.testing.cwd.length > 0 ? settings.testing.cwd : uri.fsPath;
+            const execArgs = buildDiscoveryCommand(unittestArgs, EXTENSION_ROOT_DIR);
+            traceVerbose(`Running unittest discovery with command: ${execArgs.join(' ')} for workspace ${uri.fsPath}.`);
+
+            // Configure subprocess environment
+            const mutableEnv = await configureDiscoveryEnv(this.envVarsService, uri, discoveryPipeName);
+
+            // Setup process handlers (shared by both execution paths)
+            const handlers = createProcessHandlers('unittest', uri, cwd, this.resultResolver, deferredTillExecClose);
+
+            // Execute using environment extension if available
+            if (useEnvExtension()) {
+                traceInfo(`Using environment extension for unittest discovery in workspace ${uri.fsPath}`);
+                const pythonEnv = await getEnvironment(uri);
+                if (!pythonEnv) {
+                    traceError(
+                        `Python environment not found for workspace ${uri.fsPath}. Cannot proceed with test discovery.`,
+                    deferredTillExecClose.resolve();
+                    return;
+                }
+                traceVerbose(`Using Python environment: ${JSON.stringify(pythonEnv)}`);
                 const proc = await runInBackground(pythonEnv, {
                     cwd,
-                    args,
+                    args: execArgs,
                     env: (mutableEnv as unknown) as { [key: string]: string },
                 });
-                options.token?.onCancellationRequested(() => {
-                    traceInfo(`Test discovery cancelled, killing unittest subprocess for workspace ${uri.fsPath}`);
-                    proc.kill();
-                    deferredTillExecClose.resolve();
-                    cSource.cancel();
-                });
-                proc.stdout.on('data', (data) => {
-                    const out = fixLogLinesNoTrailing(data.toString());
-                    traceInfo(out);
-                });
-                proc.stderr.on('data', (data) => {
-                    const out = fixLogLinesNoTrailing(data.toString());
-                    traceError(out);
+                traceInfo(`Started unittest discovery subprocess (environment extension) for workspace ${uri.fsPath}`);
+
+                // Wire up cancellation and process events
+                const envExtCancellationHandler = token?.onCancellationRequested(() => {
+                    cleanupOnCancellation('unittest', proc, deferredTillExecClose, discoveryPipeCancellation, uri);
                 });
+                if (envExtCancellationHandler) {
+                    disposables.push(envExtCancellationHandler);
+                }
+                proc.stdout.on('data', handlers.onStdout);
+                proc.stderr.on('data', handlers.onStderr);
                 proc.onExit((code, signal) => {
-                    if (code !== 0) {
-                        traceError(
-                            `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal} on workspace ${uri.fsPath}`,
-                        );
-                    }
-                    deferredTillExecClose.resolve();
+                    handlers.onExit(code, signal);
+                    handlers.onClose(code, signal);
                 });
+                await deferredTillExecClose.promise;
-            } else {
-                traceError(`Python Environment not found for: ${uri.fsPath}`);
+                traceInfo(`Unittest discovery completed for workspace ${uri.fsPath}`);
+                return;
             }
-            return;
-        }

-        const spawnOptions: SpawnOptions = {
-            token: options.token,
-            cwd: options.cwd,
-            throwOnStdErr: true,
-            env: mutableEnv,
-        };
-
-        try {
-            traceLog(`Discovering unittest tests for workspace ${options.cwd} with arguments: ${args}\r\n`);
-            const deferredTillExecClose = createDeferred<ExecutionResult<string>>();
-
-            // Create the Python environment in which to execute the command.
+            // Execute using the execution factory (fallback path)
+            traceInfo(`Using execution factory for unittest discovery in workspace ${uri.fsPath}`);
             const creationOptions: ExecutionFactoryCreateWithEnvironmentOptions = {
                 allowEnvironmentFetchExceptions: false,
-                resource: options.workspaceFolder,
+                resource: uri,
+                interpreter,
+            };
+            const execService = await executionFactory.createActivatedEnvironment(creationOptions);
+            if (!execService) {
+                traceError(
+                    `Failed to create execution service for workspace ${uri.fsPath}. Cannot proceed with test discovery.`,
+                );
+                deferredTillExecClose.resolve();
+                return;
+            }
+            const execInfo = await execService.getExecutablePath();
+            traceVerbose(`Using Python executable: ${execInfo} for workspace ${uri.fsPath}`);
+
+            // Check for cancellation before spawning the process
+            if (token?.isCancellationRequested) {
+                traceInfo(`Unittest discovery cancelled before spawning process for workspace ${uri.fsPath}`);
+                deferredTillExecClose.resolve();
+                return;
+            }
+
+            const spawnOptions: SpawnOptions = {
+                cwd,
+                throwOnStdErr: true,
+                env: mutableEnv,
+                token,
             };
-            const execService = await executionFactory?.createActivatedEnvironment(creationOptions);
-            const execInfo = await execService?.getExecutablePath();
-            traceVerbose(`Executable path for unittest discovery: ${execInfo}.`);

             let resultProc: ChildProcess | undefined;

-            options.token?.onCancellationRequested(() => {
-                traceInfo(`Test discovery cancelled, killing unittest subprocess for workspace ${uri.fsPath}`);
-                // if the resultProc exists just call kill on it which will handle resolving the ExecClose deferred, otherwise resolve the deferred here.
-                if (resultProc) {
-                    resultProc?.kill();
-                } else {
-                    deferredTillExecClose.resolve();
-                    cSource.cancel();
-                }
-            });
-            const result = execService?.execObservable(args, spawnOptions);
-            resultProc = result?.proc;
-            // Displays output to user and ensure the subprocess doesn't run into buffer overflow.
-            result?.proc?.stdout?.on('data', (data) => {
-                const out = fixLogLinesNoTrailing(data.toString());
-                traceInfo(out);
-            });
-            result?.proc?.stderr?.on('data', (data) => {
-                const out = fixLogLinesNoTrailing(data.toString());
-                traceError(out);
+            // Set up the cancellation handler after all early-return checks
+            const cancellationHandler = token?.onCancellationRequested(() => {
+                traceInfo(`Cancellation requested during unittest discovery for workspace ${uri.fsPath}`);
+                cleanupOnCancellation('unittest', resultProc, deferredTillExecClose, discoveryPipeCancellation, uri);
             });
+            if (cancellationHandler) {
+                disposables.push(cancellationHandler);
+            }

-            result?.proc?.on('exit', (code, signal) => {
-                // if the child has testIds then this is a run request
+            try {
+                const result = execService.execObservable(execArgs, spawnOptions);
+                resultProc = result?.proc;

-                if (code !== 0) {
-                    // This occurs when we are running discovery
-                    traceError(
-                        `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal} on workspace ${options.cwd}. Creating and sending error discovery payload \n`,
-                    );
-                    traceError(
-                        `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal} on workspace ${uri.fsPath}. Creating and sending error discovery payload`,
-                    );
-                    this.resultResolver?.resolveDiscovery(createDiscoveryErrorPayload(code, signal, cwd));
+                if (!resultProc) {
+                    traceError(`Failed to spawn unittest discovery subprocess for workspace ${uri.fsPath}`);
+                    deferredTillExecClose.resolve();
+                    return;
                 }
+                traceInfo(`Started unittest discovery subprocess (execution factory) for workspace ${uri.fsPath}`);
+            } catch (error) {
+                traceError(`Error spawning unittest discovery subprocess for workspace ${uri.fsPath}: ${error}`);
                 deferredTillExecClose.resolve();
-            });
+                throw error;
+            }

+            resultProc.stdout?.on('data', handlers.onStdout);
+            resultProc.stderr?.on('data', handlers.onStderr);
+            resultProc.on('exit', handlers.onExit);
+            resultProc.on('close', handlers.onClose);
+
+            traceVerbose(`Waiting for unittest discovery subprocess to complete for workspace ${uri.fsPath}`);
             await deferredTillExecClose.promise;
-        } catch (ex) {
-            traceError(`Error while server attempting to run unittest command for workspace ${uri.fsPath}: ${ex}`);
+            traceInfo(`Unittest discovery completed for workspace ${uri.fsPath}`);
+        } catch (error) {
+            traceError(`Error during unittest discovery for workspace ${uri.fsPath}: ${error}`);
+            deferredTillExecClose.resolve();
+            throw error;
+        } finally {
+            traceVerbose(`Cleaning up unittest discovery resources for workspace ${uri.fsPath}`);
+            // Dispose all cancellation handlers and event subscriptions
+            disposables.forEach((d) => d.dispose());
+            // Dispose the discovery pipe cancellation token
+            discoveryPipeCancellation.dispose();
         }
     }
 }
-
-function buildDiscoveryCommand(args: string[]): TestDiscoveryCommand {
-    const discoveryScript = path.join(EXTENSION_ROOT_DIR, 'python_files', 'unittestadapter', 'discovery.py');
-
-    return {
-        script: discoveryScript,
-        args: ['--udiscovery', ...args],
-    };
-}
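Both execution paths above now delegate to helpers imported from `../common/discoveryHelpers`, a module this diff does not include. As a reading aid, the following sketch is hypothetical: the shapes are inferred only from the call sites in `discoverTests`, and the real module may differ.

```typescript
// Hypothetical shapes, inferred from the call sites above; not the actual module.
import { CancellationTokenSource, Disposable } from 'vscode';

// setupDiscoveryPipe(resultResolver, token, uri) appears to resolve to this shape.
interface DiscoveryPipeSetup {
    pipeName: string; // handed to the subprocess via TEST_RUN_PIPE
    cancellation: CancellationTokenSource; // disposed in the finally block
    tokenDisposable?: Disposable; // present when a caller token was wired up
}

// createProcessHandlers('unittest', uri, cwd, resultResolver, deferred) appears to
// return callbacks that both execution paths attach to their subprocess.
interface ProcessHandlers {
    onStdout(data: Buffer | string): void;
    onStderr(data: Buffer | string): void;
    onExit(code: number | null, signal: NodeJS.Signals | null): void;
    onClose(code: number | null, signal: NodeJS.Signals | null): void;
}
```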
diff --git a/src/client/testing/testController/unittest/testExecutionAdapter.ts b/src/client/testing/testController/unittest/testExecutionAdapter.ts
index 74572ea5c63c..cbc1d2985f84 100644
--- a/src/client/testing/testController/unittest/testExecutionAdapter.ts
+++ b/src/client/testing/testController/unittest/testExecutionAdapter.ts
@@ -42,9 +42,9 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {
     public async runTests(
         uri: Uri,
         testIds: string[],
-        profileKind?: TestRunProfileKind,
-        runInstance?: TestRun,
-        executionFactory?: IPythonExecutionFactory,
+        profileKind: boolean | TestRunProfileKind | undefined,
+        runInstance: TestRun,
+        executionFactory: IPythonExecutionFactory,
         debugLauncher?: ITestDebugLauncher,
     ): Promise<void> {
         // deferredTillServerClose awaits named pipe server close
@@ -59,13 +59,13 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {
             }
         };
         const cSource = new CancellationTokenSource();
-        runInstance?.token.onCancellationRequested(() => cSource.cancel());
+        runInstance.token.onCancellationRequested(() => cSource.cancel());
         const name = await utils.startRunResultNamedPipe(
             dataReceivedCallback, // callback to handle data received
             deferredTillServerClose, // deferred to resolve when server closes
             cSource.token, // token to cancel
         );
-        runInstance?.token.onCancellationRequested(() => {
+        runInstance.token.onCancellationRequested(() => {
             console.log(`Test run cancelled, resolving 'till TillAllServerClose' deferred for ${uri.fsPath}.`);
             // if canceled, stop listening for results
             deferredTillServerClose.resolve();
@@ -93,9 +93,9 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {
         testIds: string[],
         resultNamedPipeName: string,
         serverCancel: CancellationTokenSource,
-        runInstance?: TestRun,
-        profileKind?: TestRunProfileKind,
-        executionFactory?: IPythonExecutionFactory,
+        runInstance: TestRun,
+        profileKind: boolean | TestRunProfileKind | undefined,
+        executionFactory: IPythonExecutionFactory,
         debugLauncher?: ITestDebugLauncher,
     ): Promise<void> {
         const settings = this.configSettings.getSettings(uri);
@@ -119,9 +119,9 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {
             workspaceFolder: uri,
             command,
             cwd,
-            profileKind,
+            profileKind: typeof profileKind === 'boolean' ? undefined : profileKind,
             testIds,
-            token: runInstance?.token,
+            token: runInstance.token,
         };

         traceLog(`Running UNITTEST execution for the following test ids: ${testIds}`);
@@ -145,7 +145,7 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {
             allowEnvironmentFetchExceptions: false,
             resource: options.workspaceFolder,
         };
-        const execService = await executionFactory?.createActivatedEnvironment(creationOptions);
+        const execService = await executionFactory.createActivatedEnvironment(creationOptions);
         const execInfo = await execService?.getExecutablePath();
         traceVerbose(`Executable path for unittest execution: ${execInfo}.`);
@@ -193,7 +193,7 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {
                     args,
                     env: (mutableEnv as unknown) as { [key: string]: string },
                 });
-                runInstance?.token.onCancellationRequested(() => {
+                runInstance.token.onCancellationRequested(() => {
                     traceInfo(`Test run cancelled, killing unittest subprocess for workspace ${uri.fsPath}`);
                     proc.kill();
                     deferredTillExecClose.resolve();
@@ -201,11 +201,11 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {
                 });
                 proc.stdout.on('data', (data) => {
                     const out = utils.fixLogLinesNoTrailing(data.toString());
-                    runInstance?.appendOutput(out);
+                    runInstance.appendOutput(out);
                 });
                 proc.stderr.on('data', (data) => {
                     const out = utils.fixLogLinesNoTrailing(data.toString());
-                    runInstance?.appendOutput(out);
+                    runInstance.appendOutput(out);
                 });
                 proc.onExit((code, signal) => {
                     if (code !== 0) {
@@ -228,7 +228,7 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {

         let resultProc: ChildProcess | undefined;

-        runInstance?.token.onCancellationRequested(() => {
+        runInstance.token.onCancellationRequested(() => {
             traceInfo(`Test run cancelled, killing unittest subprocess for workspace ${cwd}.`);
             // if the resultProc exists just call kill on it which will handle resolving the ExecClose deferred, otherwise resolve the deferred here.
             if (resultProc) {
@@ -246,11 +246,11 @@ export class UnittestTestExecutionAdapter implements ITestExecutionAdapter {

         result?.proc?.stdout?.on('data', (data) => {
             const out = fixLogLinesNoTrailing(data.toString());
-            runInstance?.appendOutput(`${out}`);
+            runInstance.appendOutput(`${out}`);
         });
         result?.proc?.stderr?.on('data', (data) => {
             const out = fixLogLinesNoTrailing(data.toString());
-            runInstance?.appendOutput(`${out}`);
+            runInstance.appendOutput(`${out}`);
         });
         result?.proc?.on('exit', (code, signal) => {
diff --git a/src/client/testing/testController/unittest/unittestHelpers.ts b/src/client/testing/testController/unittest/unittestHelpers.ts
new file mode 100644
index 000000000000..249a78dda7b7
--- /dev/null
+++ b/src/client/testing/testController/unittest/unittestHelpers.ts
@@ -0,0 +1,28 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+import * as path from 'path';
+import { traceInfo } from '../../../logging';
+
+/**
+ * Builds the environment variables required for unittest discovery.
+ * Sets TEST_RUN_PIPE for communication.
+ */
+export function buildUnittestEnv(
+    envVars: { [key: string]: string | undefined } | undefined,
+    discoveryPipeName: string,
+): { [key: string]: string | undefined } {
+    const mutableEnv = {
+        ...envVars,
+    };
+    mutableEnv.TEST_RUN_PIPE = discoveryPipeName;
+    traceInfo(`Environment variables set for unittest discovery: TEST_RUN_PIPE=${mutableEnv.TEST_RUN_PIPE}`);
+    return mutableEnv;
+}
+
+/**
+ * Builds the unittest discovery command.
+ */
+export function buildDiscoveryCommand(args: string[], extensionRootDir: string): string[] {
+    const discoveryScript = path.join(extensionRootDir, 'python_files', 'unittestadapter', 'discovery.py');
+    return [discoveryScript, '--udiscovery', ...args];
+}
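Since `unittestHelpers.ts` is new in this change, a brief usage sketch may help reviewers. The two exports are real; the argument values below are made up, and the shown results assume a POSIX `path.join`.

```typescript
// Illustrative usage of the new helpers; values are made up for the example.
import { buildDiscoveryCommand, buildUnittestEnv } from './unittestHelpers';

const execArgs = buildDiscoveryCommand(['-v', '-s', '.'], '/ext/root');
// On POSIX: ['/ext/root/python_files/unittestadapter/discovery.py', '--udiscovery', '-v', '-s', '.']

const env = buildUnittestEnv({ PATH: process.env.PATH }, 'discovery-pipe-1');
// A copy of the input env with env.TEST_RUN_PIPE === 'discovery-pipe-1'
```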
diff --git a/src/client/testing/testController/workspaceTestAdapter.ts b/src/client/testing/testController/workspaceTestAdapter.ts
index a73acdaba5f0..75b9489f708e 100644
--- a/src/client/testing/testController/workspaceTestAdapter.ts
+++ b/src/client/testing/testController/workspaceTestAdapter.ts
@@ -42,9 +42,9 @@ export class WorkspaceTestAdapter {
         testController: TestController,
         runInstance: TestRun,
         includes: TestItem[],
+        executionFactory: IPythonExecutionFactory,
         token?: CancellationToken,
         profileKind?: boolean | TestRunProfileKind,
-        executionFactory?: IPythonExecutionFactory,
         debugLauncher?: ITestDebugLauncher,
         interpreter?: PythonEnvironment,
     ): Promise<void> {
@@ -73,20 +73,18 @@ export class WorkspaceTestAdapter {
                 }
             });
             const testCaseIds = Array.from(testCaseIdsSet);
-            // ** execution factory only defined for new rewrite way
-            if (executionFactory !== undefined) {
-                await this.executionAdapter.runTests(
-                    this.workspaceUri,
-                    testCaseIds,
-                    profileKind,
-                    runInstance,
-                    executionFactory,
-                    debugLauncher,
-                    interpreter,
-                );
-            } else {
-                await this.executionAdapter.runTests(this.workspaceUri, testCaseIds, profileKind);
+            if (executionFactory === undefined) {
+                throw new Error('Execution factory is required for test execution');
             }
+            await this.executionAdapter.runTests(
+                this.workspaceUri,
+                testCaseIds,
+                profileKind,
+                runInstance,
+                executionFactory,
+                debugLauncher,
+                interpreter,
+            );
             deferred.resolve();
         } catch (ex) {
             // handle token and telemetry here
@@ -116,8 +114,8 @@ export class WorkspaceTestAdapter {

     public async discoverTests(
         testController: TestController,
+        executionFactory: IPythonExecutionFactory,
         token?: CancellationToken,
-        executionFactory?: IPythonExecutionFactory,
         interpreter?: PythonEnvironment,
     ): Promise<void> {
         sendTelemetryEvent(EventName.UNITTEST_DISCOVERING, undefined, { tool: this.testProvider });
@@ -132,12 +130,10 @@ export class WorkspaceTestAdapter {
         this.discovering = deferred;

         try {
-            // ** execution factory only defined for new rewrite way
-            if (executionFactory !== undefined) {
-                await this.discoveryAdapter.discoverTests(this.workspaceUri, executionFactory, token, interpreter);
-            } else {
-                await this.discoveryAdapter.discoverTests(this.workspaceUri);
+            if (executionFactory === undefined) {
+                throw new Error('Execution factory is required for test discovery');
             }
+            await this.discoveryAdapter.discoverTests(this.workspaceUri, executionFactory, token, interpreter);
             deferred.resolve();
         } catch (ex) {
             sendTelemetryEvent(EventName.UNITTEST_DISCOVERY_DONE, undefined, { tool: this.testProvider, failed: true });
diff --git a/src/test/chat/utils.unit.test.ts b/src/test/chat/utils.unit.test.ts
new file mode 100644
index 000000000000..8d45c1ac118f
--- /dev/null
+++ b/src/test/chat/utils.unit.test.ts
@@ -0,0 +1,248 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+'use strict';
+
+import { expect } from 'chai';
+import * as sinon from 'sinon';
+import { Uri, WorkspaceFolder } from 'vscode';
+import { resolveFilePath } from '../../client/chat/utils';
+import * as workspaceApis from '../../client/common/vscodeApis/workspaceApis';
+
+suite('Chat Utils - resolveFilePath()', () => {
+    let getWorkspaceFoldersStub: sinon.SinonStub;
+
+    setup(() => {
+        getWorkspaceFoldersStub = sinon.stub(workspaceApis, 'getWorkspaceFolders');
+        getWorkspaceFoldersStub.returns([]);
+    });
+
+    teardown(() => {
+        sinon.restore();
+    });
+
+    suite('When filepath is undefined or empty', () => {
+        test('Should return first workspace folder URI when workspace folders exist', () => {
+            const expectedUri = Uri.file('/test/workspace');
+            const mockFolder: WorkspaceFolder = {
+                uri: expectedUri,
+                name: 'test',
+                index: 0,
+            };
+            getWorkspaceFoldersStub.returns([mockFolder]);
+
+            const result = resolveFilePath(undefined);
+
+            expect(result?.toString()).to.equal(expectedUri.toString());
+        });
+
+        test('Should return first folder when multiple workspace folders exist', () => {
+            const firstUri = Uri.file('/first/workspace');
+            const secondUri = Uri.file('/second/workspace');
+            const mockFolders: WorkspaceFolder[] = [
+                { uri: firstUri, name: 'first', index: 0 },
+                { uri: secondUri, name: 'second', index: 1 },
+            ];
+            getWorkspaceFoldersStub.returns(mockFolders);
+
+            const result = resolveFilePath(undefined);
+
+            expect(result?.toString()).to.equal(firstUri.toString());
+        });
+
+        test('Should return undefined when no workspace folders exist', () => {
+            getWorkspaceFoldersStub.returns(undefined);
+
+            const result = resolveFilePath(undefined);
+
+            expect(result).to.be.undefined;
+        });
+
+        test('Should return undefined when workspace folders is empty array', () => {
+            getWorkspaceFoldersStub.returns([]);
+
+            const result = resolveFilePath(undefined);
+
+            expect(result).to.be.undefined;
+        });
+
+        test('Should return undefined for empty string when no workspace folders', () => {
+            getWorkspaceFoldersStub.returns(undefined);
+
+            const result = resolveFilePath('');
+
+            expect(result).to.be.undefined;
+        });
+    });
+
+    suite('Windows file paths', () => {
+        test('Should handle Windows path with lowercase drive letter', () => {
+            const filepath = 'c:\\GIT\\tests\\simple-python-app';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+            // Uri.file normalizes drive letters to lowercase
+            expect(result?.fsPath.toLowerCase()).to.include('git');
+        });
+
+        test('Should handle Windows path with uppercase drive letter', () => {
+            const filepath = 'C:\\Users\\test\\project';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+            expect(result?.fsPath.toLowerCase()).to.include('users');
+        });
+
+        test('Should handle Windows path with forward slashes', () => {
+            const filepath = 'C:/Users/test/project';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+    });
+
+    suite('Unix file paths', () => {
+        test('Should handle Unix absolute path', () => {
+            const filepath = '/home/user/projects/myapp';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+            expect(result?.path).to.include('/home/user/projects/myapp');
+        });
+
+        test('Should handle Unix root path', () => {
+            const filepath = '/';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+    });
+
+    suite('Relative paths', () => {
+        test('Should handle relative path with dot prefix', () => {
+            const filepath = './src/main.py';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+
+        test('Should handle relative path without prefix', () => {
+            const filepath = 'src/main.py';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+
+        test('Should handle parent directory reference', () => {
+            const filepath = '../other-project/file.py';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+    });
+
+    suite('URI schemes', () => {
+        test('Should handle file:// URI scheme', () => {
+            const filepath = 'file:///home/user/test.py';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+            expect(result?.path).to.include('/home/user/test.py');
+        });
+
+        test('Should handle vscode-notebook:// URI scheme', () => {
+            const filepath = 'vscode-notebook://jupyter/notebook.ipynb';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('vscode-notebook');
+        });
+
+        test('Should handle untitled: URI scheme without double slash as file path', () => {
+            const filepath = 'untitled:Untitled-1';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            // untitled: doesn't have ://, so it will be treated as a file path
+            expect(result?.scheme).to.equal('file');
+        });
+
+        test('Should handle https:// URI scheme', () => {
+            const filepath = 'https://example.com/path';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('https');
+        });
+
+        test('Should handle vscode-vfs:// URI scheme', () => {
+            const filepath = 'vscode-vfs://github/microsoft/vscode/file.ts';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('vscode-vfs');
+        });
+    });
+
+    suite('Edge cases', () => {
+        test('Should handle path with spaces', () => {
+            const filepath = '/home/user/my project/file.py';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+
+        test('Should handle path with special characters', () => {
+            const filepath = '/home/user/project-name_v2/file.py';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+
+        test('Should not treat Windows drive letter colon as URI scheme', () => {
+            // Windows path should not be confused with a URI scheme
+            const filepath = 'd:\\projects\\test';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+
+        test('Should not treat single colon as URI scheme', () => {
+            // A path with a colon but not :// should be treated as a file
+            const filepath = 'c:somepath';
+
+            const result = resolveFilePath(filepath);
+
+            expect(result).to.not.be.undefined;
+            expect(result?.scheme).to.equal('file');
+        });
+    });
+});
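The new tests pin down `resolveFilePath`'s contract: empty input falls back to the first workspace folder, only `scheme://` strings are parsed as URIs, and everything else (including `c:\...` and `untitled:...`) is treated as a file path. The actual implementation lives in `src/client/chat/utils.ts` and is not part of this diff; the following is only a minimal sketch consistent with these expectations.

```typescript
// Minimal sketch consistent with the tests above; not the actual implementation.
import { Uri } from 'vscode';
import { getWorkspaceFolders } from '../../client/common/vscodeApis/workspaceApis';

export function resolveFilePathSketch(filepath?: string): Uri | undefined {
    if (!filepath) {
        // Fall back to the first workspace folder, if any.
        const folders = getWorkspaceFolders();
        return folders && folders.length > 0 ? folders[0].uri : undefined;
    }
    // Only '<scheme>://' counts as a URI; 'c:\...' and 'untitled:...' stay file paths.
    return /^[A-Za-z][A-Za-z0-9+.-]*:\/\//.test(filepath) ? Uri.parse(filepath) : Uri.file(filepath);
}
```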
diff --git a/src/test/common/application/commands/reportIssueCommand.unit.test.ts b/src/test/common/application/commands/reportIssueCommand.unit.test.ts
index b1884fa83c21..175a43d14007 100644
--- a/src/test/common/application/commands/reportIssueCommand.unit.test.ts
+++ b/src/test/common/application/commands/reportIssueCommand.unit.test.ts
@@ -126,17 +126,17 @@ suite('Report Issue Command', () => {
         );
         const expectedData = fs.readFileSync(userDataTemplatePath, 'utf8');

-        const args: [string, { extensionId: string; issueBody: string; data: string }] = capture<
+        const args: [string, { extensionId: string; issueBody: string; extensionData: string }] = capture<
             AllCommands,
-            { extensionId: string; issueBody: string; data: string }
+            { extensionId: string; issueBody: string; extensionData: string }
         >(cmdManager.executeCommand).last();
         verify(cmdManager.registerCommand(Commands.ReportIssue, anything(), anything())).once();
         verify(cmdManager.executeCommand('workbench.action.openIssueReporter', anything())).once();
         expect(args[0]).to.be.equal('workbench.action.openIssueReporter');
-        const { issueBody, data } = args[1];
+        const { issueBody, extensionData } = args[1];
         expect(issueBody).to.be.equal(expectedIssueBody);
-        expect(data).to.be.equal(expectedData);
+        expect(extensionData).to.be.equal(expectedData);
     });

     test('Test if issue body is filled when only including settings which are explicitly set', async () => {
@@ -167,16 +167,16 @@ suite('Report Issue Command', () => {
         );
         const expectedData = fs.readFileSync(userDataTemplatePath, 'utf8');

-        const args: [string, { extensionId: string; issueBody: string; data: string }] = capture<
+        const args: [string, { extensionId: string; issueBody: string; extensionData: string }] = capture<
             AllCommands,
-            { extensionId: string; issueBody: string; data: string }
+            { extensionId: string; issueBody: string; extensionData: string }
         >(cmdManager.executeCommand).last();
         verify(cmdManager.executeCommand('workbench.action.openIssueReporter', anything())).once();
         expect(args[0]).to.be.equal('workbench.action.openIssueReporter');
-        const { issueBody, data } = args[1];
+        const { issueBody, extensionData } = args[1];
         expect(issueBody).to.be.equal(expectedIssueBody);
-        expect(data).to.be.equal(expectedData);
+        expect(extensionData).to.be.equal(expectedData);
     });
     test('Should send telemetry event when run Report Issue Command', async () => {
         const sendTelemetryStub = sinon.stub(Telemetry, 'sendTelemetryEvent');
diff --git a/src/test/common/terminals/service.unit.test.ts b/src/test/common/terminals/service.unit.test.ts
index 63a1cd544940..3a6d54c9390b 100644
--- a/src/test/common/terminals/service.unit.test.ts
+++ b/src/test/common/terminals/service.unit.test.ts
@@ -14,8 +14,9 @@ import {
     Uri,
     Terminal as VSCodeTerminal,
     WorkspaceConfiguration,
+    TerminalDataWriteEvent,
 } from 'vscode';
-import { ITerminalManager, IWorkspaceService } from '../../../client/common/application/types';
+import { IApplicationShell, ITerminalManager, IWorkspaceService } from '../../../client/common/application/types';
 import { EXTENSION_ROOT_DIR } from '../../../client/common/constants';
 import { IPlatformService } from '../../../client/common/platform/types';
 import { TerminalService } from '../../../client/common/terminal/service';
@@ -56,6 +57,9 @@ suite('Terminal Service', () => {
     let useEnvExtensionStub: sinon.SinonStub;
     let interpreterService: TypeMoq.IMock<IInterpreterService>;
     let options: TypeMoq.IMock<TerminalCreationOptions>;
+    let applicationShell: TypeMoq.IMock<IApplicationShell>;
+    let onDidWriteTerminalDataEmitter: EventEmitter<TerminalDataWriteEvent>;
+    let onDidChangeTerminalStateEmitter: EventEmitter<VSCodeTerminal>;

     setup(() => {
         useEnvExtensionStub = sinon.stub(extapi, 'useEnvExtension');
@@ -118,6 +122,17 @@ suite('Terminal Service', () => {
         mockServiceContainer.setup((c) => c.get(ITerminalActivator)).returns(() => terminalActivator.object);
         mockServiceContainer.setup((c) => c.get(ITerminalAutoActivation)).returns(() => terminalAutoActivator.object);
         mockServiceContainer.setup((c) => c.get(IInterpreterService)).returns(() => interpreterService.object);
+
+        applicationShell = TypeMoq.Mock.ofType<IApplicationShell>();
+        onDidWriteTerminalDataEmitter = new EventEmitter<TerminalDataWriteEvent>();
+        applicationShell.setup((a) => a.onDidWriteTerminalData).returns(() => onDidWriteTerminalDataEmitter.event);
+        mockServiceContainer.setup((c) => c.get(IApplicationShell)).returns(() => applicationShell.object);
+
+        onDidChangeTerminalStateEmitter = new EventEmitter<VSCodeTerminal>();
+        terminalManager
+            .setup((t) => t.onDidChangeTerminalState(TypeMoq.It.isAny()))
+            .returns((handler) => onDidChangeTerminalStateEmitter.event(handler));
+
         getConfigurationStub = sinon.stub(workspaceApis, 'getConfiguration');
         isWindowsStub = sinon.stub(platform, 'isWindows');
         pythonConfig = TypeMoq.Mock.ofType<WorkspaceConfiguration>();
@@ -230,8 +245,10 @@ suite('Terminal Service', () => {
         terminalHelper.setup((h) => h.identifyTerminalShell(TypeMoq.It.isAny())).returns(() => TerminalShellType.bash);
         terminalManager.setup((t) => t.createTerminal(TypeMoq.It.isAny())).returns(() => terminal.object);

-        service.ensureTerminal();
-        service.executeCommand(textToSend, true);
+        await service.ensureTerminal();
+        const executePromise = service.executeCommand(textToSend, true);
+        onDidWriteTerminalDataEmitter.fire({ terminal: terminal.object, data: '>>> ' });
+        await executePromise;

         terminal.verify((t) => t.show(TypeMoq.It.isValue(true)), TypeMoq.Times.exactly(1));
         terminal.verify((t) => t.sendText(TypeMoq.It.isValue(textToSend)), TypeMoq.Times.exactly(1));
@@ -251,14 +268,16 @@ suite('Terminal Service', () => {
         terminalHelper.setup((h) => h.identifyTerminalShell(TypeMoq.It.isAny())).returns(() => TerminalShellType.bash);
         terminalManager.setup((t) => t.createTerminal(TypeMoq.It.isAny())).returns(() => terminal.object);

-        service.ensureTerminal();
-        service.executeCommand(textToSend, true);
+        await service.ensureTerminal();
+        const executePromise = service.executeCommand(textToSend, true);
+        onDidWriteTerminalDataEmitter.fire({ terminal: terminal.object, data: '>>> ' });
+        await executePromise;

         terminal.verify((t) => t.show(TypeMoq.It.isValue(true)), TypeMoq.Times.exactly(1));
         terminal.verify((t) => t.sendText(TypeMoq.It.isValue(textToSend)), TypeMoq.Times.exactly(1));
     });

-    test('Ensure sendText is NOT called when Python shell integration and terminal shell integration are both enabled - Mac, Linux && Python < 3.13', async () => {
+    test('Ensure sendText is called when Python shell integration and terminal shell integration are both enabled - Mac, Linux && Python < 3.13', async () => {
         isWindowsStub.returns(false);
         pythonConfig
             .setup((p) => p.get('terminal.shellIntegration.enabled'))
@@ -273,11 +292,13 @@ suite('Terminal Service', () => {
         terminalHelper.setup((h) => h.identifyTerminalShell(TypeMoq.It.isAny())).returns(() => TerminalShellType.bash);
         terminalManager.setup((t) => t.createTerminal(TypeMoq.It.isAny())).returns(() => terminal.object);

-        service.ensureTerminal();
-        service.executeCommand(textToSend, true);
+        await service.ensureTerminal();
+        const executePromise = service.executeCommand(textToSend, true);
+        onDidWriteTerminalDataEmitter.fire({ terminal: terminal.object, data: '>>> ' });
+        await executePromise;

         terminal.verify((t) => t.show(TypeMoq.It.isValue(true)), TypeMoq.Times.exactly(1));
-        terminal.verify((t) => t.sendText(TypeMoq.It.isValue(textToSend)), TypeMoq.Times.never());
+        terminal.verify((t) => t.sendText(TypeMoq.It.isValue(textToSend)), TypeMoq.Times.exactly(1));
     });

     test('Ensure sendText is called when Python shell integration and terminal shell integration are both enabled - Mac, Linux && Python >= 3.13', async () => {
@@ -305,7 +326,9 @@ suite('Terminal Service', () => {
         terminalManager.setup((t) => t.createTerminal(TypeMoq.It.isAny())).returns(() => terminal.object);

         await service.ensureTerminal();
-        await service.executeCommand(textToSend, true);
+        const executePromise = service.executeCommand(textToSend, true);
+        onDidWriteTerminalDataEmitter.fire({ terminal: terminal.object, data: '>>> ' });
+        await executePromise;

         terminal.verify((t) => t.sendText(TypeMoq.It.isValue(textToSend)), TypeMoq.Times.once());
     });
@@ -325,13 +348,39 @@ suite('Terminal Service', () => {
         terminalHelper.setup((h) => h.identifyTerminalShell(TypeMoq.It.isAny())).returns(() => TerminalShellType.bash);
         terminalManager.setup((t) => t.createTerminal(TypeMoq.It.isAny())).returns(() => terminal.object);

-        service.ensureTerminal();
-        service.executeCommand(textToSend, true);
+        await service.ensureTerminal();
+        const executePromise = service.executeCommand(textToSend, true);
+        onDidWriteTerminalDataEmitter.fire({ terminal: terminal.object, data: '>>> ' });
+        await executePromise;

         terminal.verify((t) => t.show(TypeMoq.It.isValue(true)), TypeMoq.Times.exactly(1));
         terminal.verify((t) => t.sendText(TypeMoq.It.isValue(textToSend)), TypeMoq.Times.exactly(1));
     });

+    test('Ensure REPL ready when onDidChangeTerminalState fires with python shell', async () => {
+        pythonConfig
+            .setup((p) => p.get('terminal.shellIntegration.enabled'))
+            .returns(() => false)
+            .verifiable(TypeMoq.Times.once());
+
+        terminalHelper
+            .setup((helper) => helper.getEnvironmentActivationCommands(TypeMoq.It.isAny(), TypeMoq.It.isAny()))
+            .returns(() => Promise.resolve(undefined));
+        service = new TerminalService(mockServiceContainer.object);
+        const textToSend = 'Some Text';
+        terminalHelper.setup((h) => h.identifyTerminalShell(TypeMoq.It.isAny())).returns(() => TerminalShellType.bash);
+
+        terminal.setup((t) => t.state).returns(() => ({ isInteractedWith: true, shell: 'python' }));
+        terminalManager.setup((t) => t.createTerminal(TypeMoq.It.isAny())).returns(() => terminal.object);
+
+        await service.ensureTerminal();
+        const executePromise = service.executeCommand(textToSend, true);
+        onDidChangeTerminalStateEmitter.fire(terminal.object);
+        await executePromise;
+
+        terminal.verify((t) => t.sendText(TypeMoq.It.isValue(textToSend)), TypeMoq.Times.exactly(1));
+    });
+
     test('Ensure terminal is not shown if `hideFromUser` option is set to `true`', async () => {
         terminalHelper
             .setup((helper) => helper.getEnvironmentActivationCommands(TypeMoq.It.isAny(), TypeMoq.It.isAny()))
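These test changes reflect a behavioral change in `TerminalService`: `executeCommand` no longer resolves immediately, but waits until the terminal looks ready, either REPL prompt data arriving via `onDidWriteTerminalData` or a `python` shell reported via `onDidChangeTerminalState`. That is why each test now fires an emitter before awaiting the returned promise. A self-contained sketch of that wait pattern follows; `waitForPrompt` is hypothetical, and the real logic lives inside `TerminalService`.

```typescript
// Sketch of the readiness pattern the updated tests exercise (waitForPrompt is hypothetical).
import { EventEmitter } from 'vscode';

interface DataEvent {
    data: string;
}

function waitForPrompt(onData: EventEmitter<DataEvent>['event']): Promise<void> {
    return new Promise((resolve) => {
        const subscription = onData((e) => {
            if (e.data.includes('>>> ')) {
                // A REPL prompt was written to the terminal; it is safe to send text now.
                subscription.dispose();
                resolve();
            }
        });
    });
}

async function demo(): Promise<void> {
    const emitter = new EventEmitter<DataEvent>();
    const ready = waitForPrompt(emitter.event);
    emitter.fire({ data: '>>> ' }); // what the tests do via onDidWriteTerminalDataEmitter
    await ready; // resolves once the prompt is observed
}
```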
diff --git a/src/test/configuration/interpreterSelector/commands/setInterpreter.unit.test.ts b/src/test/configuration/interpreterSelector/commands/setInterpreter.unit.test.ts
index 0016ca339bfe..7837245ec9d2 100644
--- a/src/test/configuration/interpreterSelector/commands/setInterpreter.unit.test.ts
+++ b/src/test/configuration/interpreterSelector/commands/setInterpreter.unit.test.ts
@@ -1052,6 +1052,7 @@ suite('Set Interpreter Command', () => {
             openLabel: InterpreterQuickPickList.browsePath.openButtonLabel,
             canSelectMany: false,
             title: InterpreterQuickPickList.browsePath.title,
+            defaultUri: undefined,
         };
         const multiStepInput = TypeMoq.Mock.ofType<IMultiStepInput<InterpreterStateArgs>>();
         multiStepInput.setup((i) => i.showQuickPick(TypeMoq.It.isAny())).returns(() => Promise.resolve(items[0]));
@@ -1073,6 +1074,27 @@ suite('Set Interpreter Command', () => {
             openLabel: InterpreterQuickPickList.browsePath.openButtonLabel,
             canSelectMany: false,
             title: InterpreterQuickPickList.browsePath.title,
+            defaultUri: undefined,
+        };
+        multiStepInput.setup((i) => i.showQuickPick(TypeMoq.It.isAny())).returns(() => Promise.resolve(items[0]));
+        appShell.setup((a) => a.showOpenDialog(expectedParams)).verifiable(TypeMoq.Times.once());
+        platformService.setup((p) => p.isWindows).returns(() => false);
+
+        await setInterpreterCommand._enterOrBrowseInterpreterPath(multiStepInput.object, state).ignoreErrors();
+
+        appShell.verifyAll();
+    });
+
+    test('If `Browse...` option is selected with workspace, file browser opens at workspace root', async () => {
+        const workspaceUri = Uri.parse('file:///workspace/root');
+        const state: InterpreterStateArgs = { path: undefined, workspace: workspaceUri };
+        const multiStepInput = TypeMoq.Mock.ofType<IMultiStepInput<InterpreterStateArgs>>();
+        const expectedParams = {
+            filters: undefined,
+            openLabel: InterpreterQuickPickList.browsePath.openButtonLabel,
+            canSelectMany: false,
+            title: InterpreterQuickPickList.browsePath.title,
+            defaultUri: workspaceUri,
         };
         multiStepInput.setup((i) => i.showQuickPick(TypeMoq.It.isAny())).returns(() => Promise.resolve(items[0]));
         appShell.setup((a) => a.showOpenDialog(expectedParams)).verifiable(TypeMoq.Times.once());
@@ -1126,6 +1148,7 @@ suite('Set Interpreter Command', () => {
             openLabel: InterpreterQuickPickList.browsePath.openButtonLabel,
             canSelectMany: false,
             title: InterpreterQuickPickList.browsePath.title,
+            defaultUri: undefined,
         };
         multiStepInput
             .setup((i) => i.showQuickPick(TypeMoq.It.isAny()))
diff --git a/src/test/pythonEnvironments/creation/pyProjectTomlContext.unit.test.ts b/src/test/pythonEnvironments/creation/pyProjectTomlContext.unit.test.ts
index 8363837a4a36..3e787570304a 100644
--- a/src/test/pythonEnvironments/creation/pyProjectTomlContext.unit.test.ts
+++ b/src/test/pythonEnvironments/creation/pyProjectTomlContext.unit.test.ts
@@ -28,7 +28,7 @@ function getInstallableToml(): typemoq.IMock<TextDocument> {
         .setup((p) => p.getText(typemoq.It.isAny()))
         .returns(
             () =>
-                '[project]\nname = "spam"\nversion = "2020.0.0"\n[build-system]\nrequires = ["setuptools ~= 58.0", "cython ~= 0.29.0"]\n[project.optional-dependencies]\ntest = ["pytest"]\ndoc = ["sphinx", "furo"]',
+                '[project]\nname = "spam"\nversion = "2020.0.0"\n[build-system]\nrequires = ["setuptools ~= 58.0", "cython ~= 0.29.0"]\n[dependency-groups]\ndev = ["ruff", { include-group = "test" }]\ntest = ["pytest"]',
         );
     return pyprojectToml;
 }
diff --git a/src/test/smoke/runInTerminal.smoke.test.ts b/src/test/smoke/runInTerminal.smoke.test.ts
index bd4c88e44e80..4bdec0843862 100644
--- a/src/test/smoke/runInTerminal.smoke.test.ts
+++ b/src/test/smoke/runInTerminal.smoke.test.ts
@@ -17,13 +17,22 @@ suite('Smoke Test: Run Python File In Terminal', () => {
             return this.skip();
         }
         await initialize();
+        // Ensure the environments extension is not used for this test
+        await vscode.workspace
+            .getConfiguration('python')
+            .update('useEnvironmentsExtension', false, vscode.ConfigurationTarget.Global);
         return undefined;
     });

     setup(initializeTest);

     suiteTeardown(closeActiveWindows);
     teardown(closeActiveWindows);

-    test('Exec', async () => {
+    // TODO: Re-enable this test once the flakiness on Windows is resolved
+    test('Exec', async function () {
+        if (process.platform === 'win32') {
+            return this.skip();
+        }
         const file = path.join(
             EXTENSION_ROOT_DIR_FOR_TESTS,
             'src',
diff --git a/src/test/smoke/smartSend.smoke.test.ts b/src/test/smoke/smartSend.smoke.test.ts
index dc1f07f047e7..cae41cc094d5 100644
--- a/src/test/smoke/smartSend.smoke.test.ts
+++ b/src/test/smoke/smartSend.smoke.test.ts
@@ -19,7 +19,8 @@ suite('Smoke Test: Run Smart Selection and Advance Cursor', async () => {
     suiteTeardown(closeActiveWindows);
     teardown(closeActiveWindows);

-    test('Smart Send', async () => {
+    // TODO: Re-enable this test once the flakiness on Windows and Linux is resolved
+    test.skip('Smart Send', async function () {
         const file = path.join(
             EXTENSION_ROOT_DIR_FOR_TESTS,
             'src',
diff --git a/src/test/standardTest.ts b/src/test/standardTest.ts
index 00eb3d7cf8c4..c3a7968c9c7a 100644
--- a/src/test/standardTest.ts
+++ b/src/test/standardTest.ts
@@ -89,7 +89,7 @@ async function start() {
     console.log('VS Code executable', vscodeExecutablePath);
     const launchArgs = baseLaunchArgs
         .concat([workspacePath])
-        .concat(channel === 'insiders' ? ['--enable-proposed-api'] : [])
+        .concat(['--enable-proposed-api'])
         .concat(['--timeout', '5000']);
     console.log(`Starting vscode ${channel} with args ${launchArgs.join(' ')}`);
     const options: TestOptions = {
diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts
index dcd78dc23dba..478e9dd85744 100644
--- a/src/test/testing/common/testingAdapter.test.ts
+++ b/src/test/testing/common/testingAdapter.test.ts
@@ -9,7 +9,11 @@ import * as fs from 'fs';
 import * as os from 'os';
 import * as sinon from 'sinon';
 import { PytestTestDiscoveryAdapter } from '../../../client/testing/testController/pytest/pytestDiscoveryAdapter';
-import { ITestController, ITestResultResolver } from '../../../client/testing/testController/common/types';
+import {
+    ITestController,
+    ITestResultResolver,
+    ExecutionTestPayload,
+} from '../../../client/testing/testController/common/types';
 import { IPythonExecutionFactory } from '../../../client/common/process/types';
 import { IConfigurationService } from '../../../client/common/types';
 import { IServiceContainer } from '../../../client/ioc/types';
@@ -157,11 +161,10 @@ suite('End to End Tests: test adapters', () => {
         resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
         let callCount = 0;
         // const deferredTillEOT = createTestingDeferred();
-        resultResolver._resolveDiscovery = async (payload, _token?) => {
+        resultResolver.resolveDiscovery = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             actualData = payload;
-            return Promise.resolve();
         };

         // set workspace to test workspace folder and set up settings
@@ -198,11 +201,10 @@ suite('End to End Tests: test adapters', () => {
         };
         resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
         let callCount = 0;
-        resultResolver._resolveDiscovery = async (payload, _token?) => {
+        resultResolver.resolveDiscovery = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             actualData = payload;
-            return Promise.resolve();
         };

         // set settings to work for the given workspace
@@ -238,10 +240,9 @@ suite('End to End Tests: test adapters', () => {
         workspaceUri = Uri.parse(rootPathSmallWorkspace);
         resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
         let callCount = 0;
-        resultResolver._resolveDiscovery = async (payload, _token?) => {
+        resultResolver.resolveDiscovery = (payload, _token?) => {
             callCount = callCount + 1;
             actualData = payload;
-            return Promise.resolve();
         };
         // run pytest discovery
         const discoveryAdapter = new PytestTestDiscoveryAdapter(configService, resultResolver, envVarsService);
@@ -287,11 +288,10 @@ suite('End to End Tests: test adapters', () => {

         resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
         let callCount = 0;
-        resultResolver._resolveDiscovery = async (payload, _token?) => {
+        resultResolver.resolveDiscovery = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             actualData = payload;
-            return Promise.resolve();
         };
         // run pytest discovery
         const discoveryAdapter = new PytestTestDiscoveryAdapter(configService, resultResolver, envVarsService);
@@ -371,11 +371,10 @@ suite('End to End Tests: test adapters', () => {

         resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
         let callCount = 0;
-        resultResolver._resolveDiscovery = async (payload, _token?) => {
+        resultResolver.resolveDiscovery = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             actualData = payload;
-            return Promise.resolve();
         };
         // run pytest discovery
         const discoveryAdapter = new PytestTestDiscoveryAdapter(configService, resultResolver, envVarsService);
@@ -442,11 +441,10 @@ suite('End to End Tests: test adapters', () => {
         };
         resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
         let callCount = 0;
-        resultResolver._resolveDiscovery = async (payload, _token?) => {
+        resultResolver.resolveDiscovery = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             actualData = payload;
-            return Promise.resolve();
         };
         // run pytest discovery
         const discoveryAdapter = new PytestTestDiscoveryAdapter(configService, resultResolver, envVarsService);
@@ -476,22 +474,23 @@ suite('End to End Tests: test adapters', () => {
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
-        resultResolver._resolveExecution = async (payload, _token?) => {
+        resultResolver.resolveExecution = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             // the payloads that get to the _resolveExecution are all data and should be successful.
             try {
-                assert.strictEqual(
-                    payload.status,
-                    'success',
-                    `Expected status to be 'success', instead status is ${payload.status}`,
-                );
-                assert.ok(payload.result, 'Expected results to be present');
+                if ('status' in payload) {
+                    assert.strictEqual(
+                        payload.status,
+                        'success',
+                        `Expected status to be 'success', instead status is ${payload.status}`,
+                    );
+                    assert.ok(payload.result, 'Expected results to be present');
+                }
             } catch (err) {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            return Promise.resolve();
         };

         // set workspace to test workspace folder
@@ -550,22 +549,25 @@ suite('End to End Tests: test adapters', () => {
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
-        resultResolver._resolveExecution = async (payload, _token?) => {
+        resultResolver.resolveExecution = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             // the payloads that get to the _resolveExecution are all data and should be successful.
             try {
-                const validStatuses = ['subtest-success', 'subtest-failure'];
-                assert.ok(
-                    validStatuses.includes(payload.status),
-                    `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`,
-                );
-                assert.ok(payload.result, 'Expected results to be present');
+                if ('status' in payload) {
+                    const validStatuses = ['subtest-success', 'subtest-failure'];
+                    assert.ok(
+                        validStatuses.includes(payload.status),
+                        `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${
+                            payload.status
+                        }`,
+                    );
+                    assert.ok(payload.result, 'Expected results to be present');
+                }
             } catch (err) {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            return Promise.resolve();
         };

         // set workspace to test workspace folder
@@ -621,22 +623,23 @@ suite('End to End Tests: test adapters', () => {
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
-        resultResolver._resolveExecution = async (payload, _token?) => {
+        resultResolver.resolveExecution = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             // the payloads that get to the _resolveExecution are all data and should be successful.
             try {
-                assert.strictEqual(
-                    payload.status,
-                    'success',
-                    `Expected status to be 'success', instead status is ${payload.status}`,
-                );
-                assert.ok(payload.result, 'Expected results to be present');
+                if ('status' in payload) {
+                    assert.strictEqual(
+                        payload.status,
+                        'success',
+                        `Expected status to be 'success', instead status is ${payload.status}`,
+                    );
+                    assert.ok(payload.result, 'Expected results to be present');
+                }
             } catch (err) {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            return Promise.resolve();
         };
         // set workspace to test workspace folder
         workspaceUri = Uri.parse(rootPathSmallWorkspace);
@@ -703,7 +706,7 @@ suite('End to End Tests: test adapters', () => {
     test('Unittest execution with coverage, small workspace', async () => {
         // result resolver and saved data for assertions
         resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri);
-        resultResolver._resolveCoverage = async (payload, _token?) => {
+        resultResolver._resolveCoverage = (payload, _token?) => {
             assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder');
             assert.ok(payload.result, 'Expected results to be present');
             const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`];
@@ -713,7 +716,6 @@ suite('End to End Tests: test adapters', () => {
             assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py');
             assert.strictEqual(simpleFileCov.executed_branches, 1, 'Expected 1 branch to be executed in even.py');
             assert.strictEqual(simpleFileCov.total_branches, 2, 'Expected 2 branches in even.py');
-            return Promise.resolve();
         };

         // set workspace to test workspace folder
@@ -753,7 +755,7 @@ suite('End to End Tests: test adapters', () => {
     test('pytest coverage execution, small workspace', async () => {
         // result resolver and saved data for assertions
         resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
-        resultResolver._resolveCoverage = async (payload, _runInstance?) => {
+        resultResolver._resolveCoverage = (payload, _runInstance?) => {
             assert.strictEqual(payload.cwd, rootPathCoverageWorkspace, 'Expected cwd to be the workspace folder');
             assert.ok(payload.result, 'Expected results to be present');
             const simpleFileCov = payload.result[`${rootPathCoverageWorkspace}/even.py`];
@@ -763,8 +765,6 @@ suite('End to End Tests: test adapters', () => {
             assert.strictEqual(simpleFileCov.lines_missed.length, 1, 'Expected 3 lines to be missed in even.py');
             assert.strictEqual(simpleFileCov.executed_branches, 1, 'Expected 1 branch to be executed in even.py');
             assert.strictEqual(simpleFileCov.total_branches, 2, 'Expected 2 branches in even.py');
-
-            return Promise.resolve();
         };
         // set workspace to test workspace folder
         workspaceUri = Uri.parse(rootPathCoverageWorkspace);
@@ -807,22 +807,23 @@ suite('End to End Tests: test adapters', () => {
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
-        resultResolver._resolveExecution = async (payload, _token?) => {
+        resultResolver.resolveExecution = (payload, _token?) => {
             traceLog(`resolveDiscovery ${payload}`);
             callCount = callCount + 1;
             // the payloads that get to the _resolveExecution are all data and should be successful.
             try {
-                assert.strictEqual(
-                    payload.status,
-                    'success',
-                    `Expected status to be 'success', instead status is ${payload.status}`,
-                );
-                assert.ok(payload.result, 'Expected results to be present');
+                if ('status' in payload) {
+                    assert.strictEqual(
+                        payload.status,
+                        'success',
+                        `Expected status to be 'success', instead status is ${payload.status}`,
+                    );
+                    assert.ok(payload.result, 'Expected results to be present');
+                }
             } catch (err) {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            return Promise.resolve();
         };

         // set workspace to test workspace folder
@@ -874,7 +875,7 @@ suite('End to End Tests: test adapters', () => {
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
-        resultResolver._resolveDiscovery = async (data, _token?) => {
+        resultResolver.resolveDiscovery = (data, _token?) => {
             // do the following asserts for each time resolveExecution is called, should be called once per test.
             callCount = callCount + 1;
             traceLog(`unittest discovery adapter seg fault error handling \n ${JSON.stringify(data)}`);
@@ -899,7 +900,6 @@ suite('End to End Tests: test adapters', () => {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            return Promise.resolve();
         };

         // set workspace to test workspace folder
@@ -927,7 +927,7 @@ suite('End to End Tests: test adapters', () => {
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
-        resultResolver._resolveDiscovery = async (data, _token?) => {
+        resultResolver.resolveDiscovery = (data, _token?) => {
             // do the following asserts for each time resolveExecution is called, should be called once per test.
             callCount = callCount + 1;
             traceLog(`add one to call count, is now ${callCount}`);
@@ -957,7 +957,6 @@ suite('End to End Tests: test adapters', () => {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            return Promise.resolve();
         };
         // run pytest discovery
         const discoveryAdapter = new PytestTestDiscoveryAdapter(configService, resultResolver, envVarsService);
@@ -980,22 +979,24 @@ suite('End to End Tests: test adapters', () => {
         let callCount = 0;
         let failureOccurred = false;
         let failureMsg = '';
-        resultResolver._resolveExecution = async (data, _token?) => {
+        resultResolver.resolveExecution = (data, _token?) => {
             // do the following asserts for each time resolveExecution is called, should be called once per test.
             console.log(`pytest execution adapter seg fault error handling \n ${JSON.stringify(data)}`);
             callCount = callCount + 1;
             try {
-                if (data.status === 'error') {
-                    assert.ok(data.error, "Expected errors in 'error' field");
-                } else {
-                    const indexOfTest = JSON.stringify(data.result).search('error');
-                    assert.notDeepEqual(
-                        indexOfTest,
-                        -1,
-                        'If payload status is not error then the individual tests should be marked as errors. This should occur on windows machines.',
-                    );
+                if ('status' in data) {
+                    if (data.status === 'error') {
+                        assert.ok(data.error, "Expected errors in 'error' field");
+                    } else {
+                        const indexOfTest = JSON.stringify(data.result).search('error');
+                        assert.notDeepEqual(
+                            indexOfTest,
+                            -1,
+                            'If payload status is not error then the individual tests should be marked as errors. This should occur on windows machines.',
+                        );
+                    }
+                    assert.ok(data.result, 'Expected results to be present');
                 }
-                assert.ok(data.result, 'Expected results to be present');
                 // make sure the testID is found in the results
                 const indexOfTest = JSON.stringify(data).search(
                     'test_seg_fault.py::TestSegmentationFault::test_segfault',
@@ -1005,7 +1006,6 @@ suite('End to End Tests: test adapters', () => {
                 failureMsg = err ? (err as Error).toString() : '';
                 failureOccurred = true;
             }
-            return Promise.resolve();
         };

         const testId = `${rootPathErrorWorkspace}/test_seg_fault.py::TestSegmentationFault::test_segfault`;
@@ -1033,4 +1033,210 @@ suite('End to End Tests: test adapters', () => {
         assert.strictEqual(failureOccurred, false, failureMsg);
     });
 });
+
+    test('resolveExecution performance test: validates efficient test result processing', async () => {
+        // This test validates that resolveExecution processes test results efficiently
+        // without expensive tree rebuilding or linear searching operations.
+        //
+        // The test ensures that processing many test results (like parameterized tests)
+        // remains fast and doesn't cause performance issues or stack overflow.
+
+        // ================================================================
+        // SETUP: Initialize test environment and tracking variables
+        // ================================================================
+        resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri);
+
+        // Performance tracking variables
+        let totalCallTime = 0;
+        let callCount = 0;
+        const callTimes: number[] = [];
+        let treeRebuildCount = 0;
+        let totalSearchOperations = 0;
+
+        // Test configuration - moderate scale to validate efficiency
+        const numTestFiles = 5; // Multiple test files
+        const testFunctionsPerFile = 10; // Test functions per file
+        const totalTestItems = numTestFiles * testFunctionsPerFile; // Total test items in mock tree
+        const numParameterizedResults = 15; // Number of parameterized test results to process
+
+        // ================================================================
+        // MOCK: Set up spies and function wrapping to track performance
+        // ================================================================
+
+        // Mock getTestCaseNodes to track expensive tree operations
+        const originalGetTestCaseNodes = require('../../../client/testing/testController/common/testItemUtilities')
+            .getTestCaseNodes;
+        const getTestCaseNodesSpy = sinon.stub().callsFake((item) => {
+            treeRebuildCount++;
+            const result = originalGetTestCaseNodes(item);
+            // Track search operations through tree items
+            // Safely handle undefined results
+            if (result && Array.isArray(result)) {
+                totalSearchOperations += result.length;
+            }
+            return result || []; // Return an empty array if undefined
+        });
+
+        // Replace the real function with our spy
+        const testItemUtilities = require('../../../client/testing/testController/common/testItemUtilities');
+        testItemUtilities.getTestCaseNodes = getTestCaseNodesSpy;
+
+        // Stub isTestItemValid to always return true for the performance test
+        // This prevents expensive tree searches during validation
+        const testItemIndexStub = sinon.stub((resultResolver as any).testItemIndex, 'isTestItemValid').returns(true);
+
+        // Wrap the resolveExecution function to measure performance
+        const original_resolveExecution = resultResolver.resolveExecution.bind(resultResolver);
+        resultResolver.resolveExecution = (payload, runInstance) => {
+            const startTime = performance.now();
+            callCount++;
+
+            // Call the actual implementation
+            original_resolveExecution(payload, runInstance);
+
+            const endTime = performance.now();
+            const callTime = endTime - startTime;
+            callTimes.push(callTime);
+            totalCallTime += callTime;
+        };
+
+        // ================================================================
+        // SETUP: Create test data that simulates realistic test scenarios
+        // ================================================================
+
+        // Create a mock TestController with the methods we need
+        const mockTestController = {
+            items: new Map(),
+            createTestItem: (id: string, label: string, uri?: Uri) => {
+                const childrenMap = new Map();
+                // Add a forEach method to the children map to simulate TestItemCollection
+                (childrenMap as any).forEach = function (callback: (item: any) => void) {
+                    Map.prototype.forEach.call(this, callback);
+                };
+
+                const mockTestItem = {
+                    id,
+                    label,
+                    uri,
+                    children: childrenMap,
+                    parent: undefined,
+                    canResolveChildren: false,
+                    tags: [{ id: 'python-run' }, { id: 'python-debug' }],
+                };
+                return mockTestItem;
+            },
+            // Add a forEach method to simulate the problematic iteration
+            forEach: function (callback: (item: any) => void) {
+                this.items.forEach(callback);
+            },
+        };
+
+        // Replace the testController in our resolver
+        (resultResolver as any).testController = mockTestController;
+
+        // Create a test controller with many test items (simulates a real workspace)
+        for (let i = 0; i < numTestFiles; i++) {
+            const testItem = mockTestController.createTestItem(
+                `test_file_${i}`,
+                `Test File ${i}`,
+                Uri.file(`/test_${i}.py`),
+            );
+            mockTestController.items.set(`test_file_${i}`, testItem);
+
+            // Add child test items to each file
+            for (let j = 0; j < testFunctionsPerFile; j++) {
+                const childItem = mockTestController.createTestItem(
+                    `test_${i}_${j}`,
+                    `test_method_${j}`,
+                    Uri.file(`/test_${i}.py`),
+                );
+                testItem.children.set(`test_${i}_${j}`, childItem);
+
+                // Set up the ID mappings that the resolver uses
+                resultResolver.runIdToTestItem.set(`test_${i}_${j}`, childItem as any);
+                resultResolver.runIdToVSid.set(`test_${i}_${j}`, `test_${i}_${j}`);
+                resultResolver.vsIdToRunId.set(`test_${i}_${j}`, `test_${i}_${j}`);
+            }
+        }
+
+        // Create a payload with multiple test results (simulates a real test execution)
+        const testResults: Record<string, any> = {};
+        for (let i = 0; i < numParameterizedResults; i++) {
+            // Use test IDs that actually exist in our mock setup (test_0_0 through test_0_9)
+            testResults[`test_0_${i % testFunctionsPerFile}`] = {
+                test: `test_method[${i}]`,
+                outcome: 'success',
+                message: null,
+                traceback: null,
+                subtest: null,
+            };
+        }
+
+        const payload: ExecutionTestPayload = {
+            cwd: '/test',
+            status: 'success' as const,
+            error: '',
+            result: testResults,
+        };
+
+        const mockRunInstance = {
+            passed: sinon.stub(),
+            failed: sinon.stub(),
+            errored: sinon.stub(),
+            skipped: sinon.stub(),
+        };
+
+        // ================================================================
+        // EXECUTION: Run the performance test
+        // ================================================================
+
+        const overallStartTime = performance.now();
+
+        // Run the resolveExecution function with test data
+        await resultResolver.resolveExecution(payload, mockRunInstance as any);
+
+        const overallEndTime = performance.now();
+        const totalTime = overallEndTime - overallStartTime;
+
+        // ================================================================
+        // CLEANUP: Restore original functions
+        // ================================================================
+        testItemUtilities.getTestCaseNodes = originalGetTestCaseNodes;
+        testItemIndexStub.restore();
+
+        // ================================================================
+        // ASSERT: Verify efficient performance characteristics
+        // ================================================================
+        console.log(`\n=== PERFORMANCE RESULTS ===`);
+        console.log(
+            `Test setup: ${numTestFiles} files × ${testFunctionsPerFile} test functions = ${totalTestItems} total items`,
+        );
+        console.log(`Total execution time: ${totalTime.toFixed(2)}ms`);
+        console.log(`Tree operations performed: ${treeRebuildCount}`);
+        console.log(`Search operations: ${totalSearchOperations}`);
+        console.log(`Average time per call: ${(totalCallTime / callCount).toFixed(2)}ms`);
+        console.log(`Results processed: ${numParameterizedResults}`);
+
+        // Basic function call verification
+        assert.strictEqual(callCount, 1, 'Expected resolveExecution to be called once');
+
+        // EFFICIENCY VERIFICATION: Ensure minimal expensive operations
+        assert.strictEqual(
+            treeRebuildCount,
+            0,
+            'Expected ZERO tree rebuilds - efficient implementation should use cached lookups',
+        );
+
+        assert.strictEqual(
+            totalSearchOperations,
+            0,
+            'Expected ZERO linear search operations - efficient implementation should use direct lookups',
+        );
+
+        // Performance threshold verification - should be fast
+        assert.ok(totalTime < 100, `Function should complete quickly, took ${totalTime}ms (should be under 100ms)`);
+
+        // Scalability check - time should not grow significantly with more results
+        const timePerResult = totalTime / numParameterizedResults;
+        assert.ok(
+            timePerResult < 10,
+            `Time per result should be minimal: ${timePerResult.toFixed(2)}ms per result (should be under 10ms)`,
+        );
+    });
 });
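Throughout this file, assertions switched from direct property access to a `'status' in payload` guard because `resolveExecution` now receives a union of execution and coverage payloads. A small, self-contained illustration of that narrowing, using simplified hypothetical stand-ins rather than the real `ExecutionTestPayload`/`CoveragePayload` from `common/types`:

```typescript
// Simplified, hypothetical stand-ins for the real payload types.
type ExecLike = { status: 'success' | 'error'; result?: Record<string, unknown> };
type CoverageLike = { coverage: true; result?: Record<string, unknown> };

function describePayload(payload: ExecLike | CoverageLike): string {
    if ('status' in payload) {
        // TypeScript narrows to ExecLike here: only execution payloads carry `status`.
        return `execution payload with status ${payload.status}`;
    }
    return 'coverage payload';
}
```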
efficient implementation should use direct lookups', + ); + + // Performance threshold verification - should be fast + assert.ok(totalTime < 100, `Function should complete quickly, took ${totalTime}ms (should be under 100ms)`); + + // Scalability check - time should not grow significantly with more results + const timePerResult = totalTime / numParameterizedResults; + assert.ok( + timePerResult < 10, + `Time per result should be minimal: ${timePerResult.toFixed(2)}ms per result (should be under 10ms)`, + ); + }); }); diff --git a/src/test/testing/testController/common/testCoverageHandler.unit.test.ts b/src/test/testing/testController/common/testCoverageHandler.unit.test.ts new file mode 100644 index 000000000000..a81aed591128 --- /dev/null +++ b/src/test/testing/testController/common/testCoverageHandler.unit.test.ts @@ -0,0 +1,502 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { TestRun, Uri, FileCoverage } from 'vscode'; +import * as typemoq from 'typemoq'; +import * as assert from 'assert'; +import { TestCoverageHandler } from '../../../../client/testing/testController/common/testCoverageHandler'; +import { CoveragePayload } from '../../../../client/testing/testController/common/types'; + +suite('TestCoverageHandler', () => { + let coverageHandler: TestCoverageHandler; + let runInstanceMock: typemoq.IMock; + + setup(() => { + coverageHandler = new TestCoverageHandler(); + runInstanceMock = typemoq.Mock.ofType(); + }); + + suite('processCoverage', () => { + test('should return empty map for undefined result', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: undefined, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + assert.strictEqual(result.size, 0); + runInstanceMock.verify((r) => r.addCoverage(typemoq.It.isAny()), typemoq.Times.never()); + }); + + test('should create FileCoverage for each file', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file1.py': { + lines_covered: [1, 2, 3], + lines_missed: [4, 5], + executed_branches: 5, + total_branches: 10, + }, + '/path/to/file2.py': { + lines_covered: [1, 2], + lines_missed: [3], + executed_branches: 2, + total_branches: 4, + }, + }, + error: '', + }; + + coverageHandler.processCoverage(payload, runInstanceMock.object); + + runInstanceMock.verify((r) => r.addCoverage(typemoq.It.isAny()), typemoq.Times.exactly(2)); + }); + + test('should call runInstance.addCoverage with correct FileCoverage', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 2, 3], + lines_missed: [4, 5], + executed_branches: 5, + total_branches: 10, + }, + }, + error: '', + }; + + let capturedCoverage: FileCoverage | undefined; + runInstanceMock + .setup((r) => r.addCoverage(typemoq.It.isAny())) + .callback((coverage: FileCoverage) => { + capturedCoverage = coverage; + }); + + coverageHandler.processCoverage(payload, runInstanceMock.object); + + assert.ok(capturedCoverage); + assert.strictEqual(capturedCoverage!.uri.fsPath, Uri.file('/path/to/file.py').fsPath); + }); + + test('should return detailed coverage map with correct keys', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file1.py': { + lines_covered: [1, 2], + lines_missed: [3], + executed_branches: 2, + total_branches: 4, + }, + '/path/to/file2.py': 
{ + lines_covered: [5, 6, 7], + lines_missed: [], + executed_branches: 3, + total_branches: 3, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + assert.strictEqual(result.size, 2); + assert.ok(result.has(Uri.file('/path/to/file1.py').fsPath)); + assert.ok(result.has(Uri.file('/path/to/file2.py').fsPath)); + }); + + test('should handle empty coverage data', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: {}, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + assert.strictEqual(result.size, 0); + }); + + test('should handle file with no covered lines', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [], + lines_missed: [1, 2, 3], + executed_branches: 0, + total_branches: 5, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 3); // Only missed lines + }); + + test('should handle file with no missed lines', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 2, 3], + lines_missed: [], + executed_branches: 5, + total_branches: 5, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 3); // Only covered lines + }); + + test('should handle undefined lines_covered', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: undefined as any, + lines_missed: [1, 2], + executed_branches: 0, + total_branches: 2, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 2); // Only missed lines + }); + + test('should handle undefined lines_missed', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 2], + lines_missed: undefined as any, + executed_branches: 2, + total_branches: 2, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 2); // Only covered lines + }); + }); + + suite('createFileCoverage', () => { + test('should handle line coverage only when totalBranches is -1', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 2, 3], + lines_missed: [4, 5], + executed_branches: 0, + total_branches: -1, // Branch coverage disabled + }, + }, + error: '', + }; + + let capturedCoverage: FileCoverage | undefined; + runInstanceMock + .setup((r) => r.addCoverage(typemoq.It.isAny())) + .callback((coverage: FileCoverage) => { + capturedCoverage = coverage; + }); + + 
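+            // Aside (illustrative sketch, not part of the original change): typemoq's
+            // .callback() fires on every matching invocation, so the same capture pattern
+            // extends to multi-file payloads by collecting into an array instead:
+            //     const captured: FileCoverage[] = [];
+            //     runInstanceMock
+            //         .setup((r) => r.addCoverage(typemoq.It.isAny()))
+            //         .callback((coverage: FileCoverage) => captured.push(coverage));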
coverageHandler.processCoverage(payload, runInstanceMock.object); + + assert.ok(capturedCoverage); + // Branch coverage should not be included + assert.strictEqual((capturedCoverage as any).branchCoverage, undefined); + }); + + test('should include branch coverage when available', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 2, 3], + lines_missed: [4], + executed_branches: 7, + total_branches: 10, + }, + }, + error: '', + }; + + let capturedCoverage: FileCoverage | undefined; + runInstanceMock + .setup((r) => r.addCoverage(typemoq.It.isAny())) + .callback((coverage: FileCoverage) => { + capturedCoverage = coverage; + }); + + coverageHandler.processCoverage(payload, runInstanceMock.object); + + assert.ok(capturedCoverage); + // Should have branch coverage + assert.ok((capturedCoverage as any).branchCoverage); + }); + + test('should calculate line coverage counts correctly', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 2, 3, 4, 5], + lines_missed: [6, 7], + executed_branches: 0, + total_branches: -1, + }, + }, + error: '', + }; + + let capturedCoverage: FileCoverage | undefined; + runInstanceMock + .setup((r) => r.addCoverage(typemoq.It.isAny())) + .callback((coverage: FileCoverage) => { + capturedCoverage = coverage; + }); + + coverageHandler.processCoverage(payload, runInstanceMock.object); + + assert.ok(capturedCoverage); + // 5 covered out of 7 total (5 covered + 2 missed) + assert.strictEqual((capturedCoverage as any).statementCoverage.covered, 5); + assert.strictEqual((capturedCoverage as any).statementCoverage.total, 7); + }); + }); + + suite('createDetailedCoverage', () => { + test('should create StatementCoverage for covered lines', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 2, 3], + lines_missed: [], + executed_branches: 0, + total_branches: -1, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 3); + + // All should be covered (true) + detailedCoverage!.forEach((coverage) => { + assert.strictEqual((coverage as any).executed, true); + }); + }); + + test('should create StatementCoverage for missed lines', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [], + lines_missed: [1, 2, 3], + executed_branches: 0, + total_branches: -1, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 3); + + // All should be NOT covered (false) + detailedCoverage!.forEach((coverage) => { + assert.strictEqual((coverage as any).executed, false); + }); + }); + + test('should convert 1-indexed to 0-indexed line numbers for covered lines', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 5, 10], + lines_missed: [], + executed_branches: 0, + total_branches: -1, + }, + }, + error: '', + }; + + const result = 
coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + + // Line 1 should map to range starting at line 0 + assert.strictEqual((detailedCoverage![0] as any).location.start.line, 0); + // Line 5 should map to range starting at line 4 + assert.strictEqual((detailedCoverage![1] as any).location.start.line, 4); + // Line 10 should map to range starting at line 9 + assert.strictEqual((detailedCoverage![2] as any).location.start.line, 9); + }); + + test('should convert 1-indexed to 0-indexed line numbers for missed lines', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [], + lines_missed: [3, 7, 12], + executed_branches: 0, + total_branches: -1, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + + // Line 3 should map to range starting at line 2 + assert.strictEqual((detailedCoverage![0] as any).location.start.line, 2); + // Line 7 should map to range starting at line 6 + assert.strictEqual((detailedCoverage![1] as any).location.start.line, 6); + // Line 12 should map to range starting at line 11 + assert.strictEqual((detailedCoverage![2] as any).location.start.line, 11); + }); + + test('should handle large line numbers', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1000, 5000, 10000], + lines_missed: [], + executed_branches: 0, + total_branches: -1, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 3); + + // Verify conversion is correct for large numbers + assert.strictEqual((detailedCoverage![0] as any).location.start.line, 999); + assert.strictEqual((detailedCoverage![1] as any).location.start.line, 4999); + assert.strictEqual((detailedCoverage![2] as any).location.start.line, 9999); + }); + + test('should create detailed coverage with both covered and missed lines', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1, 3, 5], + lines_missed: [2, 4, 6], + executed_branches: 3, + total_branches: 6, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + assert.strictEqual(detailedCoverage!.length, 6); // 3 covered + 3 missed + + // Count covered vs not covered + const covered = detailedCoverage!.filter((c) => (c as any).executed === true); + const notCovered = detailedCoverage!.filter((c) => (c as any).executed === false); + + assert.strictEqual(covered.length, 3); + assert.strictEqual(notCovered.length, 3); + }); + + test('should set range to cover entire line', () => { + const payload: CoveragePayload = { + coverage: true, + cwd: '/foo/bar', + result: { + '/path/to/file.py': { + lines_covered: [1], + lines_missed: [], + executed_branches: 0, + total_branches: -1, + }, + }, + error: '', + }; + + const result = coverageHandler.processCoverage(payload, 
runInstanceMock.object); + + const detailedCoverage = result.get(Uri.file('/path/to/file.py').fsPath); + assert.ok(detailedCoverage); + + const coverage = detailedCoverage![0] as any; + // Start at column 0 + assert.strictEqual(coverage.location.start.character, 0); + // End at max safe integer (entire line) + assert.strictEqual(coverage.location.end.character, Number.MAX_SAFE_INTEGER); + }); + }); +}); diff --git a/src/test/testing/testController/common/testDiscoveryHandler.unit.test.ts b/src/test/testing/testController/common/testDiscoveryHandler.unit.test.ts new file mode 100644 index 000000000000..458e3d984405 --- /dev/null +++ b/src/test/testing/testController/common/testDiscoveryHandler.unit.test.ts @@ -0,0 +1,517 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { TestController, TestItem, Uri, CancellationToken, TestItemCollection } from 'vscode'; +import * as typemoq from 'typemoq'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import { TestDiscoveryHandler } from '../../../../client/testing/testController/common/testDiscoveryHandler'; +import { TestItemIndex } from '../../../../client/testing/testController/common/testItemIndex'; +import { DiscoveredTestPayload, DiscoveredTestNode } from '../../../../client/testing/testController/common/types'; +import { TestProvider } from '../../../../client/testing/types'; +import * as utils from '../../../../client/testing/testController/common/utils'; +import * as testItemUtilities from '../../../../client/testing/testController/common/testItemUtilities'; + +suite('TestDiscoveryHandler', () => { + let discoveryHandler: TestDiscoveryHandler; + let testControllerMock: typemoq.IMock; + let testItemIndexMock: typemoq.IMock; + let testItemCollectionMock: typemoq.IMock; + let workspaceUri: Uri; + let testProvider: TestProvider; + let cancelationToken: CancellationToken; + + setup(() => { + discoveryHandler = new TestDiscoveryHandler(); + testControllerMock = typemoq.Mock.ofType(); + testItemIndexMock = typemoq.Mock.ofType(); + testItemCollectionMock = typemoq.Mock.ofType(); + + // Setup default test controller items mock + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + testItemCollectionMock.setup((x) => x.delete(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + + workspaceUri = Uri.file('/foo/bar'); + testProvider = 'pytest'; + cancelationToken = ({ + isCancellationRequested: false, + } as unknown) as CancellationToken; + }); + + teardown(() => { + sinon.restore(); + }); + + suite('processDiscovery', () => { + test('should handle null payload gracefully', () => { + discoveryHandler.processDiscovery( + null as any, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + // Should not throw and should not call populateTestTree + testItemIndexMock.verify((x) => x.clear(), typemoq.Times.never()); + }); + + test('should call populateTestTree with correct params on success', () => { + const tests: DiscoveredTestNode = { + path: '/foo/bar', + name: 'root', + type_: 'folder', + id_: 'root_id', + children: [], + }; + + const payload: DiscoveredTestPayload = { + cwd: workspaceUri.fsPath, + status: 'success', + tests, + }; + + const populateTestTreeStub = sinon.stub(utils, 'populateTestTree'); + 
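+            // Note (sketch, assuming the handler resolves populateTestTree through the module
+            // object): sinon.stub(utils, 'populateTestTree') swaps the export in place and is
+            // undone by the sinon.restore() in teardown(); a fake body can also be supplied:
+            //     sinon.stub(utils, 'populateTestTree').callsFake(() => {
+            //         /* record arguments or simulate tree population here */
+            //     });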
testItemIndexMock.setup((x) => x.clear()).returns(() => undefined); + + // Setup map getters for populateTestTree + const mockRunIdMap = new Map(); + const mockVSidMap = new Map(); + const mockVStoRunMap = new Map(); + testItemIndexMock.setup((x) => x.runIdToTestItemMap).returns(() => mockRunIdMap); + testItemIndexMock.setup((x) => x.runIdToVSidMap).returns(() => mockVSidMap); + testItemIndexMock.setup((x) => x.vsIdToRunIdMap).returns(() => mockVStoRunMap); + + discoveryHandler.processDiscovery( + payload, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + testItemIndexMock.verify((x) => x.clear(), typemoq.Times.once()); + assert.ok(populateTestTreeStub.calledOnce); + sinon.assert.calledWith( + populateTestTreeStub, + testControllerMock.object, + tests, + undefined, + sinon.match.any, + cancelationToken, + ); + }); + + test('should clear index before populating', () => { + const tests: DiscoveredTestNode = { + path: '/foo/bar', + name: 'root', + type_: 'folder', + id_: 'root_id', + children: [], + }; + + const payload: DiscoveredTestPayload = { + cwd: workspaceUri.fsPath, + status: 'success', + tests, + }; + + sinon.stub(utils, 'populateTestTree'); + + const clearSpy = sinon.spy(); + testItemIndexMock.setup((x) => x.clear()).callback(clearSpy); + testItemIndexMock.setup((x) => x.runIdToTestItemMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.runIdToVSidMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.vsIdToRunIdMap).returns(() => new Map()); + + discoveryHandler.processDiscovery( + payload, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + assert.ok(clearSpy.calledOnce); + }); + + test('should handle error status and create error node', () => { + const payload: DiscoveredTestPayload = { + cwd: workspaceUri.fsPath, + status: 'error', + error: ['Error message 1', 'Error message 2'], + }; + + const createErrorNodeSpy = sinon.spy(discoveryHandler, 'createErrorNode'); + + // Mock createTestItem to return a proper TestItem + const mockErrorItem = ({ + id: 'error_id', + error: null, + canResolveChildren: false, + tags: [], + } as unknown) as TestItem; + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => mockErrorItem); + + discoveryHandler.processDiscovery( + payload, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + assert.ok(createErrorNodeSpy.calledOnce); + assert.ok( + createErrorNodeSpy.calledWith(testControllerMock.object, workspaceUri, payload.error, testProvider), + ); + }); + + test('should handle both errors and tests in same payload', () => { + const tests: DiscoveredTestNode = { + path: '/foo/bar', + name: 'root', + type_: 'folder', + id_: 'root_id', + children: [], + }; + + const payload: DiscoveredTestPayload = { + cwd: workspaceUri.fsPath, + status: 'error', + error: ['Partial error'], + tests, + }; + + sinon.stub(utils, 'populateTestTree'); + const createErrorNodeSpy = sinon.spy(discoveryHandler, 'createErrorNode'); + + // Mock createTestItem to return a proper TestItem + const mockErrorItem = ({ + id: 'error_id', + error: null, + canResolveChildren: false, + tags: [], + } as unknown) as TestItem; + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => mockErrorItem); + + testItemIndexMock.setup((x) => x.clear()).returns(() => 
undefined); + testItemIndexMock.setup((x) => x.runIdToTestItemMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.runIdToVSidMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.vsIdToRunIdMap).returns(() => new Map()); + + discoveryHandler.processDiscovery( + payload, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + // Should create error node AND populate test tree + assert.ok(createErrorNodeSpy.calledOnce); + testItemIndexMock.verify((x) => x.clear(), typemoq.Times.once()); + }); + + test('should delete error node on successful discovery', () => { + const tests: DiscoveredTestNode = { + path: '/foo/bar', + name: 'root', + type_: 'folder', + id_: 'root_id', + children: [], + }; + + const payload: DiscoveredTestPayload = { + cwd: workspaceUri.fsPath, + status: 'success', + tests, + }; + + const deleteSpy = sinon.spy(); + // Reset and reconfigure the collection mock to capture delete call + testItemCollectionMock.reset(); + testItemCollectionMock + .setup((x) => x.delete(typemoq.It.isAny())) + .callback(deleteSpy) + .returns(() => undefined); + testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + testControllerMock.reset(); + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + + sinon.stub(utils, 'populateTestTree'); + testItemIndexMock.setup((x) => x.clear()).returns(() => undefined); + testItemIndexMock.setup((x) => x.runIdToTestItemMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.runIdToVSidMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.vsIdToRunIdMap).returns(() => new Map()); + + discoveryHandler.processDiscovery( + payload, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + assert.ok(deleteSpy.calledOnce); + assert.ok(deleteSpy.calledWith(`DiscoveryError:${workspaceUri.fsPath}`)); + }); + + test('should respect cancellation token', () => { + const tests: DiscoveredTestNode = { + path: '/foo/bar', + name: 'root', + type_: 'folder', + id_: 'root_id', + children: [], + }; + + const payload: DiscoveredTestPayload = { + cwd: workspaceUri.fsPath, + status: 'success', + tests, + }; + + const populateTestTreeStub = sinon.stub(utils, 'populateTestTree'); + testItemIndexMock.setup((x) => x.clear()).returns(() => undefined); + testItemIndexMock.setup((x) => x.runIdToTestItemMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.runIdToVSidMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.vsIdToRunIdMap).returns(() => new Map()); + + discoveryHandler.processDiscovery( + payload, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + // Verify token was passed to populateTestTree + assert.ok(populateTestTreeStub.calledOnce); + const lastArg = populateTestTreeStub.getCall(0).args[4]; + assert.strictEqual(lastArg, cancelationToken); + }); + + test('should handle null tests in payload', () => { + const payload: DiscoveredTestPayload = { + cwd: workspaceUri.fsPath, + status: 'success', + tests: null as any, + }; + + const populateTestTreeStub = sinon.stub(utils, 'populateTestTree'); + testItemIndexMock.setup((x) => x.clear()).returns(() => undefined); + testItemIndexMock.setup((x) => x.runIdToTestItemMap).returns(() => new Map()); + 
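+            // Aside (illustrative): typemoq treats a lambda that merely reads a property as a
+            // getter expectation, so these index maps could equally be backed by real instances:
+            //     const fakeRunIdMap = new Map<string, TestItem>();
+            //     testItemIndexMock.setup((x) => x.runIdToTestItemMap).returns(() => fakeRunIdMap);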
testItemIndexMock.setup((x) => x.runIdToVSidMap).returns(() => new Map()); + testItemIndexMock.setup((x) => x.vsIdToRunIdMap).returns(() => new Map()); + + discoveryHandler.processDiscovery( + payload, + testControllerMock.object, + testItemIndexMock.object, + workspaceUri, + testProvider, + cancelationToken, + ); + + // Should still call populateTestTree with null + assert.ok(populateTestTreeStub.calledOnce); + testItemIndexMock.verify((x) => x.clear(), typemoq.Times.once()); + }); + }); + + suite('createErrorNode', () => { + test('should create error with correct message for pytest', () => { + const error = ['Error line 1', 'Error line 2']; + testProvider = 'pytest'; + + const buildErrorNodeOptionsStub = sinon.stub(utils, 'buildErrorNodeOptions').returns({ + id: 'error_id', + label: 'Error Label', + error: 'Error Message', + }); + + const mockErrorItem = ({ + id: 'error_id', + error: null, + } as unknown) as TestItem; + + const createErrorTestItemStub = sinon.stub(testItemUtilities, 'createErrorTestItem').returns(mockErrorItem); + + const testItemCollectionMock = typemoq.Mock.ofType(); + testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + + discoveryHandler.createErrorNode(testControllerMock.object, workspaceUri, error, testProvider); + + assert.ok(buildErrorNodeOptionsStub.calledOnce); + assert.ok(createErrorTestItemStub.calledOnce); + assert.ok(mockErrorItem.error !== null); + }); + + test('should create error with correct message for unittest', () => { + const error = ['Unittest error']; + testProvider = 'unittest'; + + sinon.stub(utils, 'buildErrorNodeOptions').returns({ + id: 'error_id', + label: 'Error Label', + error: 'Error Message', + }); + + const mockErrorItem = ({ + id: 'error_id', + error: null, + } as unknown) as TestItem; + + sinon.stub(testItemUtilities, 'createErrorTestItem').returns(mockErrorItem); + + const testItemCollectionMock = typemoq.Mock.ofType(); + testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + + discoveryHandler.createErrorNode(testControllerMock.object, workspaceUri, error, testProvider); + + assert.ok(mockErrorItem.error !== null); + }); + + test('should set markdown error label correctly', () => { + const error = ['Test error']; + + sinon.stub(utils, 'buildErrorNodeOptions').returns({ + id: 'error_id', + label: 'Error Label', + error: 'Error Message', + }); + + const mockErrorItem = ({ + id: 'error_id', + error: null, + } as unknown) as TestItem; + + sinon.stub(testItemUtilities, 'createErrorTestItem').returns(mockErrorItem); + + const testItemCollectionMock = typemoq.Mock.ofType(); + testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + + discoveryHandler.createErrorNode(testControllerMock.object, workspaceUri, error, testProvider); + + assert.ok(mockErrorItem.error); + assert.strictEqual( + (mockErrorItem.error as any).value, + '[Show output](command:python.viewOutput) to view error logs', + ); + 
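+            // Context for the next assertion (per the VS Code API): command: links inside a
+            // MarkdownString are only honored when isTrusted is set, e.g.:
+            //     const md = new MarkdownString('[Show output](command:python.viewOutput)');
+            //     md.isTrusted = true; // without this the command link is rendered inert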
assert.strictEqual((mockErrorItem.error as any).isTrusted, true); + }); + + test('should handle undefined error array', () => { + sinon.stub(utils, 'buildErrorNodeOptions').returns({ + id: 'error_id', + label: 'Error Label', + error: 'Error Message', + }); + + const mockErrorItem = ({ + id: 'error_id', + error: null, + } as unknown) as TestItem; + + sinon.stub(testItemUtilities, 'createErrorTestItem').returns(mockErrorItem); + + const testItemCollectionMock = typemoq.Mock.ofType(); + testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + + discoveryHandler.createErrorNode(testControllerMock.object, workspaceUri, undefined, testProvider); + + // Should not throw + assert.ok(mockErrorItem.error !== null); + }); + + test('should reuse existing error node if present', () => { + const error = ['Error']; + + // Create a proper object with settable error property + const existingErrorItem: any = { + id: `DiscoveryError:${workspaceUri.fsPath}`, + error: null, + canResolveChildren: false, + tags: [], + }; + + sinon.stub(utils, 'buildErrorNodeOptions').returns({ + id: `DiscoveryError:${workspaceUri.fsPath}`, + label: 'Error Label', + error: 'Error Message', + }); + + const createErrorTestItemStub = sinon.stub(testItemUtilities, 'createErrorTestItem'); + + // Reset and setup collection to return existing item + testItemCollectionMock.reset(); + testItemCollectionMock + .setup((x) => x.get(`DiscoveryError:${workspaceUri.fsPath}`)) + .returns(() => existingErrorItem); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + testControllerMock.reset(); + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + + discoveryHandler.createErrorNode(testControllerMock.object, workspaceUri, error, testProvider); + + // Should not create a new error item + assert.ok(createErrorTestItemStub.notCalled); + // Should still update the error property + assert.ok(existingErrorItem.error !== null); + }); + + test('should handle multiple error messages', () => { + const error = ['Error 1', 'Error 2', 'Error 3']; + + const buildStub = sinon.stub(utils, 'buildErrorNodeOptions').returns({ + id: 'error_id', + label: 'Error Label', + error: 'Error Message', + }); + + const mockErrorItem = ({ + id: 'error_id', + error: null, + } as unknown) as TestItem; + + sinon.stub(testItemUtilities, 'createErrorTestItem').returns(mockErrorItem); + + const testItemCollectionMock = typemoq.Mock.ofType(); + testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined); + testItemCollectionMock.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined); + testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object); + + discoveryHandler.createErrorNode(testControllerMock.object, workspaceUri, error, testProvider); + + // Verify the error messages are joined + const expectedMessage = sinon.match((value: string) => { + return value.includes('Error 1') && value.includes('Error 2') && value.includes('Error 3'); + }); + sinon.assert.calledWith(buildStub, workspaceUri, expectedMessage, testProvider); + }); + }); +}); diff --git a/src/test/testing/testController/common/testExecutionHandler.unit.test.ts b/src/test/testing/testController/common/testExecutionHandler.unit.test.ts new file mode 100644 index 
000000000000..c6be4548c192 --- /dev/null +++ b/src/test/testing/testController/common/testExecutionHandler.unit.test.ts @@ -0,0 +1,922 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { TestController, TestItem, TestRun, TestMessage, Uri, Range, TestItemCollection, MarkdownString } from 'vscode'; +import * as typemoq from 'typemoq'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import { TestExecutionHandler } from '../../../../client/testing/testController/common/testExecutionHandler'; +import { TestItemIndex } from '../../../../client/testing/testController/common/testItemIndex'; +import { ExecutionTestPayload } from '../../../../client/testing/testController/common/types'; + +suite('TestExecutionHandler', () => { + let executionHandler: TestExecutionHandler; + let testControllerMock: typemoq.IMock; + let testItemIndexMock: typemoq.IMock; + let runInstanceMock: typemoq.IMock; + let mockTestItem: TestItem; + let mockParentItem: TestItem; + + setup(() => { + executionHandler = new TestExecutionHandler(); + testControllerMock = typemoq.Mock.ofType(); + testItemIndexMock = typemoq.Mock.ofType(); + runInstanceMock = typemoq.Mock.ofType(); + + mockTestItem = createMockTestItem('test1', 'Test 1'); + mockParentItem = createMockTestItem('parentTest', 'Parent Test'); + }); + + teardown(() => { + sinon.restore(); + }); + + suite('processExecution', () => { + test('should process empty payload without errors', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: {}, + error: '', + }; + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // No errors should be thrown + }); + + test('should process undefined result without errors', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + error: '', + }; + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // No errors should be thrown + }); + + test('should process multiple test results', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { test: 'test1', outcome: 'success', message: '', traceback: '' }, + test2: { test: 'test2', outcome: 'failure', message: 'Failed', traceback: 'traceback' }, + }, + error: '', + }; + + const mockTestItem2 = createMockTestItem('test2', 'Test 2'); + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + testItemIndexMock + .setup((x) => x.getTestItem('test2', testControllerMock.object)) + .returns(() => mockTestItem2); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.passed(mockTestItem), typemoq.Times.once()); + runInstanceMock.verify((r) => r.failed(mockTestItem2, typemoq.It.isAny()), typemoq.Times.once()); + }); + }); + + suite('handleTestError', () => { + test('should create error message with traceback', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'error', + message: 'Error occurred', + traceback: 'line1\nline2\nline3', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + 
.returns(() => mockTestItem); + + let capturedMessage: TestMessage | undefined; + runInstanceMock + .setup((r) => r.errored(mockTestItem, typemoq.It.isAny())) + .callback((_, message: TestMessage) => { + capturedMessage = message; + }); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + assert.ok(capturedMessage); + const messageText = + capturedMessage!.message instanceof MarkdownString + ? capturedMessage!.message.value + : capturedMessage!.message; + assert.ok(messageText.includes('Error occurred')); + assert.ok(messageText.includes('line1')); + assert.ok(messageText.includes('line2')); + runInstanceMock.verify((r) => r.errored(mockTestItem, typemoq.It.isAny()), typemoq.Times.once()); + }); + + test('should set location when test item has range', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'error', + message: 'Error', + traceback: '', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + + let capturedMessage: TestMessage | undefined; + runInstanceMock + .setup((r) => r.errored(mockTestItem, typemoq.It.isAny())) + .callback((_, message: TestMessage) => { + capturedMessage = message; + }); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + assert.ok(capturedMessage); + assert.ok(capturedMessage!.location); + assert.strictEqual(capturedMessage!.location!.uri.fsPath, mockTestItem.uri!.fsPath); + }); + + test('should handle missing traceback', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'error', + message: 'Error', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.errored(mockTestItem, typemoq.It.isAny()), typemoq.Times.once()); + }); + }); + + suite('handleTestFailure', () => { + test('should create failure message with traceback', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'failure', + message: 'Assertion failed', + traceback: 'AssertionError\nline1', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + + let capturedMessage: TestMessage | undefined; + runInstanceMock + .setup((r) => r.failed(mockTestItem, typemoq.It.isAny())) + .callback((_, message: TestMessage) => { + capturedMessage = message; + }); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + assert.ok(capturedMessage); + const messageText = + capturedMessage!.message instanceof MarkdownString + ? 
capturedMessage!.message.value + : capturedMessage!.message; + assert.ok(messageText.includes('Assertion failed')); + assert.ok(messageText.includes('AssertionError')); + runInstanceMock.verify((r) => r.failed(mockTestItem, typemoq.It.isAny()), typemoq.Times.once()); + }); + + test('should handle passed-unexpected outcome', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'passed-unexpected', + message: 'Unexpected pass', + traceback: '', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.failed(mockTestItem, typemoq.It.isAny()), typemoq.Times.once()); + }); + }); + + suite('handleTestSuccess', () => { + test('should mark test as passed', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'success', + message: '', + traceback: '', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.passed(mockTestItem), typemoq.Times.once()); + }); + + test('should handle expected-failure outcome', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'expected-failure', + message: '', + traceback: '', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.passed(mockTestItem), typemoq.Times.once()); + }); + + test('should not call passed when test item not found', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'success', + message: '', + traceback: '', + }, + }, + error: '', + }; + + testItemIndexMock.setup((x) => x.getTestItem('test1', testControllerMock.object)).returns(() => undefined); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.passed(typemoq.It.isAny()), typemoq.Times.never()); + }); + }); + + suite('handleTestSkipped', () => { + test('should mark test as skipped', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + test1: { + test: 'test1', + outcome: 'skipped', + message: 'Test skipped', + traceback: '', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('test1', testControllerMock.object)) + .returns(() => mockTestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.skipped(mockTestItem), typemoq.Times.once()); + }); + }); + + suite('handleSubtestFailure', () => { + test('should create child test item for 
subtest', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest (subtest1)': { + test: 'parentTest', + outcome: 'subtest-failure', + message: 'Subtest failed', + traceback: 'traceback', + subtest: 'subtest1', + }, + }, + error: '', + }; + + const mockSubtestItem = createMockTestItem('subtest1', 'Subtest 1'); + + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => undefined); + testItemIndexMock + .setup((x) => x.setSubtestStats('parentTest', typemoq.It.isAny())) + .returns(() => undefined); + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => mockSubtestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Verify stats were set correctly + testItemIndexMock.verify( + (x) => + x.setSubtestStats( + 'parentTest', + typemoq.It.is((stats) => stats.failed === 1 && stats.passed === 0), + ), + typemoq.Times.once(), + ); + + runInstanceMock.verify((r) => r.started(mockSubtestItem), typemoq.Times.once()); + runInstanceMock.verify((r) => r.failed(mockSubtestItem, typemoq.It.isAny()), typemoq.Times.once()); + }); + + test('should update stats correctly for multiple subtests', () => { + const payload1: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest (subtest1)': { + test: 'parentTest', + outcome: 'subtest-failure', + message: 'Failed', + traceback: '', + subtest: 'subtest1', + }, + }, + error: '', + }; + + const payload2: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest (subtest2)': { + test: 'parentTest', + outcome: 'subtest-failure', + message: 'Failed', + traceback: '', + subtest: 'subtest2', + }, + }, + error: '', + }; + + const mockSubtest1 = createMockTestItem('subtest1', 'Subtest 1'); + const mockSubtest2 = createMockTestItem('subtest2', 'Subtest 2'); + + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => mockParentItem); + + // First subtest: no existing stats + testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => undefined); + testItemIndexMock + .setup((x) => x.setSubtestStats('parentTest', typemoq.It.isAny())) + .returns(() => undefined); + + // Return different items based on call order + let callCount = 0; + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => { + callCount++; + return callCount === 1 ? 
mockSubtest1 : mockSubtest2; + }); + + executionHandler.processExecution( + payload1, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Second subtest: should have existing stats from first + testItemIndexMock.reset(); + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => ({ failed: 1, passed: 0 })); + + executionHandler.processExecution( + payload2, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Verify the first subtest set initial stats + runInstanceMock.verify((r) => r.started(mockSubtest1), typemoq.Times.once()); + runInstanceMock.verify((r) => r.started(mockSubtest2), typemoq.Times.once()); + }); + + test('should throw error when parent test item not found', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest (subtest1)': { + test: 'parentTest', + outcome: 'subtest-failure', + message: 'Failed', + traceback: '', + subtest: 'subtest1', + }, + }, + error: '', + }; + + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => undefined); + + assert.throws(() => { + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + }, /Parent test item not found/); + }); + }); + + suite('handleSubtestSuccess', () => { + test('should create passing subtest', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest (subtest1)': { + test: 'parentTest', + outcome: 'subtest-success', + message: '', + traceback: '', + subtest: 'subtest1', + }, + }, + error: '', + }; + + const mockSubtestItem = createMockTestItem('subtest1', 'Subtest 1'); + + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => undefined); + testItemIndexMock + .setup((x) => x.setSubtestStats('parentTest', typemoq.It.isAny())) + .returns(() => undefined); + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => mockSubtestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Verify stats were set correctly + testItemIndexMock.verify( + (x) => + x.setSubtestStats( + 'parentTest', + typemoq.It.is((stats) => stats.passed === 1 && stats.failed === 0), + ), + typemoq.Times.once(), + ); + + runInstanceMock.verify((r) => r.started(mockSubtestItem), typemoq.Times.once()); + runInstanceMock.verify((r) => r.passed(mockSubtestItem), typemoq.Times.once()); + }); + + test('should handle subtest with special characters in name', () => { + const payload: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest [subtest with spaces and [brackets]]': { + test: 'parentTest', + outcome: 'subtest-success', + message: '', + traceback: '', + subtest: 'subtest with spaces and [brackets]', + }, + }, + error: '', + }; + + const mockSubtestItem = createMockTestItem('[subtest with spaces and [brackets]]', 'Subtest'); + + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => 
mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => undefined); + testItemIndexMock + .setup((x) => x.setSubtestStats('parentTest', typemoq.It.isAny())) + .returns(() => undefined); + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => mockSubtestItem); + + executionHandler.processExecution( + payload, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + runInstanceMock.verify((r) => r.passed(mockSubtestItem), typemoq.Times.once()); + }); + }); + + suite('Comprehensive Subtest Scenarios', () => { + test('should handle mixed passing and failing subtests in sequence', () => { + // Simulates unittest with subtests like: test_even with i=0,1,2,3,4,5 + const mockSubtest0 = createMockTestItem('(i=0)', '(i=0)'); + const mockSubtest1 = createMockTestItem('(i=1)', '(i=1)'); + const mockSubtest2 = createMockTestItem('(i=2)', '(i=2)'); + const mockSubtest3 = createMockTestItem('(i=3)', '(i=3)'); + const mockSubtest4 = createMockTestItem('(i=4)', '(i=4)'); + const mockSubtest5 = createMockTestItem('(i=5)', '(i=5)'); + + const subtestItems = [mockSubtest0, mockSubtest1, mockSubtest2, mockSubtest3, mockSubtest4, mockSubtest5]; + + testItemIndexMock + .setup((x) => x.getTestItem('test_even', testControllerMock.object)) + .returns(() => mockParentItem); + + let subtestCallCount = 0; + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => subtestItems[subtestCallCount++]); + + // First subtest (i=0) - passes + testItemIndexMock.setup((x) => x.getSubtestStats('test_even')).returns(() => undefined); + testItemIndexMock.setup((x) => x.setSubtestStats('test_even', typemoq.It.isAny())).returns(() => undefined); + + const payload0: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'test_even (i=0)': { + test: 'test_even', + outcome: 'subtest-success', + message: '', + traceback: '', + subtest: '(i=0)', + }, + }, + error: '', + }; + + executionHandler.processExecution( + payload0, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Verify first subtest created stats + testItemIndexMock.verify( + (x) => + x.setSubtestStats( + 'test_even', + typemoq.It.is((stats) => stats.passed === 1 && stats.failed === 0), + ), + typemoq.Times.once(), + ); + + // Second subtest (i=1) - fails + testItemIndexMock.reset(); + testItemIndexMock + .setup((x) => x.getTestItem('test_even', testControllerMock.object)) + .returns(() => mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('test_even')).returns(() => ({ passed: 1, failed: 0 })); + + const payload1: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'test_even (i=1)': { + test: 'test_even', + outcome: 'subtest-failure', + message: '1 is not even', + traceback: 'AssertionError', + subtest: '(i=1)', + }, + }, + error: '', + }; + + executionHandler.processExecution( + payload1, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Third subtest (i=2) - passes + testItemIndexMock.reset(); + testItemIndexMock + .setup((x) => x.getTestItem('test_even', testControllerMock.object)) + .returns(() => mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('test_even')).returns(() => ({ passed: 1, failed: 1 })); + + const payload2: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 
'success', + result: { + 'test_even (i=2)': { + test: 'test_even', + outcome: 'subtest-success', + message: '', + traceback: '', + subtest: '(i=2)', + }, + }, + error: '', + }; + + executionHandler.processExecution( + payload2, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Verify all subtests were started and had outcomes + runInstanceMock.verify((r) => r.started(mockSubtest0), typemoq.Times.once()); + runInstanceMock.verify((r) => r.passed(mockSubtest0), typemoq.Times.once()); + runInstanceMock.verify((r) => r.started(mockSubtest1), typemoq.Times.once()); + runInstanceMock.verify((r) => r.failed(mockSubtest1, typemoq.It.isAny()), typemoq.Times.once()); + runInstanceMock.verify((r) => r.started(mockSubtest2), typemoq.Times.once()); + runInstanceMock.verify((r) => r.passed(mockSubtest2), typemoq.Times.once()); + }); + + test('should persist stats across multiple processExecution calls', () => { + // Test that stats persist in TestItemIndex across multiple processExecution calls + const mockSubtest1 = createMockTestItem('subtest1', 'Subtest 1'); + const mockSubtest2 = createMockTestItem('subtest2', 'Subtest 2'); + + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => undefined); + testItemIndexMock + .setup((x) => x.setSubtestStats('parentTest', typemoq.It.isAny())) + .returns(() => undefined); + + let callCount = 0; + testControllerMock + .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny())) + .returns(() => (callCount++ === 0 ? mockSubtest1 : mockSubtest2)); + + const payload1: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest (subtest1)': { + test: 'parentTest', + outcome: 'subtest-success', + message: '', + traceback: '', + subtest: 'subtest1', + }, + }, + error: '', + }; + + // First call - no existing stats + executionHandler.processExecution( + payload1, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Simulate stats being stored in TestItemIndex + testItemIndexMock.reset(); + testItemIndexMock + .setup((x) => x.getTestItem('parentTest', testControllerMock.object)) + .returns(() => mockParentItem); + testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => ({ passed: 1, failed: 0 })); + + const payload2: ExecutionTestPayload = { + cwd: '/foo/bar', + status: 'success', + result: { + 'parentTest (subtest2)': { + test: 'parentTest', + outcome: 'subtest-failure', + message: 'Failed', + traceback: '', + subtest: 'subtest2', + }, + }, + error: '', + }; + + // Second call - existing stats should be retrieved and updated + executionHandler.processExecution( + payload2, + runInstanceMock.object, + testItemIndexMock.object, + testControllerMock.object, + ); + + // Verify getSubtestStats was called to retrieve existing stats + testItemIndexMock.verify((x) => x.getSubtestStats('parentTest'), typemoq.Times.once()); + + // Verify both subtests were processed + runInstanceMock.verify((r) => r.passed(mockSubtest1), typemoq.Times.once()); + runInstanceMock.verify((r) => r.failed(mockSubtest2, typemoq.It.isAny()), typemoq.Times.once()); + }); + + test('should clear children only on first subtest when no existing stats', () => { + // When first subtest arrives, children should be cleared + // Subsequent subtests should NOT clear children + const mockSubtest1 = 
createMockTestItem('subtest1', 'Subtest 1');
+
+            testItemIndexMock
+                .setup((x) => x.getTestItem('parentTest', testControllerMock.object))
+                .returns(() => mockParentItem);
+            testItemIndexMock.setup((x) => x.getSubtestStats('parentTest')).returns(() => undefined);
+            testItemIndexMock
+                .setup((x) => x.setSubtestStats('parentTest', typemoq.It.isAny()))
+                .returns(() => undefined);
+            testControllerMock
+                .setup((t) => t.createTestItem(typemoq.It.isAny(), typemoq.It.isAny(), typemoq.It.isAny()))
+                .returns(() => mockSubtest1);
+
+            const payload: ExecutionTestPayload = {
+                cwd: '/foo/bar',
+                status: 'success',
+                result: {
+                    'parentTest (subtest1)': {
+                        test: 'parentTest',
+                        outcome: 'subtest-success',
+                        message: '',
+                        traceback: '',
+                        subtest: 'subtest1',
+                    },
+                },
+                error: '',
+            };
+
+            executionHandler.processExecution(
+                payload,
+                runInstanceMock.object,
+                testItemIndexMock.object,
+                testControllerMock.object,
+            );
+
+            // Verify setSubtestStats was called (which happens when creating new stats)
+            testItemIndexMock.verify((x) => x.setSubtestStats('parentTest', typemoq.It.isAny()), typemoq.Times.once());
+        });
+    });
+});
+
+function createMockTestItem(id: string, label: string): TestItem {
+    const range = new Range(0, 0, 0, 0);
+    const mockChildren = typemoq.Mock.ofType<TestItemCollection>();
+    mockChildren.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined);
+
+    const mockTestItem = ({
+        id,
+        label,
+        canResolveChildren: false,
+        tags: [],
+        children: mockChildren.object,
+        range,
+        uri: Uri.file('/foo/bar/test.py'),
+        parent: undefined,
+    } as unknown) as TestItem;
+
+    return mockTestItem;
+}
diff --git a/src/test/testing/testController/common/testItemIndex.unit.test.ts b/src/test/testing/testController/common/testItemIndex.unit.test.ts
new file mode 100644
index 000000000000..6712d90ff667
--- /dev/null
+++ b/src/test/testing/testController/common/testItemIndex.unit.test.ts
@@ -0,0 +1,359 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
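+//
+// Usage sketch (hypothetical, inferred from the assertions in this suite rather than
+// from the TestItemIndex implementation itself):
+//     const index = new TestItemIndex();
+//     index.registerTestItem('file.py::test_a', 'vsid::test_a', item);
+//     index.runIdToTestItemMap.get('file.py::test_a'); // -> item
+//     index.vsIdToRunIdMap.get('vsid::test_a'); // -> 'file.py::test_a'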
diff --git a/src/test/testing/testController/common/testItemIndex.unit.test.ts b/src/test/testing/testController/common/testItemIndex.unit.test.ts
new file mode 100644
index 000000000000..6712d90ff667
--- /dev/null
+++ b/src/test/testing/testController/common/testItemIndex.unit.test.ts
@@ -0,0 +1,359 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+import { TestController, TestItem, Uri, Range, TestItemCollection } from 'vscode';
+import * as typemoq from 'typemoq';
+import * as assert from 'assert';
+import * as sinon from 'sinon';
+import { TestItemIndex } from '../../../../client/testing/testController/common/testItemIndex';
+
+suite('TestItemIndex', () => {
+    let testItemIndex: TestItemIndex;
+    let testControllerMock: typemoq.IMock<TestController>;
+    let mockTestItem1: TestItem;
+    let mockTestItem2: TestItem;
+    let mockParentItem: TestItem;
+
+    setup(() => {
+        testItemIndex = new TestItemIndex();
+        testControllerMock = typemoq.Mock.ofType<TestController>();
+
+        // Create mock test items
+        mockTestItem1 = createMockTestItem('test1', 'Test 1');
+        mockTestItem2 = createMockTestItem('test2', 'Test 2');
+        mockParentItem = createMockTestItem('parent', 'Parent');
+    });
+
+    teardown(() => {
+        sinon.restore();
+    });
+
+    suite('registerTestItem', () => {
+        test('should store all three mappings correctly', () => {
+            const runId = 'test_file.py::test_example';
+            const vsId = 'test_file.py::test_example';
+
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.get(runId), mockTestItem1);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.get(runId), vsId);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.get(vsId), runId);
+        });
+
+        test('should overwrite existing mappings', () => {
+            const runId = 'test_file.py::test_example';
+            const vsId = 'test_file.py::test_example';
+
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem2);
+
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.get(runId), mockTestItem2);
+        });
+
+        test('should handle different runId and vsId', () => {
+            const runId = 'test_file.py::TestClass::test_method';
+            const vsId = 'different_id';
+
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            assert.strictEqual(testItemIndex.runIdToVSidMap.get(runId), vsId);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.get(vsId), runId);
+        });
+    });
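The assertions above fix a three-map contract: run id to `TestItem`, run id to VS Code id, and the reverse direction. A sketch of the shape they imply (illustrative only; the real class lives in `src/client/testing/testController/common/testItemIndex.ts`):

```typescript
import { TestItem } from 'vscode';

// Shape implied by the registerTestItem assertions; not the shipped class.
class TestItemIndexSketch {
    readonly runIdToTestItemMap = new Map<string, TestItem>();

    readonly runIdToVSidMap = new Map<string, string>();

    readonly vsIdToRunIdMap = new Map<string, string>();

    registerTestItem(runId: string, vsId: string, item: TestItem): void {
        // Plain Map.set semantics: re-registering the same runId overwrites,
        // which is exactly what the "should overwrite" test checks.
        this.runIdToTestItemMap.set(runId, item);
        this.runIdToVSidMap.set(runId, vsId);
        this.vsIdToRunIdMap.set(vsId, runId);
    }
}
```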
+
+    suite('getTestItem', () => {
+        test('should return item on direct lookup when valid', () => {
+            const runId = 'test_file.py::test_example';
+            const vsId = 'test_file.py::test_example';
+
+            // Register the item
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            // Mock the validation to return true
+            const isValidStub = sinon.stub(testItemIndex, 'isTestItemValid').returns(true);
+
+            const result = testItemIndex.getTestItem(runId, testControllerMock.object);
+
+            assert.strictEqual(result, mockTestItem1);
+            assert.ok(isValidStub.calledOnce);
+        });
+
+        test('should remove stale item and try vsId fallback', () => {
+            const runId = 'test_file.py::test_example';
+            const vsId = 'test_file.py::test_example';
+
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            // Mock validation to fail on first call (stale item)
+            const isValidStub = sinon.stub(testItemIndex, 'isTestItemValid').returns(false);
+
+            // Setup controller to not find the item
+            const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+            testItemCollectionMock.setup((x) => x.forEach(typemoq.It.isAny())).returns(() => undefined);
+            testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
+            const result = testItemIndex.getTestItem(runId, testControllerMock.object);
+
+            // Should have removed the stale item
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.get(runId), undefined);
+            assert.strictEqual(result, undefined);
+            assert.ok(isValidStub.calledOnce);
+        });
+
+        test('should perform vsId search when direct lookup is stale', () => {
+            const runId = 'test_file.py::test_example';
+            const vsId = 'test_file.py::test_example';
+
+            // Create test item with correct ID
+            const searchableTestItem = createMockTestItem(vsId, 'Test Example');
+
+            testItemIndex.registerTestItem(runId, vsId, searchableTestItem);
+
+            // First validation fails (stale), need to search by vsId
+            sinon.stub(testItemIndex, 'isTestItemValid').returns(false);
+
+            // Setup controller to find item by vsId
+            const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+            testItemCollectionMock
+                .setup((x) => x.forEach(typemoq.It.isAny()))
+                .callback((callback) => {
+                    callback(searchableTestItem);
+                })
+                .returns(() => undefined);
+            testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
+            const result = testItemIndex.getTestItem(runId, testControllerMock.object);
+
+            // Should recache the found item
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.get(runId), searchableTestItem);
+            assert.strictEqual(result, searchableTestItem);
+        });
+
+        test('should return undefined if not found anywhere', () => {
+            const runId = 'nonexistent';
+
+            const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+            testItemCollectionMock.setup((x) => x.forEach(typemoq.It.isAny())).returns(() => undefined);
+            testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
+            const result = testItemIndex.getTestItem(runId, testControllerMock.object);
+
+            assert.strictEqual(result, undefined);
+        });
+    });
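Taken together, the `getTestItem` tests describe a validate-then-fall-back lookup: trust the cached item only while it still hangs off the controller, otherwise drop it, scan `controller.items` for the VS Code id, and recache any hit. A hedged sketch of that control flow (the helper below is illustrative, and it only scans top-level items; the real method may also recurse):

```typescript
import { TestController, TestItem } from 'vscode';

// Illustrative lookup mirroring the four getTestItem cases above.
function getTestItemSketch(
    runId: string,
    controller: TestController,
    runIdToItem: Map<string, TestItem>,
    runIdToVSid: Map<string, string>,
    isValid: (item: TestItem, controller: TestController) => boolean,
): TestItem | undefined {
    const cached = runIdToItem.get(runId);
    if (cached !== undefined) {
        if (isValid(cached, controller)) {
            return cached; // Direct hit that is still attached to the tree.
        }
        runIdToItem.delete(runId); // Stale reference: discard it.
    }
    // Fallback: search the controller's items for the matching VS Code id.
    const vsId = runIdToVSid.get(runId);
    let found: TestItem | undefined;
    controller.items.forEach((item) => {
        if (item.id === vsId) {
            found = item;
        }
    });
    if (found !== undefined) {
        runIdToItem.set(runId, found); // Recache so the next lookup is direct.
    }
    return found;
}
```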
+
+    suite('getRunId and getVSId', () => {
+        test('getRunId should convert VS Code ID to Python run ID', () => {
+            const runId = 'test_file.py::test_example';
+            const vsId = 'vscode_id';
+
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            assert.strictEqual(testItemIndex.getRunId(vsId), runId);
+        });
+
+        test('getRunId should return undefined for unknown vsId', () => {
+            assert.strictEqual(testItemIndex.getRunId('unknown'), undefined);
+        });
+
+        test('getVSId should convert Python run ID to VS Code ID', () => {
+            const runId = 'test_file.py::test_example';
+            const vsId = 'vscode_id';
+
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            assert.strictEqual(testItemIndex.getVSId(runId), vsId);
+        });
+
+        test('getVSId should return undefined for unknown runId', () => {
+            assert.strictEqual(testItemIndex.getVSId('unknown'), undefined);
+        });
+    });
+
+    suite('clear', () => {
+        test('should remove all mappings', () => {
+            testItemIndex.registerTestItem('runId1', 'vsId1', mockTestItem1);
+            testItemIndex.registerTestItem('runId2', 'vsId2', mockTestItem2);
+
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.size, 2);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.size, 2);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.size, 2);
+
+            testItemIndex.clear();
+
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.size, 0);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.size, 0);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.size, 0);
+        });
+
+        test('should handle clearing empty index', () => {
+            testItemIndex.clear();
+
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.size, 0);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.size, 0);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.size, 0);
+        });
+    });
+
+    suite('isTestItemValid', () => {
+        test('should return true for item with valid parent chain leading to controller', () => {
+            const childItem = createMockTestItem('child', 'Child');
+            (childItem as any).parent = mockParentItem;
+
+            const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+            testItemCollectionMock.setup((x) => x.get(mockParentItem.id)).returns(() => mockParentItem);
+            testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
+            const result = testItemIndex.isTestItemValid(childItem, testControllerMock.object);
+
+            assert.strictEqual(result, true);
+        });
+
+        test('should return false for orphaned item', () => {
+            const orphanedItem = createMockTestItem('orphaned', 'Orphaned');
+            (orphanedItem as any).parent = mockParentItem;
+
+            const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+            testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined);
+            testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
+            const result = testItemIndex.isTestItemValid(orphanedItem, testControllerMock.object);
+
+            assert.strictEqual(result, false);
+        });
+
+        test('should return true for root item in controller', () => {
+            const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+            testItemCollectionMock.setup((x) => x.get(mockTestItem1.id)).returns(() => mockTestItem1);
+            testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
+            const result = testItemIndex.isTestItemValid(mockTestItem1, testControllerMock.object);
+
+            assert.strictEqual(result, true);
+        });
+
+        test('should return false for item not in controller and no parent', () => {
+            const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+            testItemCollectionMock.setup((x) => x.get(typemoq.It.isAny())).returns(() => undefined);
+            testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
+            const result = testItemIndex.isTestItemValid(mockTestItem1, testControllerMock.object);
+
+            assert.strictEqual(result, false);
+        });
+    });
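The four `isTestItemValid` cases reduce to one rule: follow `parent` links up to the root, then ask the controller whether that root is still registered. A minimal sketch of that walk (illustrative, under the assumption that the real check works the same way the mocks are wired):

```typescript
import { TestController, TestItem } from 'vscode';

// Illustrative validity check: an item is live only if its root is still
// present in controller.items; orphaned chains and unregistered roots fail.
function isTestItemValidSketch(item: TestItem, controller: TestController): boolean {
    let root = item;
    while (root.parent !== undefined) {
        root = root.parent;
    }
    return controller.items.get(root.id) !== undefined;
}
```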
+
+    suite('cleanupStaleReferences', () => {
+        test('should remove items not in controller', () => {
+            const runId1 = 'test1';
+            const runId2 = 'test2';
+            const vsId1 = 'vs1';
+            const vsId2 = 'vs2';
+
+            testItemIndex.registerTestItem(runId1, vsId1, mockTestItem1);
+            testItemIndex.registerTestItem(runId2, vsId2, mockTestItem2);
+
+            // Mock validation: first item invalid, second valid
+            const isValidStub = sinon.stub(testItemIndex, 'isTestItemValid');
+            isValidStub.onFirstCall().returns(false); // mockTestItem1 is invalid
+            isValidStub.onSecondCall().returns(true); // mockTestItem2 is valid
+
+            testItemIndex.cleanupStaleReferences(testControllerMock.object);
+
+            // First item should be removed
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.get(runId1), undefined);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.get(runId1), undefined);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.get(vsId1), undefined);
+
+            // Second item should remain
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.get(runId2), mockTestItem2);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.get(runId2), vsId2);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.get(vsId2), runId2);
+        });
+
+        test('should keep all valid items', () => {
+            const runId1 = 'test1';
+            const vsId1 = 'vs1';
+
+            testItemIndex.registerTestItem(runId1, vsId1, mockTestItem1);
+
+            sinon.stub(testItemIndex, 'isTestItemValid').returns(true);
+
+            testItemIndex.cleanupStaleReferences(testControllerMock.object);
+
+            // Item should still be there
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.get(runId1), mockTestItem1);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.get(runId1), vsId1);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.get(vsId1), runId1);
+        });
+
+        test('should handle empty index', () => {
+            testItemIndex.cleanupStaleReferences(testControllerMock.object);
+
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.size, 0);
+        });
+
+        test('should remove all items when all are invalid', () => {
+            testItemIndex.registerTestItem('test1', 'vs1', mockTestItem1);
+            testItemIndex.registerTestItem('test2', 'vs2', mockTestItem2);
+
+            sinon.stub(testItemIndex, 'isTestItemValid').returns(false);
+
+            testItemIndex.cleanupStaleReferences(testControllerMock.object);
+
+            assert.strictEqual(testItemIndex.runIdToTestItemMap.size, 0);
+            assert.strictEqual(testItemIndex.runIdToVSidMap.size, 0);
+            assert.strictEqual(testItemIndex.vsIdToRunIdMap.size, 0);
+        });
+    });
+
+    suite('Backward compatibility getters', () => {
+        test('runIdToTestItemMap should return the internal map', () => {
+            const runId = 'test1';
+            testItemIndex.registerTestItem(runId, 'vs1', mockTestItem1);
+
+            const map = testItemIndex.runIdToTestItemMap;
+
+            assert.strictEqual(map.get(runId), mockTestItem1);
+        });
+
+        test('runIdToVSidMap should return the internal map', () => {
+            const runId = 'test1';
+            const vsId = 'vs1';
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            const map = testItemIndex.runIdToVSidMap;
+
+            assert.strictEqual(map.get(runId), vsId);
+        });
+
+        test('vsIdToRunIdMap should return the internal map', () => {
+            const runId = 'test1';
+            const vsId = 'vs1';
+            testItemIndex.registerTestItem(runId, vsId, mockTestItem1);
+
+            const map = testItemIndex.vsIdToRunIdMap;
+
+            assert.strictEqual(map.get(vsId), runId);
+        });
+    });
+});
+
+function createMockTestItem(id: string, label: string): TestItem {
+    const range = new Range(0, 0, 0, 0);
+    const mockChildren = typemoq.Mock.ofType<TestItemCollection>();
+    mockChildren.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined);
+
+    const mockTestItem = ({
+        id,
+        label,
+        canResolveChildren: false,
+        tags: [],
+        children: mockChildren.object,
+        range,
+        uri: Uri.file('/foo/bar'),
+        parent: undefined,
+    } as unknown) as TestItem;
+
+    return mockTestItem;
+}
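The cleanup suite implies a sweep that validates every cached item and removes all three mappings for any stale entry, so none of the maps can leak a dead `TestItem`. A possible shape, reusing the illustrative maps from the sketches above (validation is injected the same way the tests stub `isTestItemValid`):

```typescript
import { TestController, TestItem } from 'vscode';

// Illustrative sweep for cleanupStaleReferences; not the shipped method.
function cleanupStaleReferencesSketch(
    controller: TestController,
    runIdToItem: Map<string, TestItem>,
    runIdToVSid: Map<string, string>,
    vsIdToRunId: Map<string, string>,
    isValid: (item: TestItem, controller: TestController) => boolean,
): void {
    // Copy the entries so deletion during iteration is safe.
    for (const [runId, item] of Array.from(runIdToItem.entries())) {
        if (!isValid(item, controller)) {
            const vsId = runIdToVSid.get(runId);
            runIdToItem.delete(runId);
            runIdToVSid.delete(runId);
            if (vsId !== undefined) {
                vsIdToRunId.delete(vsId);
            }
        }
    }
}
```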
diff --git a/src/test/testing/testController/resultResolver.unit.test.ts b/src/test/testing/testController/resultResolver.unit.test.ts
index 05d2ee1dd0f3..e4b350a20750 100644
--- a/src/test/testing/testController/resultResolver.unit.test.ts
+++ b/src/test/testing/testController/resultResolver.unit.test.ts
@@ -93,12 +93,13 @@ suite('Result Resolver tests', () => {
         // assert the stub functions were called with the correct parameters
         // header of populateTestTree is (testController: TestController, testTreeData: DiscoveredTestNode, testRoot: TestItem | undefined, resultResolver: ITestResultResolver, token?: CancellationToken)
+        // After refactor, an inline object with testItemIndex maps is passed instead of resultResolver
         sinon.assert.calledWithMatch(
             populateTestTreeStub,
             testController, // testController
             tests, // testTreeData
             undefined, // testRoot
-            resultResolver, // resultResolver
+            sinon.match.has('runIdToTestItem'), // inline object with maps
             cancelationToken, // token
         );
     });
@@ -182,12 +183,13 @@ suite('Result Resolver tests', () => {
         sinon.assert.calledWithMatch(createErrorTestItemStub, sinon.match.any, sinon.match.any);

         // also calls populateTestTree with the discovery test results
+        // After refactor, an inline object with testItemIndex maps is passed instead of resultResolver
         sinon.assert.calledWithMatch(
             populateTestTreeStub,
             testController, // testController
             tests, // testTreeData
             undefined, // testRoot
-            resultResolver, // resultResolver
+            sinon.match.has('runIdToTestItem'), // inline object with maps
             cancelationToken, // token
         );
     });
@@ -327,6 +329,34 @@
         sinon.stub(testItemUtilities, 'clearAllChildren').callsFake(() => undefined);
         testProvider = 'unittest';
         workspaceUri = Uri.file('/foo/bar');
+
+        // Create parent test item with correct ID
+        const mockParentItem = createMockTestItem('parentTest');
+
+        // Update testControllerMock to include parent item in its collection
+        const mockTestItems: [string, TestItem][] = [
+            ['1', mockTestItem1],
+            ['2', mockTestItem2],
+            ['parentTest', mockParentItem],
+        ];
+        const iterableMock = mockTestItems[Symbol.iterator]();
+
+        const testItemCollectionMock = typemoq.Mock.ofType<TestItemCollection>();
+        testItemCollectionMock
+            .setup((x) => x.forEach(typemoq.It.isAny()))
+            .callback((callback) => {
+                let result = iterableMock.next();
+                while (!result.done) {
+                    callback(result.value[1]);
+                    result = iterableMock.next();
+                }
+            })
+            .returns(() => mockTestItem1);
+        testItemCollectionMock.setup((x) => x.get('parentTest')).returns(() => mockParentItem);
+
+        testControllerMock.reset();
+        testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);
+
         resultResolver = new ResultResolver.PythonResultResolver(
             testControllerMock.object,
             testProvider,
@@ -334,13 +364,16 @@
         );
         const subtestName = 'parentTest [subTest with spaces and [brackets]]';
         const mockSubtestItem = createMockTestItem(subtestName);
+
         // add a mock test item to the map of known VSCode ids to run ids
         resultResolver.runIdToVSid.set('mockTestItem2', 'mockTestItem2');
         // creates a mock test item with a space which will be used to split the runId
         resultResolver.runIdToVSid.set(subtestName, subtestName);
+        // Register parent test in testItemIndex so it can be found by getTestItem
+        resultResolver.runIdToVSid.set('parentTest', 'parentTest');

         // add this mock test to the map of known test items
-        resultResolver.runIdToTestItem.set('parentTest', mockTestItem2);
+        resultResolver.runIdToTestItem.set('parentTest', mockParentItem);
         resultResolver.runIdToTestItem.set(subtestName, mockSubtestItem);

         let generatedId: string | undefined;
@@ -563,15 +596,15 @@
 function createMockTestItem(id: string): TestItem {
     const range = new Range(0, 0, 0, 0);
+    const mockChildren = typemoq.Mock.ofType<TestItemCollection>();
+    mockChildren.setup((x) => x.add(typemoq.It.isAny())).returns(() => undefined);
+    mockChildren.setup((x) => x.forEach(typemoq.It.isAny())).returns(() => undefined);
+
     const mockTestItem = ({
         id,
         canResolveChildren: false,
         tags: [],
-        children: {
-            add: () => {
-                // empty
-            },
-        },
+        children: mockChildren.object,
         range,
         uri: Uri.file('/foo/bar'),
     } as unknown) as TestItem;
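The `populateTestTree` assertions above use `sinon.match.has` so the test recognizes the new inline maps argument by a single property instead of pinning its whole shape. A standalone example of that matcher (the stub below is a stand-in, not the real `populateTestTree`):

```typescript
import * as sinon from 'sinon';

// sinon.match.has(name) matches any argument exposing that property.
const populateStub = sinon.stub();

populateStub({ runIdToTestItem: new Map(), runIdToVSid: new Map() });

// Passes: the first argument has a runIdToTestItem property.
sinon.assert.calledWithMatch(populateStub, sinon.match.has('runIdToTestItem'));
```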
diff --git a/src/test/testing/testController/workspaceTestAdapter.unit.test.ts b/src/test/testing/testController/workspaceTestAdapter.unit.test.ts
index aac07793ca66..6d2895ca2979 100644
--- a/src/test/testing/testController/workspaceTestAdapter.unit.test.ts
+++ b/src/test/testing/testController/workspaceTestAdapter.unit.test.ts
@@ -147,7 +147,7 @@ suite('Workspace test adapter', () => {
         const testProvider = 'unittest';
         execFactory = typemoq.Mock.ofType<IPythonExecutionFactory>();

-        await workspaceTestAdapter.discoverTests(testController, undefined, execFactory.object);
+        await workspaceTestAdapter.discoverTests(testController, execFactory.object);

         sinon.assert.calledWithMatch(createErrorTestItemStub, sinon.match.any, sinon.match.any);
         sinon.assert.calledWithMatch(buildErrorNodeOptionsStub, uriFoo, sinon.match.any, testProvider);
@@ -166,7 +166,7 @@
             stubResultResolver,
         );

-        await workspaceTestAdapter.discoverTests(testController, undefined, execFactory.object);
+        await workspaceTestAdapter.discoverTests(testController, execFactory.object);

         sinon.assert.calledOnce(discoverTestsStub);
     });
@@ -193,8 +193,8 @@
         );

         // Try running discovery twice
-        const one = workspaceTestAdapter.discoverTests(testController);
-        const two = workspaceTestAdapter.discoverTests(testController);
+        const one = workspaceTestAdapter.discoverTests(testController, execFactory.object);
+        const two = workspaceTestAdapter.discoverTests(testController, execFactory.object);

         Promise.all([one, two]);
@@ -215,7 +215,7 @@
             stubResultResolver,
         );

-        await workspaceTestAdapter.discoverTests(testController, undefined, execFactory.object);
+        await workspaceTestAdapter.discoverTests(testController, execFactory.object);

         sinon.assert.calledWith(sendTelemetryStub, EventName.UNITTEST_DISCOVERY_DONE);
         assert.strictEqual(telemetryEvent.length, 2);
@@ -238,7 +238,7 @@
             stubResultResolver,
         );

-        await workspaceTestAdapter.discoverTests(testController);
+        await workspaceTestAdapter.discoverTests(testController, execFactory.object);

         sinon.assert.calledWith(sendTelemetryStub, EventName.UNITTEST_DISCOVERY_DONE);
         assert.strictEqual(telemetryEvent.length, 2);
@@ -256,6 +256,7 @@
     let testControllerMock: typemoq.IMock<TestController>;
     let telemetryEvent: { eventName: EventName; properties: Record<string, unknown> }[] = [];
     let resultResolver: ResultResolver.PythonResultResolver;
+    let execFactory: typemoq.IMock<IPythonExecutionFactory>;

     // Stubbed test controller (see comment around L.40)
     let testController: TestController;
@@ -328,6 +329,7 @@
         executionTestsStub = sandbox.stub(UnittestTestExecutionAdapter.prototype, 'runTests');
         sendTelemetryStub = sandbox.stub(Telemetry, 'sendTelemetryEvent').callsFake(mockSendTelemetryEvent);

+        execFactory = typemoq.Mock.ofType<IPythonExecutionFactory>();
         runInstance = typemoq.Mock.ofType<TestRun>();

         const testProvider = 'pytest';
@@ -384,7 +386,12 @@
         testControllerMock = typemoq.Mock.ofType<TestController>();
         testControllerMock.setup((t) => t.items).returns(() => testItemCollectionMock.object);

-        await workspaceTestAdapter.executeTests(testController, runInstance.object, [mockTestItem1, mockTestItem2]);
+        await workspaceTestAdapter.executeTests(
+            testController,
+            runInstance.object,
+            [mockTestItem1, mockTestItem2],
+            execFactory.object,
+        );

         runInstance.verify((r) => r.started(typemoq.It.isAny()), typemoq.Times.exactly(2));
     });
@@ -400,7 +407,7 @@
             stubResultResolver,
         );

-        await workspaceTestAdapter.executeTests(testController, runInstance.object, []);
+        await workspaceTestAdapter.executeTests(testController, runInstance.object, [], execFactory.object);

         sinon.assert.calledOnce(executionTestsStub);
     });
@@ -427,8 +434,8 @@
         );

         // Try running discovery twice
-        const one = workspaceTestAdapter.executeTests(testController, runInstance.object, []);
-        const two = workspaceTestAdapter.executeTests(testController, runInstance.object, []);
+        const one = workspaceTestAdapter.executeTests(testController, runInstance.object, [], execFactory.object);
+        const two = workspaceTestAdapter.executeTests(testController, runInstance.object, [], execFactory.object);

         Promise.all([one, two]);
@@ -467,7 +474,7 @@
         const buildErrorNodeOptionsStub = sinon.stub(util, 'buildErrorNodeOptions').returns(errorTestItemOptions);
         const testProvider = 'unittest';

-        await workspaceTestAdapter.executeTests(testController, runInstance.object, []);
+        await workspaceTestAdapter.executeTests(testController, runInstance.object, [], execFactory.object);

         sinon.assert.calledWithMatch(createErrorTestItemStub, sinon.match.any, sinon.match.any);
         sinon.assert.calledWithMatch(buildErrorNodeOptionsStub, Uri.parse('foo'), sinon.match.any, testProvider);
@@ -487,7 +494,7 @@
             stubResultResolver,
         );

-        await workspaceTestAdapter.executeTests(testController, runInstance.object, []);
+        await workspaceTestAdapter.executeTests(testController, runInstance.object, [], execFactory.object);

         sinon.assert.calledWith(sendTelemetryStub, EventName.UNITTEST_RUN_ALL_FAILED);
         assert.strictEqual(telemetryEvent.length, 1);
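Every `discoverTests`/`executeTests` call in this file now threads the execution factory through explicitly rather than relying on an optional trailing parameter. A minimal typemoq pattern for supplying it; the `IPythonExecutionFactory` placeholder and the adapter surface below are assumptions standing in for the real interfaces:

```typescript
import * as typemoq from 'typemoq';

// Placeholder for the real IPythonExecutionFactory interface (assumption).
interface IPythonExecutionFactory {
    create(options: unknown): Promise<unknown>;
}

// Hypothetical adapter surface, shown only to illustrate the call shape.
declare const workspaceTestAdapter: {
    discoverTests(controller: unknown, execFactory: IPythonExecutionFactory): Promise<void>;
};
declare const testController: unknown;

async function example(): Promise<void> {
    // Build the mock once in setup() and pass .object at every call site.
    const execFactory = typemoq.Mock.ofType<IPythonExecutionFactory>();
    await workspaceTestAdapter.discoverTests(testController, execFactory.object);
}
```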
diff --git a/src/test/vscode-mock.ts b/src/test/vscode-mock.ts
index 0605b1718166..3e2816afbbde 100644
--- a/src/test/vscode-mock.ts
+++ b/src/test/vscode-mock.ts
@@ -134,3 +134,17 @@ mockedVSCode.LogLevel = vscodeMocks.LogLevel;
 (mockedVSCode as any).CancellationError = vscodeMocks.vscMockExtHostedTypes.CancellationError;
 (mockedVSCode as any).LSPCancellationError = vscodeMocks.vscMockExtHostedTypes.LSPCancellationError;
 mockedVSCode.TestRunProfileKind = vscodeMocks.TestRunProfileKind;
+(mockedVSCode as any).TestCoverageCount = class TestCoverageCount {
+    constructor(public covered: number, public total: number) {}
+};
+(mockedVSCode as any).FileCoverage = class FileCoverage {
+    constructor(
+        public uri: any,
+        public statementCoverage: any,
+        public branchCoverage?: any,
+        public declarationCoverage?: any,
+    ) {}
+};
+(mockedVSCode as any).StatementCoverage = class StatementCoverage {
+    constructor(public executed: number | boolean, public location: any, public branches?: any) {}
+};
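The three shims added to vscode-mock.ts mirror the constructor shapes of VS Code's test coverage API, so unit tests can build coverage objects without an extension host. A small usage sketch against the real `vscode` typings:

```typescript
import { FileCoverage, Position, StatementCoverage, TestCoverageCount, Uri } from 'vscode';

// Per-file summary: 8 of 10 statements executed.
const statementCounts = new TestCoverageCount(8, 10);
const fileCoverage = new FileCoverage(Uri.file('/foo/bar/test.py'), statementCounts);

// One detailed record: the statement at line 0, column 0 was executed.
const statementDetail = new StatementCoverage(true, new Position(0, 0));

console.log(fileCoverage.statementCoverage.covered, statementDetail.executed);
```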