From c76b319e4605333a532bfadd604187e5414e98d8 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 10:07:28 +0200
Subject: [PATCH 01/18] chore: expose schema_cache & file_context in lint
 rules

---
 Cargo.lock                                    |  3 +
 crates/pgt_analyse/Cargo.toml                 |  1 +
 .../pgt_analyse/src/analysed_file_context.rs  |  7 ++
 crates/pgt_analyse/src/lib.rs                 |  4 +-
 crates/pgt_analyse/src/registry.rs            |  5 +-
 crates/pgt_analyse/src/rule.rs                |  9 +-
 crates/pgt_analyser/Cargo.toml                |  2 +
 crates/pgt_analyser/src/lib.rs                | 57 ++++++++---
 .../src/lint/safety/adding_required_field.rs  | 11 ++-
 .../src/lint/safety/ban_drop_column.rs        | 11 ++-
 .../src/lint/safety/ban_drop_database.rs      | 11 ++-
 .../src/lint/safety/ban_drop_not_null.rs      | 11 ++-
 .../src/lint/safety/ban_drop_table.rs         | 11 ++-
 .../src/lint/safety/ban_truncate_cascade.rs   | 11 ++-
 crates/pgt_analyser/tests/rules_tests.rs      | 12 ++-
 crates/pgt_query_ext/src/diagnostics.rs       |  2 +-
 crates/pgt_workspace/src/workspace/server.rs  | 99 +++++++++++--------
 .../src/workspace/server/parsed_document.rs   | 27 +++++
 docs/codegen/src/rules_docs.rs                | 14 ++-
 .../codegen/src/generate_new_analyser_rule.rs |  9 +-
 xtask/rules_check/src/lib.rs                  | 14 ++-
 21 files changed, 244 insertions(+), 87 deletions(-)
 create mode 100644 crates/pgt_analyse/src/analysed_file_context.rs

diff --git a/Cargo.lock b/Cargo.lock
index ce816de89..83d3fbf87 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2579,6 +2579,7 @@ dependencies = [
  "pgt_console",
  "pgt_diagnostics",
  "pgt_query_ext",
+ "pgt_schema_cache",
  "pgt_text_size",
  "rustc-hash 2.1.0",
  "schemars",
@@ -2594,7 +2595,9 @@ dependencies = [
  "pgt_console",
  "pgt_diagnostics",
  "pgt_query_ext",
+ "pgt_schema_cache",
  "pgt_test_macros",
+ "pgt_text_size",
  "serde",
  "termcolor",
 ]
diff --git a/crates/pgt_analyse/Cargo.toml b/crates/pgt_analyse/Cargo.toml
index 75eb0211c..c2d890564 100644
--- a/crates/pgt_analyse/Cargo.toml
+++ b/crates/pgt_analyse/Cargo.toml
@@ -16,6 +16,7 @@ version = "0.0.0"
 pgt_console.workspace = true
 pgt_diagnostics.workspace = true
 pgt_query_ext.workspace = true
+pgt_schema_cache.workspace = true
 rustc-hash = { workspace = true }
 
 biome_deserialize = { workspace = true, optional = true }
diff --git a/crates/pgt_analyse/src/analysed_file_context.rs b/crates/pgt_analyse/src/analysed_file_context.rs
new file mode 100644
index 000000000..cba53eebe
--- /dev/null
+++ b/crates/pgt_analyse/src/analysed_file_context.rs
@@ -0,0 +1,7 @@
+#[derive(Default)]
+pub struct AnalysedFileContext {}
+
+impl AnalysedFileContext {
+    #[allow(unused)]
+    pub fn update_from(&mut self, stmt_root: &pgt_query_ext::NodeEnum) {}
+}
diff --git a/crates/pgt_analyse/src/lib.rs b/crates/pgt_analyse/src/lib.rs
index f312de45a..bc4ea096a 100644
--- a/crates/pgt_analyse/src/lib.rs
+++ b/crates/pgt_analyse/src/lib.rs
@@ -1,14 +1,16 @@
+mod analysed_file_context;
 mod categories;
 pub mod context;
 mod filter;
 pub mod macros;
 pub mod options;
 mod registry;
-mod rule;
+pub mod rule;
 
 // Re-exported for use in the `declare_group` macro
 pub use pgt_diagnostics::category_concat;
 
+pub use crate::analysed_file_context::AnalysedFileContext;
 pub use crate::categories::{
     ActionCategory, RefactorKind, RuleCategories, RuleCategoriesBuilder, RuleCategory,
     SUPPRESSION_ACTION_CATEGORY, SourceActionKind,
diff --git a/crates/pgt_analyse/src/registry.rs b/crates/pgt_analyse/src/registry.rs
index 48b73b154..058f6dc3a 100644
--- a/crates/pgt_analyse/src/registry.rs
+++ b/crates/pgt_analyse/src/registry.rs
@@ -2,6 +2,7 @@ use std::{borrow, collections::BTreeSet};
 
 use crate::{
     AnalyserOptions,
+    analysed_file_context::AnalysedFileContext,
     context::RuleContext,
     filter::{AnalysisFilter, GroupKey, RuleKey},
     rule::{GroupCategory, Rule, RuleDiagnostic, RuleGroup},
@@ -158,6 +159,8 @@ impl RuleRegistry {
 pub struct RegistryRuleParams<'a> {
     pub root: &'a pgt_query_ext::NodeEnum,
     pub options: &'a AnalyserOptions,
+    pub analysed_file_context: &'a AnalysedFileContext,
+    pub schema_cache: Option<&'a pgt_schema_cache::SchemaCache>,
 }
 
 /// Executor for rule as a generic function pointer
@@ -175,7 +178,7 @@ impl RegistryRule {
     {
         let options = params.options.rule_options::<R>().unwrap_or_default();
         let ctx = RuleContext::new(params.root, &options);
-        R::run(&ctx)
+        R::run(&ctx, params.analysed_file_context, params.schema_cache)
     }
 
     Self { run: run::<R> }
diff --git a/crates/pgt_analyse/src/rule.rs b/crates/pgt_analyse/src/rule.rs
index fae3cda36..3648bb099 100644
--- a/crates/pgt_analyse/src/rule.rs
+++ b/crates/pgt_analyse/src/rule.rs
@@ -5,10 +5,12 @@ use pgt_diagnostics::{
     Advices, Category, Diagnostic, DiagnosticTags, Location, LogCategory, MessageAndDescription,
     Severity, Visit,
 };
+use pgt_schema_cache::SchemaCache;
 use pgt_text_size::TextRange;
 use std::cmp::Ordering;
 use std::fmt::Debug;
 
+use crate::analysed_file_context::AnalysedFileContext;
 use crate::{categories::RuleCategory, context::RuleContext, registry::RegistryVisitor};
 
 #[derive(Clone, Debug)]
@@ -102,7 +104,12 @@ pub trait GroupCategory {
 pub trait Rule: RuleMeta + Sized {
     type Options: Default + Clone + Debug;
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic>;
+    /// `schema_cache` will only be available if the user has a working database connection.
+    fn run(
+        rule_context: &RuleContext<Self>,
+        file_context: &AnalysedFileContext,
+        schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic>;
 }
 
 /// Diagnostic object returned by a single analysis rule
diff --git a/crates/pgt_analyser/Cargo.toml b/crates/pgt_analyser/Cargo.toml
index e77aaa4f8..0471d720b 100644
--- a/crates/pgt_analyser/Cargo.toml
+++ b/crates/pgt_analyser/Cargo.toml
@@ -15,7 +15,9 @@ version = "0.0.0"
 pgt_analyse = { workspace = true }
 pgt_console = { workspace = true }
 pgt_diagnostics = { workspace = true }
+pgt_text_size = { workspace = true }
 pgt_query_ext = { workspace = true }
+pgt_schema_cache = { workspace = true }
 serde = { workspace = true }
 
 [dev-dependencies]
diff --git a/crates/pgt_analyser/src/lib.rs b/crates/pgt_analyser/src/lib.rs
index 248fe22be..4c6576ce3 100644
--- a/crates/pgt_analyser/src/lib.rs
+++ b/crates/pgt_analyser/src/lib.rs
@@ -1,8 +1,8 @@
 use std::{ops::Deref, sync::LazyLock};
 
 use pgt_analyse::{
-    AnalyserOptions, AnalysisFilter, MetadataRegistry, RegistryRuleParams, RuleDiagnostic,
-    RuleRegistry,
+    AnalysedFileContext, AnalyserOptions, AnalysisFilter, MetadataRegistry, RegistryRuleParams,
+    RuleDiagnostic, RuleRegistry,
 };
 pub use registry::visit_registry;
 
@@ -30,8 +30,15 @@ pub struct Analyser<'a> {
     registry: RuleRegistry,
 }
 
-pub struct AnalyserContext<'a> {
-    pub root: &'a pgt_query_ext::NodeEnum,
+#[derive(Debug)]
+pub struct AnalysableStatement {
+    pub root: pgt_query_ext::NodeEnum,
+    pub range: pgt_text_size::TextRange,
+}
+
+pub struct AnalyserParams<'a> {
+    pub stmts: Vec<AnalysableStatement>,
+    pub schema_cache: Option<&'a pgt_schema_cache::SchemaCache>,
 }
 
 pub struct AnalyserConfig<'a> {
@@ -52,17 +59,30 @@ impl<'a> Analyser<'a> {
         }
     }
 
-    pub fn run(&self, ctx: AnalyserContext) -> Vec<RuleDiagnostic> {
-        let params = RegistryRuleParams {
-            root: ctx.root,
-            options: self.options,
-        };
+    pub fn run(&self, params: AnalyserParams) -> Vec<RuleDiagnostic> {
+        let mut diagnostics = vec![];
+
+        let mut file_context = AnalysedFileContext::default();
+
+        for stmt in params.stmts {
+            let rule_params = RegistryRuleParams {
+                root: &stmt.root,
+                options: self.options,
+                analysed_file_context: &file_context,
+                schema_cache: params.schema_cache,
+            };
 
-        self.registry
-            .rules
-            .iter()
-            .flat_map(|rule| (rule.run)(&params))
-            .collect::<Vec<RuleDiagnostic>>()
+            diagnostics.extend(
+                self.registry
+                    .rules
+                    .iter()
+                    .flat_map(|rule| (rule.run)(&rule_params)),
+            );
+
+            file_context.update_from(&stmt.root);
+        }
+
+        diagnostics
     }
 }
 
@@ -77,9 +97,10 @@ mod tests {
         markup,
     };
     use pgt_diagnostics::PrintDiagnostic;
+    use pgt_text_size::TextRange;
     use termcolor::NoColor;
 
-    use crate::Analyser;
+    use crate::{AnalysableStatement, Analyser};
 
     #[ignore]
     #[test]
@@ -102,6 +123,7 @@ mod tests {
         };
 
         let ast = pgt_query_ext::parse(SQL).expect("failed to parse SQL");
+        let range = TextRange::new(0.into(), u32::try_from(SQL.len()).unwrap().into());
 
         let options = AnalyserOptions::default();
 
@@ -110,7 +132,10 @@ mod tests {
             filter,
         });
 
-        let results = analyser.run(crate::AnalyserContext { root: &ast });
+        let results = analyser.run(crate::AnalyserParams {
+            stmts: vec![AnalysableStatement { root: ast, range }],
+            schema_cache: None,
+        });
 
         println!("*******************");
         for result in &results {
diff --git a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs
index 069019528..0da9d3398 100644
--- a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs
+++ b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs
@@ -1,6 +1,9 @@
-use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
+use pgt_analyse::{
+    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
+};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
+use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required.
@@ -27,7 +30,11 @@ declare_lint_rule! {
 impl Rule for AddingRequiredField {
     type Options = ();
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
+    fn run(
+        ctx: &RuleContext<Self>,
+        _file_context: &AnalysedFileContext,
+        _schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic> {
         let mut diagnostics = vec![];
 
         if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs
index 165d4230f..d008fe5ea 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs
@@ -1,6 +1,9 @@
-use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
+use pgt_analyse::{
+    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
+};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
+use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a column may break existing clients.
@@ -29,7 +32,11 @@ declare_lint_rule! {
 impl Rule for BanDropColumn {
     type Options = ();
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
+    fn run(
+        ctx: &RuleContext<Self>,
+        _file_context: &AnalysedFileContext,
+        _schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
         if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs
index 11d07da9a..6827b177b 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs
@@ -1,6 +1,9 @@
-use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
+use pgt_analyse::{
+    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
+};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
+use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a database may break existing clients (and everything else, really).
@@ -18,7 +21,11 @@ declare_lint_rule! {
 impl Rule for BanDropDatabase {
     type Options = ();
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
+    fn run(
+        ctx: &RuleContext<Self>,
+        _file_context: &AnalysedFileContext,
+        _schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic> {
         let mut diagnostics = vec![];
 
         if let pgt_query_ext::NodeEnum::DropdbStmt(_) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs
index fa4c90115..178c45ec1 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs
@@ -1,6 +1,9 @@
-use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
+use pgt_analyse::{
+    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
+};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
+use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a NOT NULL constraint may break existing clients.
@@ -29,7 +32,11 @@ declare_lint_rule! {
 impl Rule for BanDropNotNull {
     type Options = ();
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
+    fn run(
+        ctx: &RuleContext<Self>,
+        _file_context: &AnalysedFileContext,
+        _schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
         if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs
index 90c08514f..1899500d5 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs
@@ -1,6 +1,9 @@
-use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
+use pgt_analyse::{
+    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
+};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
+use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a table may break existing clients.
@@ -28,7 +31,11 @@ declare_lint_rule! {
 impl Rule for BanDropTable {
     type Options = ();
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
+    fn run(
+        ctx: &RuleContext<Self>,
+        _file_context: &AnalysedFileContext,
+        _schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic> {
         let mut diagnostics = vec![];
 
         if let pgt_query_ext::NodeEnum::DropStmt(stmt) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs
index cef5cd47b..dfaa1fa20 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs
@@ -1,7 +1,10 @@
-use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
+use pgt_analyse::{
+    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
+};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
 use pgt_query_ext::protobuf::DropBehavior;
+use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables.
@@ -31,7 +34,11 @@ declare_lint_rule! {
 impl Rule for BanTruncateCascade {
     type Options = ();
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
+    fn run(
+        ctx: &RuleContext<Self>,
+        _file_context: &AnalysedFileContext,
+        _schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
         if let pgt_query_ext::NodeEnum::TruncateStmt(stmt) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/tests/rules_tests.rs b/crates/pgt_analyser/tests/rules_tests.rs
index 247c02b0d..0a6b47ec5 100644
--- a/crates/pgt_analyser/tests/rules_tests.rs
+++ b/crates/pgt_analyser/tests/rules_tests.rs
@@ -2,7 +2,7 @@ use core::slice;
 use std::{fmt::Write, fs::read_to_string, path::Path};
 
 use pgt_analyse::{AnalyserOptions, AnalysisFilter, RuleDiagnostic, RuleFilter};
-use pgt_analyser::{Analyser, AnalyserConfig, AnalyserContext};
+use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig, AnalyserParams};
 use pgt_console::StdDisplay;
 use pgt_diagnostics::PrintDiagnostic;
 
@@ -32,7 +32,15 @@ fn rule_test(full_path: &'static str, _: &str, _: &str) {
         filter,
     });
 
-    let results = analyser.run(AnalyserContext { root: &ast });
+    let stmt = AnalysableStatement {
+        root: ast,
+        range: pgt_text_size::TextRange::new(0.into(), u32::try_from(query.len()).unwrap().into()),
+    };
+
+    let results = analyser.run(AnalyserParams {
+        stmts: vec![stmt],
+        schema_cache: None,
+    });
 
     let mut snapshot = String::new();
     write_snapshot(&mut snapshot, query.as_str(), results.as_slice());
diff --git a/crates/pgt_query_ext/src/diagnostics.rs b/crates/pgt_query_ext/src/diagnostics.rs
index aa16db813..c5584b21f 100644
--- a/crates/pgt_query_ext/src/diagnostics.rs
+++ b/crates/pgt_query_ext/src/diagnostics.rs
@@ -9,7 +9,7 @@ use pgt_text_size::TextRange;
 pub struct SyntaxDiagnostic {
     /// The location where the error is occurred
     #[location(span)]
-    span: Option<TextRange>,
+    pub span: Option<TextRange>,
     #[message]
     #[description]
     pub message: MessageAndDescription,
diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 5f2a9ebf9..5cd5e3731 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -13,14 +13,15 @@ use document::Document;
 use futures::{StreamExt, stream};
 use parsed_document::{
     AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, ExecuteStatementMapper,
-    ParsedDocument, SyncDiagnosticsMapper,
+    ParsedDocument,
 };
 use pgt_analyse::{AnalyserOptions, AnalysisFilter};
-use pgt_analyser::{Analyser, AnalyserConfig, AnalyserContext};
+use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig, AnalyserParams};
 use pgt_diagnostics::{
     Diagnostic, DiagnosticExt, Error, Severity, serde::Diagnostic as SDiagnostic,
 };
 use pgt_fs::{ConfigName, PgTPath};
+use pgt_query_ext::diagnostics::SyntaxDiagnostic;
 use pgt_typecheck::{IdentifierType, TypecheckParams, TypedIdentifier};
 use schema_cache_manager::SchemaCacheManager;
 use sqlx::{Executor, PgPool};
@@ -38,6 +39,7 @@ use crate::{
         diagnostics::{PullDiagnosticsParams, PullDiagnosticsResult},
     },
     settings::{WorkspaceSettings, WorkspaceSettingsHandle, WorkspaceSettingsHandleMut},
+    workspace::LintDiagnosticsMapper,
 };
 
 use super::{
@@ -527,48 +529,59 @@ impl Workspace for WorkspaceServer {
             filter,
         });
 
-        diagnostics.extend(parser.iter(SyncDiagnosticsMapper).flat_map(
-            |(_id, range, ast, diag)| {
-                let mut errors: Vec<Error> = vec![];
-
-                if let Some(diag) = diag {
-                    errors.push(diag.into());
-                }
-
-                if let Some(ast) = ast {
-                    errors.extend(
-                        analyser
-                            .run(AnalyserContext { root: &ast })
-                            .into_iter()
-                            .map(Error::from)
-                            .collect::<Vec<_>>(),
-                    );
-                }
+        let (analysable_stmts, syntax_diagnostics): (
+            Vec<Result<AnalysableStatement, SyntaxDiagnostic>>,
+            Vec<Result<AnalysableStatement, SyntaxDiagnostic>>,
+        ) = parser.iter(LintDiagnosticsMapper).partition(Result::is_ok);
+
+        let analysable_stmts = analysable_stmts.into_iter().map(Result::unwrap).collect();
+
+        let schema_cache = self
+            .get_current_connection()
+            .and_then(|pool| self.schema_cache.load(pool.clone()).ok());
+
+        let path = params.path.as_path().display().to_string();
+
+        diagnostics.extend(
+            syntax_diagnostics
+                .into_iter()
+                .map(Result::unwrap_err)
+                .map(|diag| {
+                    let span = diag.span.unwrap();
+                    SDiagnostic::new(
+                        diag.with_file_path(path.clone())
+                            .with_file_span(span)
+                            .with_severity(Severity::Error),
+                    )
+                }),
+        );
 
-                errors
-                    .into_iter()
-                    .map(|d| {
-                        let severity = d
-                            .category()
-                            .filter(|category| category.name().starts_with("lint/"))
-                            .map_or_else(
-                                || d.severity(),
-                                |category| {
-                                    settings
-                                        .get_severity_from_rule_code(category)
-                                        .unwrap_or(Severity::Warning)
-                                },
-                            );
-
-                        SDiagnostic::new(
-                            d.with_file_path(params.path.as_path().display().to_string())
-                                .with_file_span(range)
-                                .with_severity(severity),
-                        )
-                    })
-                    .collect::<Vec<_>>()
-            },
-        ));
+        diagnostics.extend(
+            analyser
+                .run(AnalyserParams {
+                    stmts: analysable_stmts,
+                    schema_cache: schema_cache.as_deref(),
+                })
+                .into_iter()
+                .map(Error::from)
+                .map(|d| {
+                    let severity = d
+                        .category()
+                        .map(|category| {
+                            settings
+                                .get_severity_from_rule_code(category)
+                                .unwrap_or(Severity::Warning)
+                        })
+                        .unwrap();
+
+                    let span = d.location().span;
+                    SDiagnostic::new(
+                        d.with_file_path(path.clone())
+                            .with_file_span(span)
+                            .with_severity(severity),
+                    )
+                }),
+        );
 
         let suppressions = parser.document_suppressions();
diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
index 8b5151286..82346df56 100644
--- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs
+++ b/crates/pgt_workspace/src/workspace/server/parsed_document.rs
@@ -1,5 +1,6 @@
 use std::sync::Arc;
 
+use pgt_analyser::AnalysableStatement;
 use pgt_diagnostics::serde::Diagnostic as SDiagnostic;
 use pgt_fs::PgTPath;
 use pgt_query_ext::diagnostics::SyntaxDiagnostic;
@@ -313,6 +314,32 @@ impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper {
     }
 }
 
+pub struct LintDiagnosticsMapper;
+impl<'a> StatementMapper<'a> for LintDiagnosticsMapper {
+    type Output = Result<AnalysableStatement, SyntaxDiagnostic>;
+
+    fn map(
+        &self,
+        parser: &'a ParsedDocument,
+        id: StatementId,
+        range: TextRange,
+        content: &str,
+    ) -> Self::Output {
+        let maybe_node = parser.ast_db.get_or_cache_ast(&id, content);
+
+        match maybe_node.as_ref() {
+            Ok(node) => Ok(AnalysableStatement {
+                range,
+                root: node.clone(),
+            }),
+            Err(diag) => Err(SyntaxDiagnostic {
+                message: diag.message.clone(),
+                span: Some(range),
+            }),
+        }
+    }
+}
+
 pub struct SyncDiagnosticsMapper;
 impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper {
     type Output = (
diff --git a/docs/codegen/src/rules_docs.rs b/docs/codegen/src/rules_docs.rs
index 68db53dbb..1d4b86a9e 100644
--- a/docs/codegen/src/rules_docs.rs
+++ b/docs/codegen/src/rules_docs.rs
@@ -1,7 +1,7 @@
 use anyhow::{Result, bail};
 use biome_string_case::Case;
 use pgt_analyse::{AnalyserOptions, AnalysisFilter, RuleFilter, RuleMetadata};
-use pgt_analyser::{Analyser, AnalyserConfig};
+use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig};
 use pgt_console::StdDisplay;
 use pgt_diagnostics::{Diagnostic, DiagnosticExt, PrintDiagnostic};
 use pgt_query_ext::diagnostics::SyntaxDiagnostic;
@@ -443,10 +443,16 @@ fn print_diagnostics(
     // split and parse each statement
     let stmts = pgt_statement_splitter::split(code);
 
-    for stmt in stmts.ranges {
-        match pgt_query_ext::parse(&code[stmt]) {
+    for stmt_range in stmts.ranges {
+        match pgt_query_ext::parse(&code[stmt_range]) {
             Ok(ast) => {
-                for rule_diag in analyser.run(pgt_analyser::AnalyserContext { root: &ast }) {
+                for rule_diag in analyser.run(pgt_analyser::AnalyserParams {
+                    schema_cache: None,
+                    stmts: vec![AnalysableStatement {
+                        range: stmt_range,
+                        root: ast,
+                    }],
+                }) {
                     let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag);
 
                     let category = diag.category().expect("linter diagnostic has no code");
diff --git a/xtask/codegen/src/generate_new_analyser_rule.rs b/xtask/codegen/src/generate_new_analyser_rule.rs
index fc2257121..343b06731 100644
--- a/xtask/codegen/src/generate_new_analyser_rule.rs
+++ b/xtask/codegen/src/generate_new_analyser_rule.rs
@@ -41,10 +41,11 @@ fn generate_rule_template(
     format!(
         r#"use pgt_analyse::{{
-    context::RuleContext, {macro_name}, Rule, RuleDiagnostic
+    AnalysedFileContext, context::RuleContext, {macro_name}, Rule, RuleDiagnostic,
 }};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
+use pgt_schema_cache::SchemaCache;
 
 {macro_name}! {{
     /// Succinct description of the rule.
@@ -78,7 +79,11 @@ use pgt_diagnostics::Severity;
 impl Rule for {rule_name_upper_camel} {{
     type Options = ();
 
-    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {{
+    fn run(
+        ctx: &RuleContext<Self>,
+        _file_context: &AnalysedFileContext,
+        _schema_cache: Option<&SchemaCache>,
+    ) -> Vec<RuleDiagnostic> {{
         Vec::new()
     }}
 }}
diff --git a/xtask/rules_check/src/lib.rs b/xtask/rules_check/src/lib.rs
index da4b4c73a..0c57d06f0 100644
--- a/xtask/rules_check/src/lib.rs
+++ b/xtask/rules_check/src/lib.rs
@@ -7,7 +7,7 @@ use pgt_analyse::{
     AnalyserOptions, AnalysisFilter, GroupCategory, RegistryVisitor, Rule, RuleCategory,
     RuleFilter, RuleGroup, RuleMetadata,
 };
-use pgt_analyser::{Analyser, AnalyserConfig};
+use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig};
 use pgt_console::{markup, Console};
 use pgt_diagnostics::{Diagnostic, DiagnosticExt, PrintDiagnostic};
 use pgt_query_ext::diagnostics::SyntaxDiagnostic;
@@ -127,10 +127,16 @@ fn assert_lint(
     });
 
     let result = pgt_statement_splitter::split(code);
-    for stmt in result.ranges {
-        match pgt_query_ext::parse(&code[stmt]) {
+    for stmt_range in result.ranges {
+        match pgt_query_ext::parse(&code[stmt_range]) {
             Ok(ast) => {
-                for rule_diag in analyser.run(pgt_analyser::AnalyserContext { root: &ast }) {
+                for rule_diag in analyser.run(pgt_analyser::AnalyserParams {
+                    schema_cache: None,
+                    stmts: vec![AnalysableStatement {
+                        range: stmt_range,
+                        root: ast,
+                    }],
+                }) {
                     let diag = pgt_diagnostics::serde::Diagnostic::new(rule_diag);
 
                     let category = diag.category().expect("linter diagnostic has no code");

From 597173f48e22581e3d9c2bc82c93e85e46c8c0ae Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 10:12:11 +0200
Subject: [PATCH 02/18] do not expose

---
 crates/pgt_analyse/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/pgt_analyse/src/lib.rs b/crates/pgt_analyse/src/lib.rs
index bc4ea096a..1d4ec6aee 100644
--- a/crates/pgt_analyse/src/lib.rs
+++ b/crates/pgt_analyse/src/lib.rs
@@ -5,7 +5,7 @@ mod filter;
 pub mod macros;
 pub mod options;
 mod registry;
-pub mod rule;
+mod rule;
 
 // Re-exported for use in the `declare_group` macro
 pub use pgt_diagnostics::category_concat;

From 8aaa5f5535d7d867fdd9f84fe3b67ad9e0470cd8 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 10:13:37 +0200
Subject: [PATCH 03/18] cleanup

---
 crates/pgt_workspace/src/workspace/server.rs  |  1 -
 .../src/workspace/server/document.rs          | 32 ++-----------------
 2 files changed, 3 insertions(+), 30 deletions(-)

diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 938672a9e..5794d07dc 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -11,7 +11,6 @@ use async_helper::run_async;
 use connection_manager::ConnectionManager;
 use document::{
     AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, Document, ExecuteStatementMapper,
-    SyncDiagnosticsMapper,
 };
 use futures::{StreamExt, stream};
 use pgt_analyse::{AnalyserOptions, AnalysisFilter};
diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs
index 170b44081..39abfe44a 100644
--- a/crates/pgt_workspace/src/workspace/server/document.rs
+++ b/crates/pgt_workspace/src/workspace/server/document.rs
@@ -1,5 +1,6 @@
 use std::sync::Arc;
 
+use pgt_analyser::AnalysableStatement;
 use pgt_diagnostics::{Diagnostic, DiagnosticExt, serde::Diagnostic as SDiagnostic};
 use pgt_query_ext::diagnostics::SyntaxDiagnostic;
 use pgt_suppressions::Suppressions;
@@ -244,14 +245,8 @@ pub struct LintDiagnosticsMapper;
 impl<'a> StatementMapper<'a> for LintDiagnosticsMapper {
     type Output = Result<AnalysableStatement, SyntaxDiagnostic>;
 
-    fn map(
-        &self,
-        parser: &'a ParsedDocument,
-        id: StatementId,
-        range: TextRange,
-        content: &str,
-    ) -> Self::Output {
-        let maybe_node = parser.ast_db.get_or_cache_ast(&id, content);
+    fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output {
+        let maybe_node = parser.ast_db.get_or_cache_ast(&id);
 
         match maybe_node.as_ref() {
             Ok(node) => Ok(AnalysableStatement {
@@ -266,27 +261,6 @@ impl<'a> StatementMapper<'a> for LintDiagnosticsMapper {
     }
 }
 
-pub struct SyncDiagnosticsMapper;
-impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper {
-    type Output = (
-        StatementId,
-        TextRange,
-        Option<pgt_query_ext::NodeEnum>,
-        Option<SyntaxDiagnostic>,
-    );
-
-    fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output {
-        let ast_result = parser.ast_db.get_or_cache_ast(&id);
-
-        let (ast_option, diagnostics) = match &*ast_result {
-            Ok(node) => (Some(node.clone()), None),
-            Err(diag) => (None, Some(diag.clone())),
-        };
-
-        (id.clone(), range, ast_option, diagnostics)
-    }
-}
-
 pub struct GetCompletionsMapper;
 impl<'a> StatementMapper<'a> for GetCompletionsMapper {
     type Output = (StatementId, TextRange, String, Arc<tree_sitter::Tree>);

From 55d4c1657aba7740ef6b085190555e8268580695 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 10:14:20 +0200
Subject: [PATCH 04/18] update

---
 packages/@postgrestools/backend-jsonrpc/src/workspace.ts | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts
index 72b77bc1d..971f07ec9 100644
--- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts
+++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts
@@ -435,17 +435,10 @@ export interface OpenFileParams {
 	version: number;
 }
 export interface ChangeFileParams {
-	changes: ChangeParams[];
+	content: string;
 	path: PgTPath;
 	version: number;
 }
-export interface ChangeParams {
-	/**
-	 * The range of the file that changed. If `None`, the whole file changed.
- */ - range?: TextRange; - text: string; -} export interface CloseFileParams { path: PgTPath; } From 8c9aa055b07f0a6ae65c1123fab078083a62ec45 Mon Sep 17 00:00:00 2001 From: Julian Date: Sun, 13 Jul 2025 10:15:43 +0200 Subject: [PATCH 05/18] format --- crates/pgt_analyse/Cargo.toml | 8 ++++---- crates/pgt_analyser/Cargo.toml | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/pgt_analyse/Cargo.toml b/crates/pgt_analyse/Cargo.toml index c2d890564..4d30784c8 100644 --- a/crates/pgt_analyse/Cargo.toml +++ b/crates/pgt_analyse/Cargo.toml @@ -13,11 +13,11 @@ version = "0.0.0" [dependencies] -pgt_console.workspace = true -pgt_diagnostics.workspace = true -pgt_query_ext.workspace = true +pgt_console.workspace = true +pgt_diagnostics.workspace = true +pgt_query_ext.workspace = true pgt_schema_cache.workspace = true -rustc-hash = { workspace = true } +rustc-hash = { workspace = true } biome_deserialize = { workspace = true, optional = true } biome_deserialize_macros = { workspace = true, optional = true } diff --git a/crates/pgt_analyser/Cargo.toml b/crates/pgt_analyser/Cargo.toml index 0471d720b..5f65b9787 100644 --- a/crates/pgt_analyser/Cargo.toml +++ b/crates/pgt_analyser/Cargo.toml @@ -12,13 +12,13 @@ repository.workspace = true version = "0.0.0" [dependencies] -pgt_analyse = { workspace = true } -pgt_console = { workspace = true } -pgt_diagnostics = { workspace = true } -pgt_text_size = { workspace = true } -pgt_query_ext = { workspace = true } -pgt_schema_cache = { workspace = true } -serde = { workspace = true } +pgt_analyse = { workspace = true } +pgt_console = { workspace = true } +pgt_diagnostics = { workspace = true } +pgt_query_ext = { workspace = true } +pgt_schema_cache = { workspace = true } +pgt_text_size = { workspace = true } +serde = { workspace = true } [dev-dependencies] insta = { version = "1.42.1" } From 83005a10a9ff7e48208a38a46104c611b767e9c0 Mon Sep 17 00:00:00 2001 From: Julian Date: Sun, 13 Jul 2025 10:19:19 +0200 Subject: [PATCH 06/18] hell no --- .../src/workspace/server/parsed_document.rs | 474 ------------------ 1 file changed, 474 deletions(-) delete mode 100644 crates/pgt_workspace/src/workspace/server/parsed_document.rs diff --git a/crates/pgt_workspace/src/workspace/server/parsed_document.rs b/crates/pgt_workspace/src/workspace/server/parsed_document.rs deleted file mode 100644 index 82346df56..000000000 --- a/crates/pgt_workspace/src/workspace/server/parsed_document.rs +++ /dev/null @@ -1,474 +0,0 @@ -use std::sync::Arc; - -use pgt_analyser::AnalysableStatement; -use pgt_diagnostics::serde::Diagnostic as SDiagnostic; -use pgt_fs::PgTPath; -use pgt_query_ext::diagnostics::SyntaxDiagnostic; -use pgt_suppressions::Suppressions; -use pgt_text_size::{TextRange, TextSize}; - -use crate::workspace::ChangeFileParams; - -use super::{ - annotation::AnnotationStore, - change::StatementChange, - document::{Document, StatementIterator}, - pg_query::PgQueryStore, - sql_function::{SQLFunctionSignature, get_sql_fn_body, get_sql_fn_signature}, - statement_identifier::StatementId, - tree_sitter::TreeSitterStore, -}; - -pub struct ParsedDocument { - #[allow(dead_code)] - path: PgTPath, - - doc: Document, - ast_db: PgQueryStore, - cst_db: TreeSitterStore, - annotation_db: AnnotationStore, -} - -impl ParsedDocument { - pub fn new(path: PgTPath, content: String, version: i32) -> ParsedDocument { - let doc = Document::new(content, version); - - let cst_db = TreeSitterStore::new(); - let ast_db = PgQueryStore::new(); - let annotation_db = 
AnnotationStore::new(); - - doc.iter().for_each(|(stmt, _, content)| { - cst_db.add_statement(&stmt, content); - }); - - ParsedDocument { - path, - doc, - ast_db, - cst_db, - annotation_db, - } - } - - /// Applies a change to the document and updates the CST and AST databases accordingly. - /// - /// Note that only tree-sitter cares about statement modifications vs remove + add. - /// Hence, we just clear the AST for the old statements and lazily load them when requested. - /// - /// * `params`: ChangeFileParams - The parameters for the change to be applied. - pub fn apply_change(&mut self, params: ChangeFileParams) { - for c in &self.doc.apply_file_change(¶ms) { - match c { - StatementChange::Added(added) => { - tracing::debug!( - "Adding statement: id:{:?}, text:{:?}", - added.stmt, - added.text - ); - self.cst_db.add_statement(&added.stmt, &added.text); - } - StatementChange::Deleted(s) => { - tracing::debug!("Deleting statement: id {:?}", s,); - self.cst_db.remove_statement(s); - self.ast_db.clear_statement(s); - self.annotation_db.clear_statement(s); - } - StatementChange::Modified(s) => { - tracing::debug!( - "Modifying statement with id {:?} (new id {:?}). Range {:?}, Changed from '{:?}' to '{:?}', changed text: {:?}", - s.old_stmt, - s.new_stmt, - s.change_range, - s.old_stmt_text, - s.new_stmt_text, - s.change_text - ); - - self.cst_db.modify_statement(s); - self.ast_db.clear_statement(&s.old_stmt); - self.annotation_db.clear_statement(&s.old_stmt); - } - } - } - } - - pub fn get_document_content(&self) -> &str { - &self.doc.content - } - - pub fn document_diagnostics(&self) -> &Vec { - &self.doc.diagnostics - } - - pub fn document_suppressions(&self) -> &Suppressions { - &self.doc.suppressions - } - - pub fn find<'a, M>(&'a self, id: StatementId, mapper: M) -> Option - where - M: StatementMapper<'a>, - { - self.iter_with_filter(mapper, IdFilter::new(id)).next() - } - - pub fn iter<'a, M>(&'a self, mapper: M) -> ParseIterator<'a, M, NoFilter> - where - M: StatementMapper<'a>, - { - self.iter_with_filter(mapper, NoFilter) - } - - pub fn iter_with_filter<'a, M, F>(&'a self, mapper: M, filter: F) -> ParseIterator<'a, M, F> - where - M: StatementMapper<'a>, - F: StatementFilter<'a>, - { - ParseIterator::new(self, mapper, filter) - } - - #[allow(dead_code)] - pub fn count(&self) -> usize { - self.iter(DefaultMapper).count() - } -} - -pub trait StatementMapper<'a> { - type Output; - - fn map( - &self, - parsed: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output; -} - -pub trait StatementFilter<'a> { - fn predicate(&self, id: &StatementId, range: &TextRange, content: &str) -> bool; -} - -pub struct ParseIterator<'a, M, F> { - parser: &'a ParsedDocument, - statements: StatementIterator<'a>, - mapper: M, - filter: F, - pending_sub_statements: Vec<(StatementId, TextRange, String)>, -} - -impl<'a, M, F> ParseIterator<'a, M, F> { - pub fn new(parser: &'a ParsedDocument, mapper: M, filter: F) -> Self { - Self { - parser, - statements: parser.doc.iter(), - mapper, - filter, - pending_sub_statements: Vec::new(), - } - } -} - -impl<'a, M, F> Iterator for ParseIterator<'a, M, F> -where - M: StatementMapper<'a>, - F: StatementFilter<'a>, -{ - type Item = M::Output; - - fn next(&mut self) -> Option { - // First check if we have any pending sub-statements to process - if let Some((id, range, content)) = self.pending_sub_statements.pop() { - if self.filter.predicate(&id, &range, content.as_str()) { - return Some(self.mapper.map(self.parser, id, range, 
&content)); - } - // If the sub-statement doesn't pass the filter, continue to the next item - return self.next(); - } - - // Process the next top-level statement - let next_statement = self.statements.next(); - - if let Some((root_id, range, content)) = next_statement { - // If we should include sub-statements and this statement has an AST - let content_owned = content.to_string(); - if let Ok(ast) = self - .parser - .ast_db - .get_or_cache_ast(&root_id, &content_owned) - .as_ref() - { - // Check if this is a SQL function definition with a body - if let Some(sub_statement) = get_sql_fn_body(ast, &content_owned) { - // Add sub-statements to our pending queue - self.pending_sub_statements.push(( - root_id.create_child(), - // adjust range to document - sub_statement.range + range.start(), - sub_statement.body.clone(), - )); - } - } - - // Return the current statement if it passes the filter - if self.filter.predicate(&root_id, &range, content) { - return Some(self.mapper.map(self.parser, root_id, range, content)); - } - - // If the current statement doesn't pass the filter, try the next one - return self.next(); - } - - None - } -} - -pub struct DefaultMapper; -impl<'a> StatementMapper<'a> for DefaultMapper { - type Output = (StatementId, TextRange, String); - - fn map( - &self, - _parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - (id, range, content.to_string()) - } -} - -pub struct ExecuteStatementMapper; -impl<'a> StatementMapper<'a> for ExecuteStatementMapper { - type Output = ( - StatementId, - TextRange, - String, - Option, - ); - - fn map( - &self, - parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - let ast_result = parser.ast_db.get_or_cache_ast(&id, content); - let ast_option = match &*ast_result { - Ok(node) => Some(node.clone()), - Err(_) => None, - }; - - (id, range, content.to_string(), ast_option) - } -} - -pub struct AsyncDiagnosticsMapper; -impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper { - type Output = ( - StatementId, - TextRange, - String, - Option, - Arc, - Option, - ); - - fn map( - &self, - parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - let content_owned = content.to_string(); - let ast_result = parser.ast_db.get_or_cache_ast(&id, &content_owned); - - let ast_option = match &*ast_result { - Ok(node) => Some(node.clone()), - Err(_) => None, - }; - - let cst_result = parser.cst_db.get_or_cache_tree(&id, &content_owned); - - let sql_fn_sig = id - .parent() - .and_then(|root| { - let c = parser.doc.statement_content(&root)?; - Some((root, c)) - }) - .and_then(|(root, c)| { - let ast_option = parser - .ast_db - .get_or_cache_ast(&root, c) - .as_ref() - .clone() - .ok(); - - let ast_option = ast_option.as_ref()?; - - get_sql_fn_signature(ast_option) - }); - - (id, range, content_owned, ast_option, cst_result, sql_fn_sig) - } -} - -pub struct LintDiagnosticsMapper; -impl<'a> StatementMapper<'a> for LintDiagnosticsMapper { - type Output = Result; - - fn map( - &self, - parser: &'a ParsedDocument, - id: StatementId, - range: TextRange, - content: &str, - ) -> Self::Output { - let maybe_node = parser.ast_db.get_or_cache_ast(&id, content); - - match maybe_node.as_ref() { - Ok(node) => Ok(AnalysableStatement { - range, - root: node.clone(), - }), - Err(diag) => Err(SyntaxDiagnostic { - message: diag.message.clone(), - span: Some(range), - }), - } - } -} - -pub struct SyncDiagnosticsMapper; 
-impl<'a> StatementMapper<'a> for SyncDiagnosticsMapper {
-    type Output = (
-        StatementId,
-        TextRange,
-        Option<pgt_query_ext::NodeEnum>,
-        Option<SyntaxDiagnostic>,
-    );
-
-    fn map(
-        &self,
-        parser: &'a ParsedDocument,
-        id: StatementId,
-        range: TextRange,
-        content: &str,
-    ) -> Self::Output {
-        let ast_result = parser.ast_db.get_or_cache_ast(&id, content);
-
-        let (ast_option, diagnostics) = match &*ast_result {
-            Ok(node) => (Some(node.clone()), None),
-            Err(diag) => (None, Some(diag.clone())),
-        };
-
-        (id, range, ast_option, diagnostics)
-    }
-}
-
-pub struct GetCompletionsMapper;
-impl<'a> StatementMapper<'a> for GetCompletionsMapper {
-    type Output = (StatementId, TextRange, String, Arc<tree_sitter::Tree>);
-
-    fn map(
-        &self,
-        parser: &'a ParsedDocument,
-        id: StatementId,
-        range: TextRange,
-        content: &str,
-    ) -> Self::Output {
-        let tree = parser.cst_db.get_or_cache_tree(&id, content);
-        (id, range, content.into(), tree)
-    }
-}
-
-/*
- * We allow an offset of two for the statement:
- *
- * select * from |   <-- we want to suggest items for the next token.
- *
- * However, if the current statement is terminated by a semicolon, we don't apply any
- * offset.
- *
- * select * from users; |   <-- no autocompletions here.
- */
-pub struct GetCompletionsFilter {
-    pub cursor_position: TextSize,
-}
-impl StatementFilter<'_> for GetCompletionsFilter {
-    fn predicate(&self, _id: &StatementId, range: &TextRange, content: &str) -> bool {
-        let is_terminated_by_semi = content.chars().last().is_some_and(|c| c == ';');
-
-        let measuring_range = if is_terminated_by_semi {
-            *range
-        } else {
-            range.checked_expand_end(2.into()).unwrap_or(*range)
-        };
-        measuring_range.contains(self.cursor_position)
-    }
-}
-
-pub struct NoFilter;
-impl StatementFilter<'_> for NoFilter {
-    fn predicate(&self, _id: &StatementId, _range: &TextRange, _content: &str) -> bool {
-        true
-    }
-}
-
-pub struct CursorPositionFilter {
-    pos: TextSize,
-}
-
-impl CursorPositionFilter {
-    pub fn new(pos: TextSize) -> Self {
-        Self { pos }
-    }
-}
-
-impl StatementFilter<'_> for CursorPositionFilter {
-    fn predicate(&self, _id: &StatementId, range: &TextRange, _content: &str) -> bool {
-        range.contains(self.pos)
-    }
-}
-
-pub struct IdFilter {
-    id: StatementId,
-}
-
-impl IdFilter {
-    pub fn new(id: StatementId) -> Self {
-        Self { id }
-    }
-}
-
-impl StatementFilter<'_> for IdFilter {
-    fn predicate(&self, id: &StatementId, _range: &TextRange, _content: &str) -> bool {
-        *id == self.id
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    use pgt_fs::PgTPath;
-
-    #[test]
-    fn sql_function_body() {
-        let input = "CREATE FUNCTION add(test0 integer, test1 integer) RETURNS integer
-            AS 'select $1 + $2;'
-            LANGUAGE SQL
-            IMMUTABLE
-            RETURNS NULL ON NULL INPUT;";
-
-        let path = PgTPath::new("test.sql");
-
-        let d = ParsedDocument::new(path, input.to_string(), 0);
-
-        let stmts = d.iter(DefaultMapper).collect::<Vec<_>>();
-
-        assert_eq!(stmts.len(), 2);
-        assert_eq!(stmts[1].2, "select $1 + $2;");
-    }
-}

From 491013b8d1acc3b79ca06e4454eaa87929e968ca Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 10:21:06 +0200
Subject: [PATCH 07/18] rename

---
 crates/pgt_workspace/src/workspace/server.rs          | 4 ++--
 crates/pgt_workspace/src/workspace/server/document.rs | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 5794d07dc..e41c4b560 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -37,7 +37,7 @@ use crate::{
         diagnostics::{PullDiagnosticsParams, PullDiagnosticsResult},
     },
     settings::{WorkspaceSettings, WorkspaceSettingsHandle, WorkspaceSettingsHandleMut},
-    workspace::LintDiagnosticsMapper,
+    workspace::AnalyserDiagnosticsMapper,
 };
 
 use super::{
@@ -530,7 +530,7 @@ impl Workspace for WorkspaceServer {
         let (analysable_stmts, syntax_diagnostics): (
             Vec<Result<AnalysableStatement, SyntaxDiagnostic>>,
            Vec<Result<AnalysableStatement, SyntaxDiagnostic>>,
-        ) = doc.iter(LintDiagnosticsMapper).partition(Result::is_ok);
+        ) = doc.iter(AnalyserDiagnosticsMapper).partition(Result::is_ok);
 
         let analysable_stmts = analysable_stmts.into_iter().map(Result::unwrap).collect();
 
diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs
index 39abfe44a..b80b85da5 100644
--- a/crates/pgt_workspace/src/workspace/server/document.rs
+++ b/crates/pgt_workspace/src/workspace/server/document.rs
@@ -241,8 +241,8 @@ impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper {
     }
 }
 
-pub struct LintDiagnosticsMapper;
-impl<'a> StatementMapper<'a> for LintDiagnosticsMapper {
+pub struct AnalyserDiagnosticsMapper;
+impl<'a> StatementMapper<'a> for AnalyserDiagnosticsMapper {
     type Output = Result<AnalysableStatement, SyntaxDiagnostic>;
 
     fn map(&self, parser: &'a Document, id: StatementId, range: TextRange) -> Self::Output {

From 922c5e0acb0f6d8f27aa5f3d1b11868d33b50948 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 10:21:31 +0200
Subject: [PATCH 08/18] rename that one too

---
 crates/pgt_workspace/src/workspace/server.rs          | 5 +++--
 crates/pgt_workspace/src/workspace/server/document.rs | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index e41c4b560..2bbcd648e 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -10,7 +10,8 @@ use analyser::AnalyserVisitorBuilder;
 use async_helper::run_async;
 use connection_manager::ConnectionManager;
 use document::{
-    AsyncDiagnosticsMapper, CursorPositionFilter, DefaultMapper, Document, ExecuteStatementMapper,
+    CursorPositionFilter, DefaultMapper, Document, ExecuteStatementMapper,
+    TypecheckDiagnosticsMapper,
 };
 use futures::{StreamExt, stream};
 use pgt_analyse::{AnalyserOptions, AnalysisFilter};
@@ -444,7 +445,7 @@ impl Workspace for WorkspaceServer {
         if let Some(pool) = self.get_current_connection() {
             let path_clone = params.path.clone();
             let schema_cache = self.schema_cache.load(pool.clone())?;
-            let input = doc.iter(AsyncDiagnosticsMapper).collect::<Vec<_>>();
+            let input = doc.iter(TypecheckDiagnosticsMapper).collect::<Vec<_>>();
             // sorry for the ugly code :(
             let async_results = run_async(async move {
                 stream::iter(input)
diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs
index b80b85da5..c17581ab6 100644
--- a/crates/pgt_workspace/src/workspace/server/document.rs
+++ b/crates/pgt_workspace/src/workspace/server/document.rs
@@ -209,8 +209,8 @@ impl<'a> StatementMapper<'a> for ExecuteStatementMapper {
     }
 }
 
-pub struct AsyncDiagnosticsMapper;
-impl<'a> StatementMapper<'a> for AsyncDiagnosticsMapper {
+pub struct TypecheckDiagnosticsMapper;
+impl<'a> StatementMapper<'a> for TypecheckDiagnosticsMapper {
     type Output = (
         StatementId,
         TextRange,

From 17463d8daf488373ce30a44c451f3dae223c58c8 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 13:22:51 +0200
Subject: [PATCH 09/18] ok

---
 crates/pgt_analyse/src/rule.rs | 6 ++++++
 crates/pgt_analyser/src/lib.rs | 4 +++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/crates/pgt_analyse/src/rule.rs b/crates/pgt_analyse/src/rule.rs
index 3648bb099..ce9d7aa8b 100644
--- a/crates/pgt_analyse/src/rule.rs
+++ b/crates/pgt_analyse/src/rule.rs
@@ -215,6 +215,12 @@ impl RuleDiagnostic {
         self
     }
 
+    /// Sets the span of this diagnostic.
+    pub fn span(mut self, span: TextRange) -> Self {
+        self.span = Some(span);
+        self
+    }
+
     /// Marks this diagnostic as unnecessary code, which will
     /// be displayed in the language server.
    ///
diff --git a/crates/pgt_analyser/src/lib.rs b/crates/pgt_analyser/src/lib.rs
index 4c6576ce3..1bb8c411b 100644
--- a/crates/pgt_analyser/src/lib.rs
+++ b/crates/pgt_analyser/src/lib.rs
@@ -4,6 +4,7 @@ use pgt_analyse::{
     AnalysedFileContext, AnalyserOptions, AnalysisFilter, MetadataRegistry, RegistryRuleParams,
     RuleDiagnostic, RuleRegistry,
 };
+use pgt_diagnostics::DiagnosticExt;
 pub use registry::visit_registry;
 
 mod lint;
@@ -76,7 +77,8 @@ impl<'a> Analyser<'a> {
                 self.registry
                     .rules
                     .iter()
-                    .flat_map(|rule| (rule.run)(&rule_params)),
+                    .flat_map(|rule| (rule.run)(&rule_params))
+                    .map(|r| r.span(stmt.range)),
             );
 
             file_context.update_from(&stmt.root);

From 361aaa9b0432806721bc1848ab271d6b9ca37aa3 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 13:23:12 +0200
Subject: [PATCH 10/18] ok

---
 crates/pgt_analyser/src/lib.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/crates/pgt_analyser/src/lib.rs b/crates/pgt_analyser/src/lib.rs
index 1bb8c411b..f96b6f6df 100644
--- a/crates/pgt_analyser/src/lib.rs
+++ b/crates/pgt_analyser/src/lib.rs
@@ -4,7 +4,6 @@ use pgt_analyse::{
     AnalysedFileContext, AnalyserOptions, AnalysisFilter, MetadataRegistry, RegistryRuleParams,
     RuleDiagnostic, RuleRegistry,
 };
-use pgt_diagnostics::DiagnosticExt;
 pub use registry::visit_registry;
 
 mod lint;

From d8299f19dc89c64a792ba29368e33bd5bdab4b78 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 14:08:25 +0200
Subject: [PATCH 11/18] awesome

---
 Cargo.lock                                    |  1 +
 crates/pgt_workspace/Cargo.toml               |  2 +
 crates/pgt_workspace/src/workspace/server.rs  |  4 +
 .../src/workspace/server.tests.rs             | 99 +++++++++++++++++++
 4 files changed, 106 insertions(+)
 create mode 100644 crates/pgt_workspace/src/workspace/server.tests.rs

diff --git a/Cargo.lock b/Cargo.lock
index 06f50830b..16b1de5e6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3105,6 +3105,7 @@ dependencies = [
  "pgt_schema_cache",
  "pgt_statement_splitter",
  "pgt_suppressions",
+ "pgt_test_utils",
  "pgt_text_size",
  "pgt_typecheck",
  "rustc-hash 2.1.0",
diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml
index f535e5058..3e164cb13 100644
--- a/crates/pgt_workspace/Cargo.toml
+++ b/crates/pgt_workspace/Cargo.toml
@@ -62,6 +62,8 @@ schema = [
 ]
 
 [dev-dependencies]
+pgt_test_utils = { workspace = true }
+sqlx           = { workspace = true }
 tempfile = "3.15.0"
 
 [lib]
diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 2bbcd648e..f3c2d3acf 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -663,3 +663,7 @@ impl Workspace for WorkspaceServer {
 fn is_dir(path: &Path) -> bool {
     path.is_dir() || (path.is_symlink() && fs::read_link(path).is_ok_and(|path| path.is_dir()))
 }
+
+#[cfg(test)]
+#[path = "server.tests.rs"]
+mod tests;
diff --git a/crates/pgt_workspace/src/workspace/server.tests.rs b/crates/pgt_workspace/src/workspace/server.tests.rs
new file mode 100644
index 000000000..94b02cd5f
--- /dev/null
+++ b/crates/pgt_workspace/src/workspace/server.tests.rs
@@ -0,0 +1,99 @@
+use biome_deserialize::Merge;
+use pgt_analyse::RuleCategories;
+use pgt_configuration::{PartialConfiguration, database::PartialDatabaseConfiguration};
+use pgt_diagnostics::Diagnostic;
+use pgt_fs::PgTPath;
+use pgt_text_size::TextRange;
+use sqlx::PgPool;
+
+use crate::{
+    Workspace, WorkspaceError,
+    workspace::{
+        OpenFileParams, RegisterProjectFolderParams, UpdateSettingsParams, server::WorkspaceServer,
+    },
+};
+
+fn get_test_workspace(
+    partial_config: Option<PartialConfiguration>,
+) -> Result<WorkspaceServer, WorkspaceError> {
+    let workspace = WorkspaceServer::new();
+
+    workspace.register_project_folder(RegisterProjectFolderParams {
+        path: None,
+        set_as_current_workspace: true,
+    })?;
+
+    workspace.update_settings(UpdateSettingsParams {
+        configuration: partial_config.unwrap_or(PartialConfiguration::init()),
+        gitignore_matches: vec![],
+        vcs_base_path: None,
+        workspace_directory: None,
+    })?;
+
+    Ok(workspace)
+}
+
+#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")]
+async fn test_diagnostics(test_db: PgPool) {
+    let mut conf = PartialConfiguration::init();
+    conf.merge_with(PartialConfiguration {
+        db: Some(PartialDatabaseConfiguration {
+            database: Some(
+                test_db
+                    .connect_options()
+                    .get_database()
+                    .unwrap()
+                    .to_string(),
+            ),
+            ..Default::default()
+        }),
+        ..Default::default()
+    });
+
+    let workspace = get_test_workspace(Some(conf)).expect("Unable to create test workspace");
+
+    let path = PgTPath::new("test.sql");
+    let content = r#"
+      create table users (
+          id serial primary key,
+          name text not null
+      );
+
+      drop table non_existing_table;
+
+      select 1;
+    "#;
+
+    workspace
+        .open_file(OpenFileParams {
+            path: path.clone(),
+            content: content.into(),
+            version: 1,
+        })
+        .expect("Unable to open test file");
+
+    let diagnostics = workspace
+        .pull_diagnostics(crate::workspace::PullDiagnosticsParams {
+            path: path.clone(),
+            categories: RuleCategories::all(),
+            max_diagnostics: 100,
+            only: vec![],
+            skip: vec![],
+        })
+        .expect("Unable to pull diagnostics")
+        .diagnostics;
+
+    assert_eq!(diagnostics.len(), 1, "Expected one diagnostic");
+
+    let diagnostic = &diagnostics[0];
+
+    assert_eq!(
+        diagnostic.category().map(|c| c.name()),
+        Some("lint/safety/banDropTable")
+    );
+
+    assert_eq!(
+        diagnostic.location().span,
+        Some(TextRange::new(106.into(), 136.into()))
+    );
+}

From 47344e8e37a3546a8e3f7c853e753cdc8b3001b2 Mon Sep 17 00:00:00 2001
From: Julian
Date: Sun, 13 Jul 2025 14:16:02 +0200
Subject: [PATCH 12/18] cool, it added lines

---
 crates/pgt_workspace/Cargo.toml |  2 +-
 docs/rules/ban-drop-column.md   |  6 +++++-
 docs/rules/ban-drop-not-null.md |  6 +++++-
 docs/rules/ban-drop-table.md    |  6 +++++-
 4 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml
index 3e164cb13..3ef4936b6 100644
--- a/crates/pgt_workspace/Cargo.toml
+++ b/crates/pgt_workspace/Cargo.toml
@@ -64,7 +64,7 @@ schema = [
 [dev-dependencies]
 pgt_test_utils = { workspace = true }
 sqlx           = { workspace = true }
-tempfile = "3.15.0"
+tempfile       = "3.15.0"
 
 [lib]
 doctest = false
diff --git a/docs/rules/ban-drop-column.md b/docs/rules/ban-drop-column.md
index 0c46d40a3..28b3a4b5e 100644
--- a/docs/rules/ban-drop-column.md
+++ b/docs/rules/ban-drop-column.md
@@ -25,10 +25,14 @@ alter table test drop column id;
 ```
 
 ```sh
-code-block.sql lint/safety/banDropColumn ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+code-block.sql:1:1 lint/safety/banDropColumn ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 
   ! Dropping a column may break existing clients.
 
+  > 1 │ alter table test drop column id;
+      │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    2 │ 
+  
   i You can leave the column as nullable or delete the column once queries no longer select or modify the column.
 
 ```
diff --git a/docs/rules/ban-drop-not-null.md b/docs/rules/ban-drop-not-null.md
index b860c45c2..56ec33c74 100644
--- a/docs/rules/ban-drop-not-null.md
+++ b/docs/rules/ban-drop-not-null.md
@@ -25,10 +25,14 @@ alter table users alter column email drop not null;
 ```
 
 ```sh
-code-block.sql lint/safety/banDropNotNull ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+code-block.sql:1:1 lint/safety/banDropNotNull ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 
   ! Dropping a NOT NULL constraint may break existing clients.
 
+  > 1 │ alter table users alter column email drop not null;
+      │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    2 │ 
+  
   i Consider using a marker value that represents NULL. Alternatively, create a new table allowing NULL values, copy the data from the old table, and create a view that filters NULL values.
 
 ```
diff --git a/docs/rules/ban-drop-table.md b/docs/rules/ban-drop-table.md
index 4b81755f3..8aeb6e2c6 100644
--- a/docs/rules/ban-drop-table.md
+++ b/docs/rules/ban-drop-table.md
@@ -26,10 +26,14 @@ drop table some_table;
 ```
 
 ```sh
-code-block.sql lint/safety/banDropTable ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+code-block.sql:1:1 lint/safety/banDropTable ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 
   ! Dropping a table may break existing clients.
 
+  > 1 │ drop table some_table;
+      │ ^^^^^^^^^^^^^^^^^^^^^^
+    2 │ 
+  
   i Update your application code to no longer read or write the table, and only then delete the table. Be sure to create a backup.
 
 ```

From a79764f16ac1bebbc5321518b62f139a0a0e1f1e Mon Sep 17 00:00:00 2001
From: Julian
Date: Tue, 15 Jul 2025 09:03:02 +0200
Subject: [PATCH 13/18] adjust

---
 crates/pgt_analyse/src/context.rs             | 27 +++++++++++++++++--
 crates/pgt_analyse/src/registry.rs            | 11 ++++++--
 crates/pgt_analyse/src/rule.rs                |  6 +----
 .../src/lint/safety/adding_required_field.rs  | 11 ++------
 .../src/lint/safety/ban_drop_column.rs        | 11 ++------
 .../src/lint/safety/ban_drop_database.rs      | 11 ++------
 .../src/lint/safety/ban_drop_not_null.rs      | 11 ++------
 .../src/lint/safety/ban_drop_table.rs         | 11 ++------
 .../src/lint/safety/ban_truncate_cascade.rs   | 11 ++------
 9 files changed, 47 insertions(+), 63 deletions(-)

diff --git a/crates/pgt_analyse/src/context.rs b/crates/pgt_analyse/src/context.rs
index cd0696573..7447c0bb5 100644
--- a/crates/pgt_analyse/src/context.rs
+++ b/crates/pgt_analyse/src/context.rs
@@ -1,4 +1,7 @@
+use pgt_schema_cache::SchemaCache;
+
 use crate::{
+    AnalysedFileContext,
     categories::RuleCategory,
     rule::{GroupCategory, Rule, RuleGroup, RuleMetadata},
 };
@@ -6,6 +9,8 @@
 pub struct RuleContext<'a, R: Rule> {
     stmt: &'a pgt_query_ext::NodeEnum,
     options: &'a R::Options,
+    schema_cache: Option<&'a SchemaCache>,
+    file_context: &'a AnalysedFileContext,
 }
 
 impl<'a, R> RuleContext<'a, R>
@@ -13,8 +18,18 @@ where
     R: Rule + Sized + 'static,
 {
     #[allow(clippy::too_many_arguments)]
-    pub fn new(stmt: &'a pgt_query_ext::NodeEnum, options: &'a R::Options) -> Self {
-        Self { stmt, options }
+    pub fn new(
+        stmt: &'a pgt_query_ext::NodeEnum,
+        options: &'a R::Options,
+        schema_cache: Option<&'a SchemaCache>,
+        file_context: &'a AnalysedFileContext,
+    ) -> Self {
+        Self {
+            stmt,
+            options,
+            schema_cache,
+            file_context,
+        }
     }
 
     /// Returns the group that belongs to the current rule
@@ -32,6 +47,14 @@ where
     pub fn stmt(&self) -> &pgt_query_ext::NodeEnum {
         self.stmt
     }
 
+    pub fn file_context(&self) -> &AnalysedFileContext {
+        self.file_context
+    }
+
+    pub fn schema_cache(&self) -> Option<&SchemaCache> {
+        self.schema_cache
+    }
+
     /// Returns the metadata of the rule
     ///
     /// The metadata contains information about the rule, such as the name, version, language, and whether it is recommended.
diff --git a/crates/pgt_analyse/src/registry.rs b/crates/pgt_analyse/src/registry.rs
index 058f6dc3a..d43d77115 100644
--- a/crates/pgt_analyse/src/registry.rs
+++ b/crates/pgt_analyse/src/registry.rs
@@ -177,8 +177,15 @@ impl RegistryRule {
         R: Rule + 'static,
     {
         let options = params.options.rule_options::<R>().unwrap_or_default();
-        let ctx = RuleContext::new(params.root, &options);
-        R::run(&ctx, params.analysed_file_context, params.schema_cache)
+
+        let ctx = RuleContext::new(
+            params.root,
+            &options,
+            params.schema_cache,
+            params.analysed_file_context,
+        );
+
+        R::run(&ctx)
     }
 
     Self { run: run::<R> }
diff --git a/crates/pgt_analyse/src/rule.rs b/crates/pgt_analyse/src/rule.rs
index ce9d7aa8b..f42adcbde 100644
--- a/crates/pgt_analyse/src/rule.rs
+++ b/crates/pgt_analyse/src/rule.rs
@@ -105,11 +105,7 @@ pub trait Rule: RuleMeta + Sized {
     type Options: Default + Clone + Debug;
 
     /// `schema_cache` will only be available if the user has a working database connection.
-    fn run(
-        rule_context: &RuleContext<Self>,
-        file_context: &AnalysedFileContext,
-        schema_cache: Option<&SchemaCache>,
-    ) -> Vec<RuleDiagnostic>;
+    fn run(rule_context: &RuleContext<Self>) -> Vec<RuleDiagnostic>;
 }
 
 /// Diagnostic object returned by a single analysis rule
diff --git a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs
index 0da9d3398..069019528 100644
--- a/crates/pgt_analyser/src/lint/safety/adding_required_field.rs
+++ b/crates/pgt_analyser/src/lint/safety/adding_required_field.rs
@@ -1,9 +1,6 @@
-use pgt_analyse::{
-    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
-};
+use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
-use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required.
@@ -30,11 +27,7 @@ declare_lint_rule! {
 impl Rule for AddingRequiredField {
     type Options = ();
 
-    fn run(
-        ctx: &RuleContext<Self>,
-        _file_context: &AnalysedFileContext,
-        _schema_cache: Option<&SchemaCache>,
-    ) -> Vec<RuleDiagnostic> {
+    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = vec![];
 
         if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs
index d008fe5ea..165d4230f 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_column.rs
@@ -1,9 +1,6 @@
-use pgt_analyse::{
-    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
-};
+use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
-use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a column may break existing clients.
@@ -32,11 +29,7 @@ declare_lint_rule! {
 impl Rule for BanDropColumn {
     type Options = ();
 
-    fn run(
-        ctx: &RuleContext<Self>,
-        _file_context: &AnalysedFileContext,
-        _schema_cache: Option<&SchemaCache>,
-    ) -> Vec<RuleDiagnostic> {
+    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
         if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs
index 6827b177b..11d07da9a 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_database.rs
@@ -1,9 +1,6 @@
-use pgt_analyse::{
-    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
-};
+use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
-use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a database may break existing clients (and everything else, really).
@@ -21,11 +18,7 @@ declare_lint_rule! {
 impl Rule for BanDropDatabase {
     type Options = ();
 
-    fn run(
-        ctx: &RuleContext<Self>,
-        _file_context: &AnalysedFileContext,
-        _schema_cache: Option<&SchemaCache>,
-    ) -> Vec<RuleDiagnostic> {
+    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = vec![];
 
         if let pgt_query_ext::NodeEnum::DropdbStmt(_) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs
index 178c45ec1..fa4c90115 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_not_null.rs
@@ -1,9 +1,6 @@
-use pgt_analyse::{
-    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
-};
+use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
-use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a NOT NULL constraint may break existing clients.
@@ -32,11 +29,7 @@ declare_lint_rule! {
 impl Rule for BanDropNotNull {
     type Options = ();
 
-    fn run(
-        ctx: &RuleContext<Self>,
-        _file_context: &AnalysedFileContext,
-        _schema_cache: Option<&SchemaCache>,
-    ) -> Vec<RuleDiagnostic> {
+    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
         if let pgt_query_ext::NodeEnum::AlterTableStmt(stmt) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs
index 1899500d5..90c08514f 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_drop_table.rs
@@ -1,9 +1,6 @@
-use pgt_analyse::{
-    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
-};
+use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
-use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Dropping a table may break existing clients.
@@ -31,11 +28,7 @@ declare_lint_rule! {
 impl Rule for BanDropTable {
     type Options = ();
 
-    fn run(
-        ctx: &RuleContext<Self>,
-        _file_context: &AnalysedFileContext,
-        _schema_cache: Option<&SchemaCache>,
-    ) -> Vec<RuleDiagnostic> {
+    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = vec![];
 
         if let pgt_query_ext::NodeEnum::DropStmt(stmt) = &ctx.stmt() {
diff --git a/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs
index dfaa1fa20..cef5cd47b 100644
--- a/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs
+++ b/crates/pgt_analyser/src/lint/safety/ban_truncate_cascade.rs
@@ -1,10 +1,7 @@
-use pgt_analyse::{
-    AnalysedFileContext, Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule,
-};
+use pgt_analyse::{Rule, RuleDiagnostic, RuleSource, context::RuleContext, declare_lint_rule};
 use pgt_console::markup;
 use pgt_diagnostics::Severity;
 use pgt_query_ext::protobuf::DropBehavior;
-use pgt_schema_cache::SchemaCache;
 
 declare_lint_rule! {
     /// Using `TRUNCATE`'s `CASCADE` option will truncate any tables that are also foreign-keyed to the specified tables.
@@ -34,11 +31,7 @@ declare_lint_rule! {
 impl Rule for BanTruncateCascade {
     type Options = ();
 
-    fn run(
-        ctx: &RuleContext<Self>,
-        _file_context: &AnalysedFileContext,
-        _schema_cache: Option<&SchemaCache>,
-    ) -> Vec<RuleDiagnostic> {
+    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
         let mut diagnostics = Vec::new();
 
         if let pgt_query_ext::NodeEnum::TruncateStmt(stmt) = &ctx.stmt() {

From 9426b1f5312dffed42341448f2df9e08bd7bb46f Mon Sep 17 00:00:00 2001
From: Julian
Date: Tue, 15 Jul 2025 09:03:47 +0200
Subject: [PATCH 14/18] k

---
 crates/pgt_analyse/src/rule.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/crates/pgt_analyse/src/rule.rs b/crates/pgt_analyse/src/rule.rs
index f42adcbde..1760ce971 100644
--- a/crates/pgt_analyse/src/rule.rs
+++ b/crates/pgt_analyse/src/rule.rs
@@ -5,12 +5,10 @@ use pgt_diagnostics::{
     Advices, Category, Diagnostic, DiagnosticTags, Location, LogCategory, MessageAndDescription,
     Severity, Visit,
 };
-use pgt_schema_cache::SchemaCache;
 use pgt_text_size::TextRange;
 use std::cmp::Ordering;
 use std::fmt::Debug;
 
-use crate::analysed_file_context::AnalysedFileContext;
 use crate::{categories::RuleCategory, context::RuleContext, registry::RegistryVisitor};
 
 #[derive(Clone, Debug)]

From 806a09375a856d2641ed242b3a4c1fe7e799b787 Mon Sep 17 00:00:00 2001
From: Julian
Date: Tue, 15 Jul 2025 09:29:16 +0200
Subject: [PATCH 15/18] lint

---
 crates/pgt_workspace/src/workspace/server.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 11d326d42..e6456afc7 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -15,12 +15,11 @@ use document::{
 };
 use futures::{StreamExt, stream};
 use pgt_analyse::{AnalyserOptions, AnalysisFilter};
-use pgt_analyser::{AnalysableStatement, Analyser, AnalyserConfig, AnalyserParams};
+use pgt_analyser::{Analyser, AnalyserConfig, AnalyserParams};
 use pgt_diagnostics::{
     Diagnostic, DiagnosticExt, Error, Severity, serde::Diagnostic as SDiagnostic,
 };
 use pgt_fs::{ConfigName, PgTPath};
-use pgt_query_ext::diagnostics::SyntaxDiagnostic;
 use pgt_typecheck::{IdentifierType, TypecheckParams, TypedIdentifier};
 use schema_cache_manager::SchemaCacheManager;
 use sqlx::{Executor, PgPool};

From 6be942f6125fcd24cfa4c4a80d7c7beb40533bf7 Mon Sep 17 00:00:00 2001
From: Julian
Date: Tue, 15 Jul 2025 09:33:17 +0200
Subject: [PATCH 16/18]
test --- .../src/workspace/server/document.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs index 05f7df63a..01c57e318 100644 --- a/crates/pgt_workspace/src/workspace/server/document.rs +++ b/crates/pgt_workspace/src/workspace/server/document.rs @@ -404,7 +404,7 @@ $$;"; let results = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 1); - let (_range, ast, diagnostic) = &results[0]; + let (ast, diagnostic) = &results[0]; // Should have parsed the CREATE FUNCTION statement assert!(ast.is_some()); @@ -435,7 +435,7 @@ $$;"; let results = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 1); - let (_range, ast, diagnostic) = &results[0]; + let (ast, diagnostic) = &results[0]; // Should have parsed the CREATE FUNCTION statement assert!(ast.is_some()); @@ -460,13 +460,13 @@ $$;"; let results1 = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results1.len(), 1); - assert!(results1[0].1.is_some()); - assert!(results1[0].2.is_none()); + assert!(results1[0].0.is_some()); + assert!(results1[0].1.is_none()); let results2 = d.iter(AnalyserDiagnosticsMapper).collect::>(); assert_eq!(results2.len(), 1); - assert!(results2[0].1.is_some()); - assert!(results2[0].2.is_none()); + assert!(results2[0].0.is_some()); + assert!(results2[0].1.is_none()); } #[test] @@ -513,7 +513,7 @@ END; $$ LANGUAGE plpgsql;"; let d = Document::new(input.to_string(), 1); - let results = d.iter(AsyncDiagnosticsMapper).collect::>(); + let results = d.iter(TypecheckDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 1); let (_id, _range, ast, cst, sql_fn_sig) = &results[0]; @@ -532,7 +532,7 @@ $$ LANGUAGE plpgsql;"; "CREATE FUNCTION add(a int, b int) RETURNS int AS 'SELECT $1 + $2;' LANGUAGE sql;"; let d = Document::new(input.to_string(), 1); - let results = d.iter(AsyncDiagnosticsMapper).collect::>(); + let results = d.iter(TypecheckDiagnosticsMapper).collect::>(); assert_eq!(results.len(), 2); // Check the function body From e649818e3aeda1a493fd1e359e46d8961122e01b Mon Sep 17 00:00:00 2001 From: Julian Date: Tue, 15 Jul 2025 09:38:38 +0200 Subject: [PATCH 17/18] ok --- crates/pgt_lsp/tests/server.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 353e80ae0..176868f55 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -1734,13 +1734,24 @@ $$; server.open_document(initial_content).await?; - let notification = tokio::time::timeout(Duration::from_secs(5), async { + let got_notification = tokio::time::timeout(Duration::from_secs(5), async { loop { match receiver.next().await { Some(ServerNotification::PublishDiagnostics(msg)) => { if msg.diagnostics.iter().any(|d| { d.message .contains("Invalid statement: syntax error at or near \"declre\"") + && d.range + == Range { + start: Position { + line: 5, + character: 9, + }, + end: Position { + line: 11, + character: 0, + }, + } }) { return true; } @@ -1752,7 +1763,10 @@ $$; .await .is_ok(); - assert!(notification, "expected diagnostics for unknown column"); + assert!( + got_notification, + "expected diagnostics for invalid declare statement" + ); server.shutdown().await?; reader.abort(); From 0ad31be64a1c18e74df76556be361ac2c6c53b2b Mon Sep 17 00:00:00 2001 From: Julian Date: Tue, 15 Jul 2025 09:48:13 +0200 Subject: [PATCH 18/18] fi range --- 
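Note below the fold: the plpgsql sub-parser reports diagnostic spans relative to the function body it parsed, so this patch shifts them by the parent statement's start offset before surfacing them. A minimal sketch of that arithmetic, assuming the standard `text-size` semantics of `pgt_text_size`; the helper `offset_into_parent` is illustrative, not part of the patch:

```rust
use pgt_text_size::TextRange;

/// Shift a child-relative span into the parent's coordinate space,
/// falling back to the parent's own range when the child span is unknown.
fn offset_into_parent(child: Option<TextRange>, parent: TextRange) -> TextRange {
    child.map(|span| span + parent.start()).unwrap_or(parent)
}

fn main() {
    // a span at 10..16 inside a body that starts at offset 120
    // surfaces as 130..136 in the whole document
    let parent = TextRange::new(120.into(), 200.into());
    let child = Some(TextRange::new(10.into(), 16.into()));
    assert_eq!(
        offset_into_parent(child, parent),
        TextRange::new(130.into(), 136.into())
    );
}
```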
 crates/pgt_query_ext/src/diagnostics.rs               | 5 +++++
 crates/pgt_workspace/src/workspace/server/document.rs | 4 +++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/crates/pgt_query_ext/src/diagnostics.rs b/crates/pgt_query_ext/src/diagnostics.rs
index 6db048d54..1a068dc0a 100644
--- a/crates/pgt_query_ext/src/diagnostics.rs
+++ b/crates/pgt_query_ext/src/diagnostics.rs
@@ -23,6 +23,11 @@ impl SyntaxDiagnostic {
             message: MessageAndDescription::from(message.into()),
         }
     }
+
+    pub fn span(mut self, span: TextRange) -> Self {
+        self.span = Some(span);
+        self
+    }
 }

 impl From for SyntaxDiagnostic {
diff --git a/crates/pgt_workspace/src/workspace/server/document.rs b/crates/pgt_workspace/src/workspace/server/document.rs
index 01c57e318..b57983705 100644
--- a/crates/pgt_workspace/src/workspace/server/document.rs
+++ b/crates/pgt_workspace/src/workspace/server/document.rs
@@ -252,7 +252,9 @@ impl<'a> StatementMapper<'a> for AnalyserDiagnosticsMapper {
             Ok(node) => {
                 let plpgsql_result = parser.ast_db.get_or_cache_plpgsql_parse(&id);
                 if let Some(Err(diag)) = plpgsql_result {
-                    (Some(node.clone()), Some(diag.clone()))
+                    // offset the plpgsql diagnostic span by the parent statement's start
+                    let span = diag.location().span.map(|sp| sp + range.start());
+                    (Some(node.clone()), Some(diag.span(span.unwrap_or(range))))
                 } else {
                     (Some(node.clone()), None)
                 }
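
For reference, the series leaves lint rules with a single-argument `run`: the file context and schema cache now travel on `RuleContext` instead of being passed as extra parameters. A sketch of a schema-aware rule under the new shape — `MyRule` and its body are illustrative, and the `declare_lint_rule!` boilerplate that supplies the `RuleMeta` impl is omitted:

```rust
use pgt_analyse::{Rule, RuleDiagnostic, context::RuleContext};

struct MyRule;

impl Rule for MyRule {
    type Options = ();

    fn run(ctx: &RuleContext<Self>) -> Vec<RuleDiagnostic> {
        let mut diagnostics = vec![];

        // `schema_cache()` is `None` without a live database connection,
        // so schema-aware checks must degrade gracefully.
        if let Some(_cache) = ctx.schema_cache() {
            // consult the cache (tables, columns, ...) before pushing
            // diagnostics that need real schema knowledge
        }

        // cross-statement state accumulated over the file so far
        let _file_context = ctx.file_context();

        diagnostics
    }
}
```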