diff --git a/build.gradle.kts b/build.gradle.kts index 64743f78f5..b7aa8084c7 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -19,7 +19,6 @@ plugins { alias(kotlin.jvm) alias(publisher) alias(serialization) apply false - alias(dokka) alias(kover) alias(ktlint) alias(korro) apply false @@ -41,7 +40,7 @@ repositories { mavenLocal() mavenCentral() maven("https://maven.pkg.jetbrains.space/public/p/kotlinx-html/maven") - maven(jupyterApiTCRepo) + if (jupyterApiTCRepo.isNotBlank()) maven(jupyterApiTCRepo) } configurations { @@ -154,6 +153,8 @@ val modulesUsingJava11 = with(projects) { dataframeJupyter, dataframeGeo, examples.ideaExamples.titanic, + plugins.symbolProcessor, + plugins.dataframeGradlePlugin, ) }.map { it.path } diff --git a/core/api/core.api b/core/api/core.api index ddc3861f49..b0fc9d78e8 100644 --- a/core/api/core.api +++ b/core/api/core.api @@ -6481,8 +6481,6 @@ public final class org/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords : j public static final field EXTERNAL Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; public static final field FINAL Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; public static final field FUN Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; - public static final field HEADER Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; - public static final field IMPL Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; public static final field IN Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; public static final field INFIX Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; public static final field INLINE Lorg/jetbrains/kotlinx/dataframe/keywords/ModifierKeywords; @@ -6536,8 +6534,6 @@ public final class org/jetbrains/kotlinx/dataframe/keywords/SoftKeywords : java/ public static final field FINAL Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; public static final field FINALLY Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; public static final field GET Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; - public static final field HEADER Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; - public static final field IMPL Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; public static final field IMPORT Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; public static final field INFIX Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; public static final field INIT Lorg/jetbrains/kotlinx/dataframe/keywords/SoftKeywords; diff --git a/core/build.gradle.kts b/core/build.gradle.kts index 3c519eb80c..fd2c4493dd 100644 --- a/core/build.gradle.kts +++ b/core/build.gradle.kts @@ -112,7 +112,7 @@ tasks.withType { } } -val clearTestResults by tasks.creating(Delete::class) { +val clearTestResults by tasks.registering(Delete::class) { delete(layout.buildDirectory.dir("dataframes")) delete(layout.buildDirectory.dir("korroOutputLines")) } @@ -140,7 +140,7 @@ val samplesTest = tasks.register("samplesTest") { sourceSets["main"].runtimeClasspath } -val clearSamplesOutputs by tasks.creating { +val clearSamplesOutputs by tasks.registering { group = "documentation" doFirst { @@ -152,7 +152,7 @@ val clearSamplesOutputs by tasks.creating { } } -val addSamplesToGit by tasks.creating(GitTask::class) { +val addSamplesToGit by tasks.registering(GitTask::class) { directory = file(".") command = "add" args = listOf("-A", "../docs/StardustDocs/snippets") @@ -167,7 +167,7 @@ val copySamplesOutputs = tasks.register("copySamplesOutputs") { classpath = 
sourceSets.test.get().runtimeClasspath doLast { - addSamplesToGit.executeCommand() + addSamplesToGit.get().executeCommand() } } @@ -240,7 +240,7 @@ idea { // If `changeJarTask` is run, modify all Jar tasks such that before running the Kotlin sources are set to // the target of `processKdocMain`, and they are returned to normal afterward. // This is usually only done when publishing -val changeJarTask by tasks.creating { +val changeJarTask by tasks.registering { outputs.upToDateWhen { project.hasProperty("skipKodex") } doFirst { tasks.withType { @@ -282,16 +282,6 @@ idea { } } -// If we want to use Dokka, make sure to use the preprocessed sources -tasks.withType { - dependsOn(processKDocsMain) - dokkaSourceSets { - all { - sourceRoot(processKDocsMain.target.get()) - } - } -} - // endregion korro { diff --git a/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/ColumnNotFoundException.kt b/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/ColumnNotFoundException.kt index 2098b18b75..7d037acffa 100644 --- a/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/ColumnNotFoundException.kt +++ b/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/ColumnNotFoundException.kt @@ -1,4 +1,5 @@ package org.jetbrains.kotlinx.dataframe.exceptions public class ColumnNotFoundException(public val columnName: String, public override val message: String) : - RuntimeException() + RuntimeException(), + DataFrameException diff --git a/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/DuplicateColumnNamesException.kt b/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/DuplicateColumnNamesException.kt index be2db6c9dd..890d9547cb 100644 --- a/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/DuplicateColumnNamesException.kt +++ b/core/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/exceptions/DuplicateColumnNamesException.kt @@ -1,6 +1,8 @@ package org.jetbrains.kotlinx.dataframe.exceptions -public class DuplicateColumnNamesException(public val allColumnNames: List) : IllegalArgumentException() { +public class DuplicateColumnNamesException(public val allColumnNames: List) : + IllegalArgumentException(), + DataFrameException { public val duplicatedNames: List = allColumnNames .groupBy { it } diff --git a/dataframe-csv/build.gradle.kts b/dataframe-csv/build.gradle.kts index 1a73328eed..1a4b537ff2 100644 --- a/dataframe-csv/build.gradle.kts +++ b/dataframe-csv/build.gradle.kts @@ -106,7 +106,7 @@ tasks.named("runKtlintCheckOverGeneratedSourcesSourceSet") { // If `changeJarTask` is run, modify all Jar tasks such that before running the Kotlin sources are set to // the target of `processKdocMain`, and they are returned to normal afterward. 
// This is usually only done when publishing -val changeJarTask by tasks.creating { +val changeJarTask by tasks.registering { outputs.upToDateWhen { false } doFirst { tasks.withType { diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/documentation/DelimParams.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/documentation/DelimParams.kt index c15dbc8b74..b025b56e36 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/documentation/DelimParams.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/documentation/DelimParams.kt @@ -92,7 +92,7 @@ internal object DelimParams { * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. */ const val HAS_FIXED_WIDTH_COLUMNS: Boolean = false @@ -163,7 +163,7 @@ internal object DelimParams { const val IGNORE_EMPTY_LINES: Boolean = false /** - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. */ @@ -201,7 +201,7 @@ internal object DelimParams { * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ const val PARSE_PARALLEL: Boolean = true diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/impl/io/readDelim.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/impl/io/readDelim.kt index dc633d1530..3ef98a4f96 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/impl/io/readDelim.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/impl/io/readDelim.kt @@ -79,7 +79,7 @@ import kotlin.time.Duration * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -102,7 +102,7 @@ import kotlin.time.Duration * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. 
* @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -120,7 +120,7 @@ import kotlin.time.Duration * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. * @param compression The compression of the data. * Default: [Compression.None], unless detected otherwise from the input file or url. * @param adjustCsvSpecs Optional extra [CsvSpecs] configuration. Default: `{ it }`. diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsv.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsv.kt index a2f3d12270..a65674124e 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsv.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsv.kt @@ -82,7 +82,7 @@ import kotlin.io.path.inputStream * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -119,7 +119,7 @@ import kotlin.io.path.inputStream * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -137,7 +137,7 @@ import kotlin.io.path.inputStream * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readCsv( path: Path, @@ -234,7 +234,7 @@ public fun DataFrame.Companion.readCsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -271,7 +271,7 @@ public fun DataFrame.Companion.readCsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. 
- * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -289,7 +289,7 @@ public fun DataFrame.Companion.readCsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readCsv( file: File, @@ -386,7 +386,7 @@ public fun DataFrame.Companion.readCsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -423,7 +423,7 @@ public fun DataFrame.Companion.readCsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -441,7 +441,7 @@ public fun DataFrame.Companion.readCsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readCsv( url: URL, @@ -538,7 +538,7 @@ public fun DataFrame.Companion.readCsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -575,7 +575,7 @@ public fun DataFrame.Companion.readCsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. 
* @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -593,7 +593,7 @@ public fun DataFrame.Companion.readCsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readCsv( fileOrUrl: String, @@ -690,7 +690,7 @@ public fun DataFrame.Companion.readCsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -727,7 +727,7 @@ public fun DataFrame.Companion.readCsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -745,7 +745,7 @@ public fun DataFrame.Companion.readCsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. * @param adjustCsvSpecs Optional extra [CsvSpecs] configuration. Default: `{ it }`. * * Before instantiating the [CsvSpecs], the [CsvSpecs.Builder] will be passed to this lambda. diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsvStr.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsvStr.kt index 023dd509b7..2743ac4dd0 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsvStr.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readCsvStr.kt @@ -70,7 +70,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -107,7 +107,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. 
- * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -125,7 +125,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readCsvStr( text: String, diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelim.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelim.kt index bcab301856..a9bedfd10a 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelim.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelim.kt @@ -88,7 +88,7 @@ import kotlin.io.path.inputStream * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -125,7 +125,7 @@ import kotlin.io.path.inputStream * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -143,7 +143,7 @@ import kotlin.io.path.inputStream * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readDelim( path: Path, @@ -240,7 +240,7 @@ public fun DataFrame.Companion.readDelim( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -277,7 +277,7 @@ public fun DataFrame.Companion.readDelim( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. 
* * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -295,7 +295,7 @@ public fun DataFrame.Companion.readDelim( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readDelim( file: File, @@ -392,7 +392,7 @@ public fun DataFrame.Companion.readDelim( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -429,7 +429,7 @@ public fun DataFrame.Companion.readDelim( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -447,7 +447,7 @@ public fun DataFrame.Companion.readDelim( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readDelim( url: URL, @@ -544,7 +544,7 @@ public fun DataFrame.Companion.readDelim( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -581,7 +581,7 @@ public fun DataFrame.Companion.readDelim( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. 
* * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -599,7 +599,7 @@ public fun DataFrame.Companion.readDelim( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readDelim( fileOrUrl: String, @@ -696,7 +696,7 @@ public fun DataFrame.Companion.readDelim( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -733,7 +733,7 @@ public fun DataFrame.Companion.readDelim( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -751,7 +751,7 @@ public fun DataFrame.Companion.readDelim( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. * @param adjustCsvSpecs Optional extra [CsvSpecs] configuration. Default: `{ it }`. * * Before instantiating the [CsvSpecs], the [CsvSpecs.Builder] will be passed to this lambda. diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelimStr.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelimStr.kt index c9fb3a4618..7e01b21988 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelimStr.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readDelimStr.kt @@ -70,7 +70,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -107,7 +107,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. 
* * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -125,7 +125,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readDelimStr( text: String, diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsv.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsv.kt index 52834e9133..60f9ee96c9 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsv.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsv.kt @@ -82,7 +82,7 @@ import kotlin.io.path.inputStream * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -119,7 +119,7 @@ import kotlin.io.path.inputStream * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -137,7 +137,7 @@ import kotlin.io.path.inputStream * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readTsv( path: Path, @@ -234,7 +234,7 @@ public fun DataFrame.Companion.readTsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. 
* @@ -271,7 +271,7 @@ public fun DataFrame.Companion.readTsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -289,7 +289,7 @@ public fun DataFrame.Companion.readTsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readTsv( file: File, @@ -386,7 +386,7 @@ public fun DataFrame.Companion.readTsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -423,7 +423,7 @@ public fun DataFrame.Companion.readTsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -441,7 +441,7 @@ public fun DataFrame.Companion.readTsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readTsv( url: URL, @@ -538,7 +538,7 @@ public fun DataFrame.Companion.readTsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -575,7 +575,7 @@ public fun DataFrame.Companion.readTsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. 
+ * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -593,7 +593,7 @@ public fun DataFrame.Companion.readTsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readTsv( fileOrUrl: String, @@ -690,7 +690,7 @@ public fun DataFrame.Companion.readTsv( * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. * @@ -727,7 +727,7 @@ public fun DataFrame.Companion.readTsv( * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -745,7 +745,7 @@ public fun DataFrame.Companion.readTsv( * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. * @param adjustCsvSpecs Optional extra [CsvSpecs] configuration. Default: `{ it }`. * * Before instantiating the [CsvSpecs], the [CsvSpecs.Builder] will be passed to this lambda. diff --git a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsvStr.kt b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsvStr.kt index ad735f7130..2b43d0687c 100644 --- a/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsvStr.kt +++ b/dataframe-csv/generated-sources/src/main/kotlin/org/jetbrains/kotlinx/dataframe/io/readTsvStr.kt @@ -70,7 +70,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * * Fixed-width columns can occur, for instance, in multi-space delimited data, where the columns are separated * by multiple spaces instead of a single delimiter, so columns are visually aligned. - * Columns widths are determined by the header in the data (if present), or manually by setting + * Column widths are determined by the header in the data (if present), or manually by setting * [fixedColumnWidths]. * @param fixedColumnWidths The fixed column widths. Default: empty list. 
* @@ -107,7 +107,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * @param ignoreEmptyLines Whether to skip intermediate empty lines. Default: `false`. * * If `false`, empty lines will be interpreted as having _empty_ values if [allowMissingColumns]. - * @param allowMissingColumns Wether to allow rows with fewer columns than the header. Default: `true`. + * @param allowMissingColumns Whether to allow rows with fewer columns than the header. Default: `true`. * * If `true`, rows that are too short will be interpreted as _empty_ values. * @param ignoreExcessColumns Whether to ignore rows with more columns than the header. Default: `true`. @@ -125,7 +125,7 @@ import org.jetbrains.kotlinx.dataframe.impl.io.readDelimImpl * @param parseParallel Whether to parse the data in parallel. Default: `true`. * * If `true`, the data will be read and parsed in parallel by the Deephaven parser. - * This is usually faster, but can be turned off for debugging. + * This is usually faster but can be turned off for debugging. */ public fun DataFrame.Companion.readTsvStr( text: String, diff --git a/dataframe-openapi-generator/build.gradle.kts b/dataframe-openapi-generator/build.gradle.kts index 37aa3b2a58..28e6760047 100644 --- a/dataframe-openapi-generator/build.gradle.kts +++ b/dataframe-openapi-generator/build.gradle.kts @@ -18,7 +18,7 @@ val jupyterApiTCRepo: String by project repositories { mavenLocal() mavenCentral() - maven(jupyterApiTCRepo) + if (jupyterApiTCRepo.isNotBlank()) maven(jupyterApiTCRepo) } dependencies { diff --git a/gradle.properties b/gradle.properties index 0b7b8e09c7..9c994ee4df 100644 --- a/gradle.properties +++ b/gradle.properties @@ -3,6 +3,10 @@ version=1.0.0 jupyterApiTCRepo= kotlin.jupyter.add.scanner=false org.gradle.jvmargs=-Xmx4G + +# We don't support ksp2 +ksp.useKSP2=false + # build.number.detection=false # build.number=0.8.0 diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 5bcfceb862..2ca04b0499 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,19 +1,19 @@ [versions] -ksp = "2.0.20-1.0.24" -kotlinJupyter = "0.12.0-383" +ksp = "2.1.0-1.0.29" +kotlinJupyter = "0.12.0-426" -ktlint = "12.1.2" +ktlint = "12.2.0" -# make sure to sync both manually with :generator module -kotlin = "2.0.20" # needs jupyter compatibility with Kotlin 2.1 to update +# make sure to sync both manually with :keywords-generator module +kotlin = "2.1.0" kotlinpoet = "1.18.1" -dokka = "1.9.20" libsPublisher = "1.9.23-dev-45" # "Bootstrap" version of the dataframe, used in the build itself to generate @DataSchema APIs, # dogfood Gradle / KSP plugins in tests and idea-examples modules dataframe = "1.0.0-dev-6538" +#dataframe = "1.0.0-dev" korro = "0.1.6" binaryCompatibilityValidator = "0.17.0" @@ -52,7 +52,7 @@ plugin-publish = "1.3.0" shadow = "8.3.5" android-gradle-api = "7.3.1" # need to revise our tests to update ktor = "3.0.1" # needs jupyter compatibility with Kotlin 2.1 to update -kotlin-compile-testing = "1.6.0" +kotlin-compile-testing = "0.7.1" duckdb = "1.1.3" buildconfig = "5.5.1" benchmark = "0.4.12" @@ -139,8 +139,8 @@ android-gradle = { group = "com.android.tools.build", name = "gradle", version.r kotlin-gradle-plugin = { group = "org.jetbrains.kotlin", name = "kotlin-gradle-plugin" } kotlin-gradle-plugin-api = { group = "org.jetbrains.kotlin", name = "kotlin-gradle-plugin-api" } ktor-server-netty = { group = "io.ktor", name = "ktor-server-netty", version.ref = "ktor" } -kotlin-compile-testing = { group = "com.github.tschuchortdev", 
name = "kotlin-compile-testing", version.ref = "kotlin-compile-testing" } -kotlin-compile-testing-ksp = { group = "com.github.tschuchortdev", name = "kotlin-compile-testing-ksp", version.ref = "kotlin-compile-testing" } +kotlin-compile-testing = { group = "dev.zacsweers.kctfork", name = "core", version.ref = "kotlin-compile-testing" } +kotlin-compile-testing-ksp = { group = "dev.zacsweers.kctfork", name = "ksp", version.ref = "kotlin-compile-testing" } kotlin-compiler = { group = "org.jetbrains.kotlin", name = "kotlin-compiler", version.ref = "kotlin" } kotlin-compiler-embeddable = { group = "org.jetbrains.kotlin", name = "kotlin-compiler-embeddable", version.ref = "kotlin" } kotlin-compiler-internal-test-framework = { group = "org.jetbrains.kotlin", name = "kotlin-compiler-internal-test-framework", version.ref = "kotlin" } @@ -158,7 +158,6 @@ jupyter-api = { id = "org.jetbrains.kotlin.jupyter.api", version.ref = "kotlinJu ksp = { id = "com.google.devtools.ksp", version.ref = "ksp" } binary-compatibility-validator = { id = "org.jetbrains.kotlinx.binary-compatibility-validator", version.ref = "binaryCompatibilityValidator" } kotlin-jvm = { id = "org.jetbrains.kotlin.jvm", version.ref = "kotlin" } -dokka = { id = "org.jetbrains.dokka", version.ref = "dokka" } keywordGenerator = { id = "org.jetbrains.dataframe.generator", version = "1.0" } publisher = { id = "org.jetbrains.kotlin.libs.publisher", version.ref = "libsPublisher" } korro = { id = "io.github.devcrocod.korro", version.ref = "korro" } diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index a4b76b9530..1b33c55baa 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 94113f200e..e18bc253b8 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.11-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12.1-bin.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/gradlew b/gradlew index f5feea6d6b..23d15a9367 100755 --- a/gradlew +++ b/gradlew @@ -86,8 +86,7 @@ done # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) -APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s -' "$PWD" ) || exit +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum @@ -115,7 +114,7 @@ case "$( uname )" in #( NONSTOP* ) nonstop=true ;; esac -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar +CLASSPATH="\\\"\\\"" # Determine the Java command to use to start the JVM. @@ -206,7 +205,7 @@ fi DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Collect all arguments for the java command: -# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, # and any embedded shellness will be escaped. # * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be # treated as '${Hostname}' itself on the command line. 
@@ -214,7 +213,7 @@ DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ -classpath "$CLASSPATH" \ - org.gradle.wrapper.GradleWrapperMain \ + -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \ "$@" # Stop when "xargs" is not available. diff --git a/gradlew.bat b/gradlew.bat index 9b42019c79..5eed7ee845 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -70,11 +70,11 @@ goto fail :execute @rem Setup the command line -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar +set CLASSPATH= @rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %* :end @rem End local scope for the variables with windows NT shell diff --git a/plugins/dataframe-gradle-plugin/src/main/kotlin/org/jetbrains/dataframe/gradle/SchemaGeneratorPlugin.kt b/plugins/dataframe-gradle-plugin/src/main/kotlin/org/jetbrains/dataframe/gradle/SchemaGeneratorPlugin.kt index 4e3f4f01b8..71b3d7405b 100644 --- a/plugins/dataframe-gradle-plugin/src/main/kotlin/org/jetbrains/dataframe/gradle/SchemaGeneratorPlugin.kt +++ b/plugins/dataframe-gradle-plugin/src/main/kotlin/org/jetbrains/dataframe/gradle/SchemaGeneratorPlugin.kt @@ -39,7 +39,7 @@ class SchemaGeneratorPlugin : Plugin { val generationTasks = extension.schemas.map { createTask(target, extension, appliedPlugin, it) } - val generateAll = target.tasks.create("generateDataFrames") { + val generateAll = target.tasks.register("generateDataFrames") { group = GROUP dependsOn(*generationTasks.toTypedArray()) } @@ -124,7 +124,7 @@ class SchemaGeneratorPlugin : Plugin { val defaultPath = schema.defaultPath ?: extension.defaultPath ?: true val delimiters = schema.withNormalizationBy ?: extension.withNormalizationBy ?: setOf('\t', ' ', '_') - return target.tasks.create("generateDataFrame$interfaceName", GenerateDataSchemaTask::class.java) { + return target.tasks.register("generateDataFrame$interfaceName", GenerateDataSchemaTask::class.java) { (logging as? DefaultLoggingManager)?.setLevelInternal(LogLevel.QUIET) group = GROUP data.set(schema.data) @@ -138,7 +138,7 @@ class SchemaGeneratorPlugin : Plugin { this.defaultPath.set(defaultPath) this.delimiters.set(delimiters) this.enableExperimentalOpenApi.set(extension.enableExperimentalOpenApi) - } + }.get() } private fun getInterfaceName(schema: Schema): String? 
{ diff --git a/plugins/expressions-converter/build.gradle.kts b/plugins/expressions-converter/build.gradle.kts index 36369bc865..0d2ac92ad8 100644 --- a/plugins/expressions-converter/build.gradle.kts +++ b/plugins/expressions-converter/build.gradle.kts @@ -58,7 +58,7 @@ sourceSets { } } -tasks.create("generateTests") { +tasks.register("generateTests") { classpath = sourceSets.test.get().runtimeClasspath mainClass = "org.jetbrains.kotlinx.dataframe.GenerateTestsKt" } diff --git a/plugins/expressions-converter/src/org/jetbrains/kotlinx/dataframe/ExplainerIrTransformer.kt b/plugins/expressions-converter/src/org/jetbrains/kotlinx/dataframe/ExplainerIrTransformer.kt index 812082b11e..c7493d12ef 100644 --- a/plugins/expressions-converter/src/org/jetbrains/kotlinx/dataframe/ExplainerIrTransformer.kt +++ b/plugins/expressions-converter/src/org/jetbrains/kotlinx/dataframe/ExplainerIrTransformer.kt @@ -28,6 +28,7 @@ import org.jetbrains.kotlin.ir.expressions.impl.IrConstImpl import org.jetbrains.kotlin.ir.expressions.impl.IrFunctionExpressionImpl import org.jetbrains.kotlin.ir.expressions.impl.IrGetObjectValueImpl import org.jetbrains.kotlin.ir.expressions.impl.IrGetValueImpl +import org.jetbrains.kotlin.ir.symbols.UnsafeDuringIrConstructionAPI import org.jetbrains.kotlin.ir.symbols.impl.IrSimpleFunctionSymbolImpl import org.jetbrains.kotlin.ir.symbols.impl.IrValueParameterSymbolImpl import org.jetbrains.kotlin.ir.types.classFqName @@ -48,15 +49,16 @@ import java.io.File data class ContainingDeclarations(val clazz: IrClass?, val function: IrFunction?, val statementIndex: Int = 0) +@OptIn(UnsafeDuringIrConstructionAPI::class) class ExplainerIrTransformer(val pluginContext: IrPluginContext) : FileLoweringPass, IrElementTransformer { + lateinit var file: IrFile lateinit var source: String override fun lower(irFile: IrFile) { - var file: File - file = File("testData/box/${irFile.path}") + var file = File("testData/box/${irFile.path}") if (!file.exists()) { file = File(irFile.path) } @@ -139,7 +141,12 @@ class ExplainerIrTransformer(val pluginContext: IrPluginContext) : override fun visitGetValue(expression: IrGetValue, data: ContainingDeclarations): IrExpression { if (expression.startOffset < 0) return expression if (expression.type.classFqName in dataFrameLike) { - return transformDataFrameExpression(expression, expression.symbol.owner.name, receiver = null, data) + return transformDataFrameExpression( + expression = expression, + ownerName = expression.symbol.owner.name, + receiver = null, + data = data, + ) } return super.visitExpression(expression, data) } @@ -177,7 +184,13 @@ class ExplainerIrTransformer(val pluginContext: IrPluginContext) : CallableId(FqName("kotlin"), Name.identifier("also")), ).single() - val result = IrCallImpl(-1, -1, expression.type, alsoReference, 1, 1).apply { + val result = IrCallImpl( + startOffset = -1, + endOffset = -1, + type = expression.type, + symbol = alsoReference, + typeArgumentsCount = 1, + ).apply { this.extensionReceiver = expression putTypeArgument(0, expression.type) @@ -226,16 +239,16 @@ class ExplainerIrTransformer(val pluginContext: IrPluginContext) : } val expressionId = expressionId(expression) val receiverId = receiver?.let { expressionId(it) } - val valueArguments = buildList { - add(source.irConstImpl()) - add(ownerName.asStringStripSpecialMarkers().irConstImpl()) - add(IrGetValueImpl(-1, -1, itSymbol)) - add(expressionId.irConstImpl()) - add(receiverId.irConstImpl()) - add(data.clazz?.fqNameWhenAvailable?.asString().irConstImpl()) - 
add(data.function?.name?.asString().irConstImpl()) - add(IrConstImpl.int(-1, -1, pluginContext.irBuiltIns.intType, data.statementIndex)) - } + val valueArguments = listOf( + source.irConstImpl(), + ownerName.asStringStripSpecialMarkers().irConstImpl(), + IrGetValueImpl(-1, -1, itSymbol), + expressionId.irConstImpl(), + receiverId.irConstImpl(), + data.clazz?.fqNameWhenAvailable?.asString().irConstImpl(), + data.function?.name?.asString().irConstImpl(), + IrConstImpl.int(-1, -1, pluginContext.irBuiltIns.intType, data.statementIndex), + ) body = pluginContext.irFactory.createBlockBody(-1, -1).apply { val callableId = CallableId( explainerPackage, @@ -249,7 +262,6 @@ class ExplainerIrTransformer(val pluginContext: IrPluginContext) : type = doAction.owner.returnType, symbol = doAction, typeArgumentsCount = 0, - valueArgumentsCount = valueArguments.size, ).apply { val clazz = ClassId(explainerPackage, Name.identifier("PluginCallbackProxy")) val plugin = pluginContext.referenceClass(clazz)!! @@ -274,7 +286,7 @@ class ExplainerIrTransformer(val pluginContext: IrPluginContext) : return result } - private fun String?.irConstImpl(): IrConstImpl { + private fun String?.irConstImpl(): IrConstImpl { val nullableString = pluginContext.irBuiltIns.stringType.makeNullable() val argument = if (this == null) { IrConstImpl.constNull(-1, -1, nullableString) diff --git a/plugins/keywords-generator/gradle.properties b/plugins/keywords-generator/gradle.properties index 7eade7a0da..ac43b4291f 100644 --- a/plugins/keywords-generator/gradle.properties +++ b/plugins/keywords-generator/gradle.properties @@ -1,2 +1,2 @@ -kotlinCompilerVersion=2.0.20 -kotlinPoetVersion=2.0.0 +kotlinCompilerVersion=2.1.0 +kotlinPoetVersion=2.1.0 diff --git a/plugins/keywords-generator/settings.gradle.kts b/plugins/keywords-generator/settings.gradle.kts index e69de29bb2..f579624716 100644 --- a/plugins/keywords-generator/settings.gradle.kts +++ b/plugins/keywords-generator/settings.gradle.kts @@ -0,0 +1,3 @@ +//enableFeaturePreview("TYPESAFE_PROJECT_ACCESSORS") + +rootProject.name = "keywords-generator" diff --git a/plugins/kotlin-dataframe/build.gradle.kts b/plugins/kotlin-dataframe/build.gradle.kts index 4a78ff10d4..cdab139dc3 100644 --- a/plugins/kotlin-dataframe/build.gradle.kts +++ b/plugins/kotlin-dataframe/build.gradle.kts @@ -98,7 +98,7 @@ tasks.compileTestKotlin { } } -tasks.create("generateTests") { +tasks.register("generateTests") { classpath = sourceSets.test.get().runtimeClasspath mainClass = "org.jetbrains.kotlin.fir.dataframe.GenerateTestsKt" } diff --git a/plugins/symbol-processor/src/test/kotlin/org/jetbrains/dataframe/ksp/runner/KspCompilationTestRunner.kt b/plugins/symbol-processor/src/test/kotlin/org/jetbrains/dataframe/ksp/runner/KspCompilationTestRunner.kt index ed673e2d18..634c040123 100644 --- a/plugins/symbol-processor/src/test/kotlin/org/jetbrains/dataframe/ksp/runner/KspCompilationTestRunner.kt +++ b/plugins/symbol-processor/src/test/kotlin/org/jetbrains/dataframe/ksp/runner/KspCompilationTestRunner.kt @@ -2,9 +2,10 @@ package org.jetbrains.dataframe.ksp.runner +import com.tschuchort.compiletesting.JvmCompilationResult import com.tschuchort.compiletesting.KotlinCompilation import com.tschuchort.compiletesting.SourceFile -import com.tschuchort.compiletesting.kspArgs +import com.tschuchort.compiletesting.kspProcessorOptions import com.tschuchort.compiletesting.kspSourcesDir import com.tschuchort.compiletesting.symbolProcessorProviders import org.jetbrains.dataframe.ksp.DataFrameSymbolProcessorProvider @@ -15,7 
+16,7 @@ import java.nio.file.Paths @Suppress("unused") internal class KotlinCompileTestingCompilationResult( - val delegate: KotlinCompilation.Result, + val delegate: JvmCompilationResult, val successfulCompilation: Boolean, val kspGeneratedFiles: List, val outputSourceDirs: List, @@ -43,8 +44,8 @@ internal object KspCompilationTestRunner { classpaths = params.classpath, tempDir = compilationDir, ) - kspCompilation.kspArgs.putAll(params.options) - kspCompilation.symbolProcessorProviders = listOf(DataFrameSymbolProcessorProvider()) + kspCompilation.kspProcessorOptions.putAll(params.options) + kspCompilation.symbolProcessorProviders = mutableListOf(DataFrameSymbolProcessorProvider()) kspCompilation.compile().also { println(it.messages) if (it.exitCode == KotlinCompilation.ExitCode.COMPILATION_ERROR) { diff --git a/settings.gradle.kts b/settings.gradle.kts index bade37d6ac..e69bca03f3 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -31,7 +31,7 @@ val jupyterApiTCRepo: String by settings dependencyResolutionManagement { repositories { mavenCentral() - maven(jupyterApiTCRepo) + if (jupyterApiTCRepo.isNotBlank()) maven(jupyterApiTCRepo) } }
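The recurring `tasks.creating` → `tasks.registering` and `tasks.create` → `tasks.register` changes above move the build scripts to Gradle's lazy task-configuration API. Below is a minimal `build.gradle.kts` sketch of that pattern; the task names and the `Delete` configuration are illustrative placeholders, not taken from this patch — only the standard Gradle Kotlin DSL API is assumed.

// Eager API: the task is created and configured during every configuration phase,
// whether or not it is ever requested.
val eagerClean by tasks.creating(Delete::class) {
    delete(layout.buildDirectory.dir("out"))
}

// Lazy API (the one this patch adopts): registration returns a TaskProvider,
// and the configuration block only runs if the task is actually needed.
val lazyClean by tasks.registering(Delete::class) {
    delete(layout.buildDirectory.dir("out"))
}

tasks.register("packageOutputs") {
    // Task wiring accepts the provider directly, keeping configuration lazy.
    dependsOn(lazyClean)
    doLast {
        // Call sites that need the concrete task must resolve the provider first,
        // which is why calls such as addSamplesToGit.executeCommand() become
        // addSamplesToGit.get().executeCommand() in this patch.
        println(lazyClean.get().name)
    }
}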