diff --git a/CHANGELOG.md b/CHANGELOG.md
index c8fc5263..78419f6f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,13 @@
 
 [1]: https://www.npmjs.com/package/@google-cloud/bigquery?activeTab=versions
 
+## [7.9.3](https://github.com/googleapis/nodejs-bigquery/compare/v7.9.2...v7.9.3) (2025-03-17)
+
+
+### Bug Fixes
+
+* Make sure to pass selectedFields to tabledata.list method ([#1449](https://github.com/googleapis/nodejs-bigquery/issues/1449)) ([206aff9](https://github.com/googleapis/nodejs-bigquery/commit/206aff93d3d3520199388fc31314fa7ec221cee8))
+
 ## [7.9.2](https://github.com/googleapis/nodejs-bigquery/compare/v7.9.1...v7.9.2) (2025-02-12)
 
 
diff --git a/package.json b/package.json
index 9a7ba93d..c04ee8f0 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@google-cloud/bigquery",
   "description": "Google BigQuery Client Library for Node.js",
-  "version": "7.9.2",
+  "version": "7.9.3",
   "license": "Apache-2.0",
   "author": "Google LLC",
   "engines": {
@@ -51,7 +51,7 @@
     "@google-cloud/common": "^5.0.0",
     "@google-cloud/paginator": "^5.0.2",
     "@google-cloud/precise-date": "^4.0.0",
-    "@google-cloud/promisify": "^4.0.0",
+    "@google-cloud/promisify": "4.0.0",
     "arrify": "^2.0.1",
     "big.js": "^6.0.0",
     "duplexify": "^4.0.0",
diff --git a/samples/package.json b/samples/package.json
index 9fe44b62..d8f826b3 100644
--- a/samples/package.json
+++ b/samples/package.json
@@ -17,7 +17,7 @@
     "fix": "gts fix"
   },
   "dependencies": {
-    "@google-cloud/bigquery": "^7.9.2",
+    "@google-cloud/bigquery": "^7.9.3",
     "@google-cloud/storage": "^7.0.0",
     "google-auth-library": "^9.6.0",
     "readline-promise": "^1.0.4",
diff --git a/src/bigquery.ts b/src/bigquery.ts
index 392a069c..5d9ebfb4 100644
--- a/src/bigquery.ts
+++ b/src/bigquery.ts
@@ -588,14 +588,18 @@ export class BigQuery extends Service {
       parseJSON?: boolean;
     }
   ) {
-    // copy schema fields to avoid mutation when filtering selected fields
-    let schemaFields = schema.fields ? [...schema.fields] : [];
+    // deep copy schema fields to avoid mutation
+    let schemaFields: TableField[] = extend(true, [], schema.fields);
+    let selectedFields: string[] = extend(true, [], options.selectedFields);
 
     if (options.selectedFields && options.selectedFields!.length > 0) {
       const selectedFieldsArray = options.selectedFields!.map(c => {
         return c.split('.');
       });
 
-      const currentFields = selectedFieldsArray.map(c => c.shift());
+      const currentFields = selectedFieldsArray
+        .map(c => c.shift())
+        .filter(c => c !== undefined);
+      //filter schema fields based on selected fields.
       schemaFields = schemaFields.filter(
         field =>
@@ -603,7 +607,7 @@ export class BigQuery extends Service {
           .map(c => c!.toLowerCase())
           .indexOf(field.name!.toLowerCase()) >= 0
       );
-      options.selectedFields = selectedFieldsArray
+      selectedFields = selectedFieldsArray
         .filter(c => c.length > 0)
         .map(c => c.join('.'));
     }
@@ -614,12 +618,18 @@ export class BigQuery extends Service {
       return row.f!.map((field: TableRowField, index: number) => {
         const schemaField = schemaFields[index];
         let value = field.v;
-        if (schemaField.mode === 'REPEATED') {
+        if (schemaField && schemaField.mode === 'REPEATED') {
           value = (value as TableRowField[]).map(val => {
-            return convertSchemaFieldValue(schemaField, val.v, options);
+            return convertSchemaFieldValue(schemaField, val.v, {
+              ...options,
+              selectedFields,
+            });
           });
         } else {
-          value = convertSchemaFieldValue(schemaField, value, options);
+          value = convertSchemaFieldValue(schemaField, value, {
+            ...options,
+            selectedFields,
+          });
         }
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
         const fieldObject: any = {};
diff --git a/src/table.ts b/src/table.ts
index c9f5ffb9..5af4f7ba 100644
--- a/src/table.ts
+++ b/src/table.ts
@@ -1833,7 +1833,6 @@ class Table extends ServiceObject {
     const selectedFields = options.selectedFields
       ? options.selectedFields.split(',')
       : [];
-    delete options.selectedFields;
     const onComplete = (
       err: Error | null,
       rows: TableRow[] | null,
diff --git a/src/types.d.ts b/src/types.d.ts
index e20cc0a0..4dd05b41 100644
--- a/src/types.d.ts
+++ b/src/types.d.ts
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 /**
- * Discovery Revision: 20250128
+ * Discovery Revision: 20250216
  */
 
 /**
@@ -49,7 +49,7 @@ declare namespace bigquery {
      */
    rocAuc?: number;
     /**
-     * Threshold at which the metrics are computed. For binary classification models this is the positive class threshold. For multi-class classfication models this is the confidence threshold.
+     * Threshold at which the metrics are computed. For binary classification models this is the positive class threshold. For multi-class classification models this is the confidence threshold.
      */
     threshold?: number;
   };
@@ -1596,7 +1596,15 @@ declare namespace bigquery {
      */
     csvOptions?: ICsvOptions;
     /**
-     * Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats.
+     * Optional. Format used to parse DATE values. Supports C-style and SQL-style values.
+     */
+    dateFormat?: string;
+    /**
+     * Optional. Format used to parse DATETIME values. Supports C-style and SQL-style values.
+     */
+    datetimeFormat?: string;
+    /**
+     * Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exceeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats.
      */
     decimalTargetTypes?: Array<
       'DECIMAL_TARGET_TYPE_UNSPECIFIED' | 'NUMERIC' | 'BIGNUMERIC' | 'STRING'
@@ -1662,6 +1670,18 @@ declare namespace bigquery {
      * [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
      */
     sourceUris?: Array<string>;
+    /**
+     * Optional. Format used to parse TIME values. Supports C-style and SQL-style values.
+     */
+    timeFormat?: string;
+    /**
+     * Optional. Time zone used when parsing timestamp values that do not have specific time zone information (e.g. 2024-04-20 12:34:56). The expected format is a IANA timezone string (e.g. America/Los_Angeles).
+     */
+    timeZone?: string;
+    /**
+     * Optional. Format used to parse TIMESTAMP values. Supports C-style and SQL-style values.
+     */
+    timestampFormat?: string;
   };
 
   /**
@@ -2389,7 +2409,15 @@ declare namespace bigquery {
      */
     createSession?: boolean;
     /**
-     * Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats.
+     * Optional. Date format used for parsing DATE values.
+     */
+    dateFormat?: string;
+    /**
+     * Optional. Date format used for parsing DATETIME values.
+     */
+    datetimeFormat?: string;
+    /**
+     * Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exceeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats.
      */
     decimalTargetTypes?: Array<
       'DECIMAL_TARGET_TYPE_UNSPECIFIED' | 'NUMERIC' | 'BIGNUMERIC' | 'STRING'
@@ -2492,10 +2520,22 @@ declare namespace bigquery {
      * [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
      */
     sourceUris?: Array<string>;
+    /**
+     * Optional. Date format used for parsing TIME values.
+     */
+    timeFormat?: string;
     /**
      * Time-based partitioning specification for the destination table. Only one of timePartitioning and rangePartitioning should be specified.
      */
     timePartitioning?: ITimePartitioning;
+    /**
+     * Optional. [Experimental] Default time zone that will apply when parsing timestamp values that have no specific time zone.
+     */
+    timeZone?: string;
+    /**
+     * Optional. Date format used for parsing TIMESTAMP values.
+     */
+    timestampFormat?: string;
     /**
      * Optional. If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).
      */
@@ -3369,7 +3409,7 @@ declare namespace bigquery {
   };
 
   /**
-   * Statistics for metadata caching in BigLake tables.
+   * Statistics for metadata caching in queried tables.
    */
   type IMetadataCacheStatistics = {
     /**
@@ -3893,6 +3933,10 @@ declare namespace bigquery {
      * Optional. Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'.
      */
     defaultDataset?: IDatasetReference;
+    /**
+     * Optional. Custom encryption configuration (e.g., Cloud KMS keys)
+     */
+    destinationEncryptionConfiguration?: IEncryptionConfiguration;
     /**
      * Optional. If set to true, BigQuery doesn't run the job. Instead, if the query is valid, BigQuery returns statistics about the job such as how many bytes would be processed. If the query is invalid, an error returns. The default value is false.
      */
@@ -3908,6 +3952,10 @@ declare namespace bigquery {
       | 'JOB_CREATION_MODE_UNSPECIFIED'
       | 'JOB_CREATION_REQUIRED'
       | 'JOB_CREATION_OPTIONAL';
+    /**
+     * Optional. Job timeout in milliseconds. If this time limit is exceeded, BigQuery will attempt to stop a longer job, but may not always succeed in canceling it before the job completes. For example, a job that takes more than 60 seconds to complete has a better chance of being stopped than a job that takes 10 seconds to complete. This timeout applies to the query even if a job does not need to be created.
+     */
+    jobTimeoutMs?: string;
     /**
      * The resource type of the request.
      */
@@ -5381,7 +5429,7 @@ declare namespace bigquery {
      */
     view?: {
       /**
-       * Specifices the privacy policy for the view.
+       * Specifies the privacy policy for the view.
        */
       privacyPolicy?: IPrivacyPolicy;
       /**
@@ -6232,7 +6280,7 @@ declare namespace bigquery {
      */
     foreignDefinitions?: Array<IForeignViewDefinition>;
     /**
-     * Optional. Specifices the privacy policy for the view.
+     * Optional. Specifies the privacy policy for the view.
      */
     privacyPolicy?: IPrivacyPolicy;
     /**
@@ -6297,7 +6345,7 @@ declare namespace bigquery {
      */
     all?: boolean;
     /**
-     * An expression for filtering the results of the request by label. The syntax is `labels.<name>[:<value>]`. Multiple filters can be ANDed together by connecting with a space. Example: `labels.department:receiving labels.active`. See [Filtering datasets using labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels) for details.
+     * An expression for filtering the results of the request by label. The syntax is `labels.<name>[:<value>]`. Multiple filters can be AND-ed together by connecting with a space. Example: `labels.department:receiving labels.active`. See [Filtering datasets using labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels) for details.
      */
     filter?: string;
     /**
diff --git a/test/table.ts b/test/table.ts
index 577a1ae3..5394f006 100644
--- a/test/table.ts
+++ b/test/table.ts
@@ -2179,6 +2179,7 @@ describe('BigQuery/Table', () => {
       sandbox.restore();
 
       table.request = (reqOpts: DecorateRequestOptions, callback: Function) => {
+        assert(reqOpts.qs.selectedFields, selectedFields);
         callback(null, {rows});
       };
 
@@ -2190,15 +2191,38 @@ it('should return selected fields after consecutive calls', done => {
+      const buildNestedObject = (value: Record<string, string>) => {
+        return [
+          {
+            v: {
+              f: [
+                {
+                  v: {
+                    f: Object.values(value).map(v => ({v})),
+                  },
+                },
+              ],
+            },
+          },
+        ];
+      };
       const callSequence = [
         {
-          selectedFields: ['age'],
-          rows: [{f: [{v: 40}]}],
-          expected: [{age: 40}],
+          selectedFields: ['age', 'nested.object.a'],
+          rows: [
+            {
+              f: [{v: 40}, {v: buildNestedObject({a: '1'})}],
+            },
+          ],
+          expected: [{age: 40, nested: [{object: {a: '1'}}]}],
         },
         {
           selectedFields: ['name', 'address'],
-          rows: [{f: [{v: 'John'}, {v: '1234 Fake St, Springfield'}]}],
+          rows: [
+            {
+              f: [{v: 'John'}, {v: '1234 Fake St, Springfield'}],
+            },
+          ],
           expected: [{name: 'John', address: '1234 Fake St, Springfield'}],
         },
         {
@@ -2212,6 +2236,27 @@ describe('BigQuery/Table', () => {
         {name: 'name', type: 'string'},
         {name: 'age', type: 'INTEGER'},
         {name: 'address', type: 'string'},
+        {
+          name: 'nested',
+          type: 'RECORD',
+          mode: 'REPEATED',
+          fields: [
+            {
+              name: 'object',
+              type: 'RECORD',
+              fields: [
+                {
+                  name: 'a',
+                  type: 'STRING',
+                },
+                {
+                  name: 'b',
+                  type: 'STRING',
+                },
+              ],
+            },
+          ],
+        },
       ],
     };
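
A quick usage sketch of the behavior this release fixes (not part of the diff; the dataset and table names are placeholders). Before this patch, src/table.ts deleted options.selectedFields before issuing the request, so tabledata.list never received the filter and every column came back; the dotted path below mirrors the nested case added to test/table.ts.

import {BigQuery} from '@google-cloud/bigquery';

async function main() {
  const bigquery = new BigQuery();
  // 'my_dataset' and 'my_table' are placeholder names.
  const table = bigquery.dataset('my_dataset').table('my_table');

  // selectedFields is a comma-separated list; dotted paths select
  // subfields of RECORD columns. With this fix the value is forwarded
  // to tabledata.list instead of being dropped client-side.
  const [rows] = await table.getRows({selectedFields: 'age,nested.object.a'});
  console.log(rows);
}

main().catch(console.error);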
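The regenerated typings (Discovery Revision 20250216) add date/time parsing options to load and external-table configurations. A sketch of a load job that sets them, assuming placeholder project, bucket, and table ids and illustrative format strings; timeZone is marked [Experimental] upstream, so server-side behavior may change.

import {BigQuery} from '@google-cloud/bigquery';

async function loadWithCustomFormats() {
  const bigquery = new BigQuery();
  const [job] = await bigquery.createJob({
    configuration: {
      load: {
        sourceUris: ['gs://my-bucket/events.csv'], // placeholder URI
        destinationTable: {
          projectId: 'my-project', // placeholder ids
          datasetId: 'my_dataset',
          tableId: 'events',
        },
        sourceFormat: 'CSV',
        autodetect: true,
        // New fields on IJobConfigurationLoad in this revision:
        dateFormat: 'YYYY-MM-DD',
        timestampFormat: 'YYYY-MM-DD HH24:MI:SS',
        timeZone: 'America/Los_Angeles', // applied to zoneless timestamps
      },
    },
  });
  await job.promise(); // wait for the load job to finish
}

loadWithCustomFormats().catch(console.error);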
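jobs.query requests likewise gain jobTimeoutMs and destinationEncryptionConfiguration. A type-level sketch only: it assumes the generated typings are reachable at build/src/types (the library's own sources import them as './types'), and the KMS key name is a placeholder.

import bigquery from '@google-cloud/bigquery/build/src/types';

// Exported so the sketch compiles standalone without unused-variable lint.
export const request: bigquery.IQueryRequest = {
  query: 'SELECT 1',
  useLegacySql: false,
  jobTimeoutMs: '30000', // millisecond string, per the generated typings
  destinationEncryptionConfiguration: {
    // Placeholder KMS resource name.
    kmsKeyName:
      'projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key',
  },
};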