diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 2041f92e9c5..5d7ebc676e1 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -13,5 +13,5 @@
# limitations under the License.
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-nodejs-mono-repo:latest
- digest: sha256:94d55995d53fb47e6e265a2e02bfdb778ccaaef4f3618a0ea881b0ee559e88bf
+ digest: sha256:2e454636c6197216df757d53b0f865c1c7dcf57c92c489a612375bfca981ee81
# created: 2026-02-19T18:08:41.831765454Z
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index ae7db338588..c56b20068ee 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -9,3 +9,5 @@
* @googleapis/cloud-sdk-nodejs-team
/handwritten/bigquery @googleapis/bigquery-team
/handwritten/cloud-profiler @googleapis/cloud-profiler-team
+/handwritten/spanner @googleapis/spanner-team
+/handwritten/storage @googleapis/gcs-team
diff --git a/.github/workflows/conformance-test.yaml b/.github/workflows/conformance-test.yaml
new file mode 100644
index 00000000000..845d280462d
--- /dev/null
+++ b/.github/workflows/conformance-test.yaml
@@ -0,0 +1,21 @@
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'handwritten/storage/**'
+ pull_request:
+ paths:
+ - 'handwritten/storage/**'
+name: conformance
+jobs:
+ conformance-test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 14
+ - run: node --version
+ - run: cd handwritten/storage && npm install
+ - run: cd handwritten/storage && npm run conformance-test
diff --git a/.github/workflows/system-tests-against-emulator.yaml b/.github/workflows/system-tests-against-emulator.yaml
new file mode 100644
index 00000000000..01551f4688c
--- /dev/null
+++ b/.github/workflows/system-tests-against-emulator.yaml
@@ -0,0 +1,28 @@
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+name: system-tests-against-emulator
+jobs:
+ test:
+ runs-on: ubuntu-latest
+
+ services:
+ emulator:
+ image: gcr.io/cloud-spanner-emulator/emulator:latest
+ ports:
+ - 9010:9010
+ - 9020:9020
+
+ steps:
+ - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
+ - uses: actions/setup-node@v5
+ with:
+ node-version: 22
+ - run: node --version
+ - run: npm install
+ - run: npm run system-test
+ env:
+ SPANNER_EMULATOR_HOST: localhost:9010
+ GCLOUD_PROJECT: emulator-test-project
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 9afa1ee8000..e02e4bb0f08 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,12 +1,14 @@
{
- "handwritten/datastore": "10.1.0",
- "handwritten/logging-winston": "6.0.1",
"handwritten/bigquery": "8.2.0",
"handwritten/cloud-profiler": "6.0.4",
+ "handwritten/datastore": "10.1.0",
"handwritten/logging-bunyan": "5.1.1",
- "packages/gapic-node-processing": "0.1.6",
+ "handwritten/logging-winston": "6.0.1",
+ "handwritten/spanner": "8.6.0",
+ "handwritten/storage": "7.19.0",
+ "packages/gapic-node-processing": "0.1.7",
"packages/google-ads-admanager": "0.5.0",
- "packages/google-ads-datamanager": "0.1.0",
+ "packages/google-ads-datamanager": "0.2.0",
"packages/google-ai-generativelanguage": "3.7.0",
"packages/google-analytics-admin": "9.0.1",
"packages/google-analytics-data": "5.2.1",
@@ -22,7 +24,7 @@
"packages/google-chat": "0.23.0",
"packages/google-cloud-accessapproval": "4.2.1",
"packages/google-cloud-advisorynotifications": "2.2.1",
- "packages/google-cloud-aiplatform": "6.4.0",
+ "packages/google-cloud-aiplatform": "6.5.0",
"packages/google-cloud-alloydb": "2.4.0",
"packages/google-cloud-apigateway": "4.2.1",
"packages/google-cloud-apigeeconnect": "4.2.1",
@@ -59,7 +61,7 @@
"packages/google-cloud-clouddms": "4.1.1",
"packages/google-cloud-cloudsecuritycompliance": "0.2.0",
"packages/google-cloud-commerce-consumer-procurement": "0.7.1",
- "packages/google-cloud-compute": "6.7.0",
+ "packages/google-cloud-compute": "6.8.0",
"packages/google-cloud-confidentialcomputing": "2.2.2",
"packages/google-cloud-config": "0.11.1",
"packages/google-cloud-configdelivery": "0.1.1",
@@ -76,11 +78,11 @@
"packages/google-cloud-dataqna": "4.1.1",
"packages/google-cloud-datastream": "4.3.1",
"packages/google-cloud-deploy": "5.2.1",
- "packages/google-cloud-developerconnect": "0.6.1",
+ "packages/google-cloud-developerconnect": "0.7.0",
"packages/google-cloud-devicestreaming": "0.2.1",
"packages/google-cloud-dialogflow": "7.5.0",
"packages/google-cloud-dialogflow-cx": "5.7.0",
- "packages/google-cloud-discoveryengine": "2.5.2",
+ "packages/google-cloud-discoveryengine": "2.5.3",
"packages/google-cloud-dns": "5.3.1",
"packages/google-cloud-documentai": "9.5.0",
"packages/google-cloud-domains": "4.2.1",
@@ -105,7 +107,7 @@
"packages/google-cloud-ids": "4.2.1",
"packages/google-cloud-iot": "5.2.1",
"packages/google-cloud-kms": "5.4.0",
- "packages/google-cloud-kms-inventory": "2.4.0",
+ "packages/google-cloud-kms-inventory": "2.5.0",
"packages/google-cloud-language": "7.2.1",
"packages/google-cloud-licensemanager": "0.1.1",
"packages/google-cloud-lifesciences": "4.2.1",
@@ -124,7 +126,7 @@
"packages/google-cloud-monitoring": "5.3.1",
"packages/google-cloud-netapp": "0.16.0",
"packages/google-cloud-networkconnectivity": "4.6.0",
- "packages/google-cloud-networkmanagement": "5.2.0",
+ "packages/google-cloud-networkmanagement": "5.3.0",
"packages/google-cloud-networksecurity": "3.3.1",
"packages/google-cloud-networkservices": "0.12.0",
"packages/google-cloud-notebooks": "4.2.1",
@@ -160,9 +162,9 @@
"packages/google-cloud-servicedirectory": "6.1.1",
"packages/google-cloud-servicehealth": "0.7.1",
"packages/google-cloud-shell": "4.1.1",
- "packages/google-cloud-speech": "7.2.1",
+ "packages/google-cloud-speech": "7.3.0",
"packages/google-cloud-sql": "0.24.0",
- "packages/google-cloud-storagebatchoperations": "0.2.0",
+ "packages/google-cloud-storagebatchoperations": "0.3.0",
"packages/google-cloud-storageinsights": "2.2.1",
"packages/google-cloud-support": "2.2.1",
"packages/google-cloud-talent": "7.1.1",
@@ -171,7 +173,7 @@
"packages/google-cloud-texttospeech": "6.4.0",
"packages/google-cloud-tpu": "4.1.1",
"packages/google-cloud-translate": "9.3.0",
- "packages/google-cloud-vectorsearch": "0.2.0",
+ "packages/google-cloud-vectorsearch": "0.3.0",
"packages/google-cloud-video-livestream": "2.2.1",
"packages/google-cloud-video-stitcher": "3.2.1",
"packages/google-cloud-video-transcoder": "4.4.1",
@@ -182,7 +184,7 @@
"packages/google-cloud-vpcaccess": "4.2.1",
"packages/google-cloud-webrisk": "5.3.1",
"packages/google-cloud-websecurityscanner": "4.2.1",
- "packages/google-cloud-workflows": "5.0.0",
+ "packages/google-cloud-workflows": "5.1.0",
"packages/google-cloud-workstations": "2.2.1",
"packages/google-container": "6.7.0",
"packages/google-dataflow": "4.1.1",
@@ -223,5 +225,8 @@
"packages/google-storagetransfer": "4.2.1",
"packages/google-streetview-publish": "0.4.1",
"packages/grafeas": "6.1.1",
- "packages/typeless-sample-bot": "3.1.1"
+ "packages/typeless-sample-bot": "3.1.1",
+ "packages/google-cloud-visionai": "0.1.0",
+ "packages/google-cloud-workloadmanager": "0.1.0",
+ "packages/google-cloud-ces": "0.1.0"
}
diff --git a/changelog.json b/changelog.json
index fe7b26aa31e..55e7506dc8d 100644
--- a/changelog.json
+++ b/changelog.json
@@ -1,6 +1,328 @@
{
"repository": "googleapis/google-cloud-node",
"entries": [
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "77042f508c0e4d03569ef4eae7e0dd8f235cd469",
+ "message": "[ces] add public libraries for CES v1beta",
+ "issues": [
+ "7285"
+ ]
+ },
+ {
+ "type": "feat",
+ "sha": "9146f2714244d2e7c50220ccb4380b738acab857",
+ "message": "Add initial files for google.cloud.ces.v1",
+ "issues": [
+ "7241"
+ ]
+ },
+ {
+ "type": "fix",
+ "sha": "41c2ff2851b5fdadabf4f9bd3500167c34b32ff7",
+ "message": "[gkeconnect-gateway] remove unused GatewayServiceClient",
+ "issues": [
+ "6775"
+ ]
+ }
+ ],
+ "version": "0.1.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/ces",
+ "id": "82a005b5-f13a-4f62-b6e3-6e88408f9ae1",
+ "createTime": "2026-03-02T16:41:40.032Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "8a30212142e5403294368da827cb33d3b167209d",
+ "message": "Add initial files for google.cloud.workloadmanager.v1",
+ "issues": [
+ "7265"
+ ]
+ },
+ {
+ "type": "fix",
+ "sha": "41c2ff2851b5fdadabf4f9bd3500167c34b32ff7",
+ "message": "[gkeconnect-gateway] remove unused GatewayServiceClient",
+ "issues": [
+ "6775"
+ ]
+ }
+ ],
+ "version": "0.1.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/workloadmanager",
+ "id": "69c8f974-5f6f-4363-b9d7-8db7adc29259",
+ "createTime": "2026-03-02T16:41:40.001Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "1cc4a84c3cab7894d8877488dcd7454aef62a8a2",
+ "message": "Add initial files for google.cloud.visionai.v1",
+ "issues": [
+ "7275"
+ ]
+ },
+ {
+ "type": "fix",
+ "sha": "41c2ff2851b5fdadabf4f9bd3500167c34b32ff7",
+ "message": "[gkeconnect-gateway] remove unused GatewayServiceClient",
+ "issues": [
+ "6775"
+ ]
+ }
+ ],
+ "version": "0.1.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/visionai",
+ "id": "2b2a2e09-ed6f-4239-8005-a9309c8964db",
+ "createTime": "2026-03-02T16:41:39.968Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "faf79df319ddd7d732413aa0b745160bfb815198",
+ "message": "Expand workflows to receive both workflows and executions",
+ "issues": [
+ "7271"
+ ]
+ },
+ {
+ "type": "fix",
+ "sha": "41c2ff2851b5fdadabf4f9bd3500167c34b32ff7",
+ "message": "[gkeconnect-gateway] remove unused GatewayServiceClient",
+ "issues": [
+ "6775"
+ ]
+ }
+ ],
+ "version": "5.1.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/workflows",
+ "id": "3eba0a4f-2744-4eb9-8d9c-ad4d2b45ab98",
+ "createTime": "2026-03-02T16:41:39.937Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "0faaf8cf11c10c807fad495758c33e601589cf4c",
+ "message": "[vectorsearch] introduce new v1 vectorsearch surface and make it default version",
+ "issues": [
+ "7273"
+ ],
+ "breakingChangeNote": "[vectorsearch] introduce new v1 vectorsearch surface and make it default version ([#7273](https://github.com/googleapis/google-cloud-node/issues/7273))"
+ }
+ ],
+ "version": "0.3.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/vectorsearch",
+ "id": "8d909ea4-d3b1-4642-b846-3bb70801ad20",
+ "createTime": "2026-03-02T16:41:39.898Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "4b8da3eefef2c4af0f39f186b4d60257fa146eae",
+ "message": "[storagebatchoperations] add new transformation `update_object_custom_context`",
+ "issues": [
+ "7309"
+ ]
+ }
+ ],
+ "version": "0.3.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/storagebatchoperations",
+ "id": "d5ce6e2d-882a-4987-bbff-bae0c22ef452",
+ "createTime": "2026-03-02T16:41:39.867Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "4373c581d895ad3785f1b3493bda83def6a0024c",
+ "message": "Adds endpointing sensitivity to streaming recognition features",
+ "issues": [
+ "7270"
+ ]
+ }
+ ],
+ "version": "7.3.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/speech",
+ "id": "4524053f-fd26-4b60-ae8c-df2b6f8bf957",
+ "createTime": "2026-03-02T16:41:39.833Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "d6fb4665242b13e9c8b29ae3df6696155076297d",
+ "message": "[networkmanagement] Add API fields related to GKE Pods and Network Policies",
+ "issues": [
+ "7292"
+ ]
+ }
+ ],
+ "version": "5.3.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/network-management",
+ "id": "f87378b8-a21d-4633-853c-e0ce5b6e78af",
+ "createTime": "2026-03-02T16:41:39.799Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "34a365988184038c96de95cf0d8bf4bb5d8a8ccf",
+ "message": "[inventory] Add support for project level key usage tracking",
+ "issues": []
+ }
+ ],
+ "version": "2.5.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/kms-inventory",
+ "id": "54be806f-3d7a-449d-b521-33ec30ba360e",
+ "createTime": "2026-03-02T16:41:39.764Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "fix",
+ "sha": "a28d23fde5441c969b380dbd0dff0dc48836c01e",
+ "message": "Enable google-cloud-discoveryengine tests to run on windows",
+ "issues": []
+ },
+ {
+ "type": "fix",
+ "sha": "f9b1ee9a92eb12f57a56db1bf591d3ec3afb4864",
+ "message": "Try an alternative bash script for windows compatibility without an additional dependency",
+ "issues": []
+ },
+ {
+ "type": "fix",
+ "sha": "23b81e4ae4c2dbe26f3731df4bd6cac9c687753b",
+ "message": "Enable google-cloud-discoveryengine tests to run on windows",
+ "issues": []
+ }
+ ],
+ "version": "2.5.3",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/discoveryengine",
+ "id": "394651a4-0bfd-4335-b80e-b65dba1fef35",
+ "createTime": "2026-03-02T16:41:39.736Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "b23e571f96992f373cc6c413d026f322e8e58176",
+ "message": "[developerconnect] Add Secure Source Manager and Generic HTTP Endpoint connection types",
+ "issues": [
+ "7284"
+ ]
+ }
+ ],
+ "version": "0.7.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/developerconnect",
+ "id": "300f53ce-1df2-4071-9efd-2dba3822b202",
+ "createTime": "2026-03-02T16:41:39.704Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "1bacdf222c3995c0b3e44d972c952645367e3fa0",
+ "message": "[compute] Update Compute Engine v1beta API to revision 20260213",
+ "issues": [
+ "7296"
+ ]
+ },
+ {
+ "type": "feat",
+ "sha": "2d1a22c828cd8dfecb2083d3bc270210a79a41e3",
+ "message": "[compute] Update Compute Engine v1beta API to revision 20260106 (#1147)",
+ "issues": [
+ "7272"
+ ]
+ }
+ ],
+ "version": "6.8.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/compute",
+ "id": "974dfa02-4a93-42e2-9fcd-8cb33113b9db",
+ "createTime": "2026-03-02T16:41:39.673Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "9f91e0513bf7a60239aef5e4222993184c63fbb2",
+ "message": "Add `traffic_type, tool_use_prompt_tokens_details` to message `GenerateContentResponse.UsageMetadata`",
+ "issues": [
+ "7266"
+ ]
+ }
+ ],
+ "version": "6.5.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-cloud/aiplatform",
+ "id": "b753f790-11a4-4fc1-a364-393a85e1e90c",
+ "createTime": "2026-03-02T16:41:39.642Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "feat",
+ "sha": "32115d95be2fc2e06da22aa5101c94ddb3bf82db",
+ "message": "[datamanager] add `UserListService` for creating and managing user lists",
+ "issues": [
+ "7290"
+ ]
+ }
+ ],
+ "version": "0.2.0",
+ "language": "JAVASCRIPT",
+ "artifactName": "@google-ads/datamanager",
+ "id": "9b0bb1b0-9564-4ab3-81d3-cd76c26a94b2",
+ "createTime": "2026-03-02T16:41:39.608Z"
+ },
+ {
+ "changes": [
+ {
+ "type": "fix",
+ "sha": "f1456f9e8bdd5c2b2d579d6a451036d98b8ee26e",
+ "message": "Bug preventing apiPath generation when apiId includes letter v",
+ "issues": []
+ },
+ {
+ "type": "fix",
+ "sha": "6d705f413ead1e2adfe3dc1d703d4637ab36b6d9",
+ "message": "Bug preventing apiPath generation when apiId includes letter v",
+ "issues": []
+ },
+ {
+ "type": "fix",
+ "sha": "76baebbdc05758953af5550b71f3144a7eafbfb4",
+ "message": "Bug preventing apiPath generation when apiId includes letter v",
+ "issues": []
+ }
+ ],
+ "version": "0.1.7",
+ "language": "JAVASCRIPT",
+ "artifactName": "gapic-node-processing",
+ "id": "0bce40fb-fc26-4fef-ba28-8f1c2c90521c",
+ "createTime": "2026-03-02T16:41:39.566Z"
+ },
{
"changes": [
{
@@ -68363,5 +68685,5 @@
"createTime": "2023-01-28T04:18:24.718Z"
}
],
- "updateTime": "2026-02-19T13:03:03.636Z"
+ "updateTime": "2026-03-02T16:41:40.032Z"
}
\ No newline at end of file
diff --git a/ci/run_conditional_tests.sh b/ci/run_conditional_tests.sh
index 1dc4fb72458..573a74e2c04 100755
--- a/ci/run_conditional_tests.sh
+++ b/ci/run_conditional_tests.sh
@@ -88,7 +88,7 @@ tests_with_credentials="packages/google-analytics-admin/ packages/google-area120
# on Windows due to incompatible npm scripts.
#
# Until these packages can be updated to be OS agnostic, we will skip them on Windows.
-windows_exempt_tests=".github/scripts/fixtures/ .github/scripts/tests/ packages/gapic-node-processing/ packages/google-cloud-discoveryengine/ packages/typeless-sample-bot/"
+windows_exempt_tests=".github/scripts/fixtures/ .github/scripts/tests/ packages/gapic-node-processing/ packages/typeless-sample-bot/"
for subdir in ${subdirs[@]}; do
for d in `ls -d ${subdir}/*/`; do
diff --git a/handwritten/spanner/.OwlBot.yaml b/handwritten/spanner/.OwlBot.yaml
new file mode 100644
index 00000000000..0e4f41709ed
--- /dev/null
+++ b/handwritten/spanner/.OwlBot.yaml
@@ -0,0 +1,30 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+deep-remove-regex:
+ - /owl-bot-staging
+
+deep-copy-regex:
+ - source: /google/spanner/(v.*)/.*-nodejs
+ dest: /owl-bot-staging/spanner/$1
+ - source: /google/spanner/(admin/database/v.*)/.*-nodejs
+ dest: /owl-bot-staging/spanner/$1
+ - source: /google/spanner/(admin/instance/v.*)/.*-nodejs
+ dest: /owl-bot-staging/spanner/$1
+ - source: /google/spanner/(executor/v.*)/.*-nodejs
+ dest: /owl-bot-staging/spanner/$1
+
+begin-after-commit-hash: 46f25fb1121747b994ff5818963fda84b5e6bfd3
+
diff --git a/handwritten/spanner/.devcontainer/Dockerfile b/handwritten/spanner/.devcontainer/Dockerfile
new file mode 100644
index 00000000000..a90e0fbda36
--- /dev/null
+++ b/handwritten/spanner/.devcontainer/Dockerfile
@@ -0,0 +1,16 @@
+ARG VARIANT="18"
+FROM mcr.microsoft.com/devcontainers/typescript-node:${VARIANT}
+
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive
+
+RUN type -p curl >/dev/null || (apt-get install curl -y)
+
+# install gh
+RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
+&& chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
+&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
+&& apt-get update \
+&& apt-get install gh -y
+
+# install gloud sdk
+RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && apt-get update -y && apt-get install google-cloud-cli -y
\ No newline at end of file
diff --git a/handwritten/spanner/.devcontainer/devcontainer.json b/handwritten/spanner/.devcontainer/devcontainer.json
new file mode 100644
index 00000000000..138324fdd2b
--- /dev/null
+++ b/handwritten/spanner/.devcontainer/devcontainer.json
@@ -0,0 +1,24 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/typescript-node
+{
+ "name": "Node.js & TypeScript",
+ "build": {
+ // Sets the run context to one level up instead of the .devcontainer folder.
+ "args": { "VARIANT": "18" },
+ // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
+ "dockerfile": "Dockerfile"
+ },
+
+ "postCreateCommand": "bash .devcontainer/postCreate.sh",
+
+ "customizations": {
+ "vscode": {
+ "settings": {
+ "debug.javascript.autoAttachFilter":"smart"
+ },
+ "extensions": [
+ "ms-azuretools.vscode-docker"
+ ]
+ }
+ }
+}
diff --git a/handwritten/spanner/.devcontainer/postCreate.sh b/handwritten/spanner/.devcontainer/postCreate.sh
new file mode 100644
index 00000000000..b627d596f0f
--- /dev/null
+++ b/handwritten/spanner/.devcontainer/postCreate.sh
@@ -0,0 +1,4 @@
+echo "Post Create Starting"
+
+npm install
+npm test
\ No newline at end of file
diff --git a/handwritten/spanner/.eslintignore b/handwritten/spanner/.eslintignore
new file mode 100644
index 00000000000..ea5b04aebe6
--- /dev/null
+++ b/handwritten/spanner/.eslintignore
@@ -0,0 +1,7 @@
+**/node_modules
+**/coverage
+test/fixtures
+build/
+docs/
+protos/
+samples/generated/
diff --git a/handwritten/spanner/.eslintrc.json b/handwritten/spanner/.eslintrc.json
new file mode 100644
index 00000000000..78215349546
--- /dev/null
+++ b/handwritten/spanner/.eslintrc.json
@@ -0,0 +1,3 @@
+{
+ "extends": "./node_modules/gts"
+}
diff --git a/handwritten/spanner/.gitattributes b/handwritten/spanner/.gitattributes
new file mode 100644
index 00000000000..33739cb74e4
--- /dev/null
+++ b/handwritten/spanner/.gitattributes
@@ -0,0 +1,4 @@
+*.ts text eol=lf
+*.js text eol=lf
+protos/* linguist-generated
+**/api-extractor.json linguist-language=JSON-with-Comments
diff --git a/handwritten/spanner/.gitignore b/handwritten/spanner/.gitignore
new file mode 100644
index 00000000000..d4f03a0df2e
--- /dev/null
+++ b/handwritten/spanner/.gitignore
@@ -0,0 +1,14 @@
+**/*.log
+**/node_modules
+/.coverage
+/coverage
+/.nyc_output
+/docs/
+/out/
+/build/
+system-test/secrets.js
+system-test/*key.json
+*.lock
+.DS_Store
+package-lock.json
+__pycache__
diff --git a/handwritten/spanner/.jsdoc.js b/handwritten/spanner/.jsdoc.js
new file mode 100644
index 00000000000..89dbb7cd9e2
--- /dev/null
+++ b/handwritten/spanner/.jsdoc.js
@@ -0,0 +1,55 @@
+// Copyright 2026 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+'use strict';
+
+module.exports = {
+ opts: {
+ readme: './README.md',
+ package: './package.json',
+ template: './node_modules/jsdoc-fresh',
+ recurse: true,
+ verbose: true,
+ destination: './docs/'
+ },
+ plugins: [
+ 'plugins/markdown',
+ 'jsdoc-region-tag'
+ ],
+ source: {
+ excludePattern: '(^|\\/|\\\\)[._]',
+ include: [
+ 'build/src',
+ 'protos'
+ ],
+ includePattern: '\\.js$'
+ },
+ templates: {
+ copyright: 'Copyright 2026 Google LLC',
+ includeDate: false,
+ sourceFiles: false,
+ systemName: '@google-cloud/spanner',
+ theme: 'lumen',
+ default: {
+ outputSourceFiles: false
+ }
+ },
+ markdown: {
+ idInHeadings: true
+ }
+};
diff --git a/handwritten/spanner/.kokoro/.gitattributes b/handwritten/spanner/.kokoro/.gitattributes
new file mode 100644
index 00000000000..87acd4f484e
--- /dev/null
+++ b/handwritten/spanner/.kokoro/.gitattributes
@@ -0,0 +1 @@
+* linguist-generated=true
diff --git a/handwritten/spanner/.kokoro/cleanup.sh b/handwritten/spanner/.kokoro/cleanup.sh
new file mode 100755
index 00000000000..cd841d81e5f
--- /dev/null
+++ b/handwritten/spanner/.kokoro/cleanup.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+export NPM_CONFIG_PREFIX=/home/node/.npm-global
+
+# Setup service account credentials.
+export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/spanner-key.json
+export GCLOUD_PROJECT=long-door-651
+
+cd $(dirname $0)/..
+
+npm install
+
+npm run cleanup
diff --git a/handwritten/spanner/.kokoro/common.cfg b/handwritten/spanner/.kokoro/common.cfg
new file mode 100644
index 00000000000..f115a8b7026
--- /dev/null
+++ b/handwritten/spanner/.kokoro/common.cfg
@@ -0,0 +1,24 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/trampoline_v2.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/node:18-user"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/test.sh"
+}
diff --git a/handwritten/spanner/.kokoro/continuous/node18/common.cfg b/handwritten/spanner/.kokoro/continuous/node18/common.cfg
new file mode 100644
index 00000000000..f115a8b7026
--- /dev/null
+++ b/handwritten/spanner/.kokoro/continuous/node18/common.cfg
@@ -0,0 +1,24 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/trampoline_v2.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/node:18-user"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/test.sh"
+}
diff --git a/handwritten/spanner/.kokoro/continuous/node18/lint.cfg b/handwritten/spanner/.kokoro/continuous/node18/lint.cfg
new file mode 100644
index 00000000000..8aeed86c220
--- /dev/null
+++ b/handwritten/spanner/.kokoro/continuous/node18/lint.cfg
@@ -0,0 +1,4 @@
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/lint.sh"
+}
diff --git a/handwritten/spanner/.kokoro/continuous/node18/samples-test.cfg b/handwritten/spanner/.kokoro/continuous/node18/samples-test.cfg
new file mode 100644
index 00000000000..3c4bbe887e3
--- /dev/null
+++ b/handwritten/spanner/.kokoro/continuous/node18/samples-test.cfg
@@ -0,0 +1,12 @@
+# Download resources for system tests (service account key, etc.)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs"
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/samples-test.sh"
+}
+
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "long-door-651-kokoro-system-test-service-account"
+}
\ No newline at end of file
diff --git a/handwritten/spanner/.kokoro/continuous/node18/system-test.cfg b/handwritten/spanner/.kokoro/continuous/node18/system-test.cfg
new file mode 100644
index 00000000000..b50a1195edc
--- /dev/null
+++ b/handwritten/spanner/.kokoro/continuous/node18/system-test.cfg
@@ -0,0 +1,12 @@
+# Download resources for system tests (service account key, etc.)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs"
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/system-test.sh"
+}
+
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "long-door-651-kokoro-system-test-service-account"
+}
\ No newline at end of file
diff --git a/handwritten/spanner/.kokoro/continuous/node18/test.cfg b/handwritten/spanner/.kokoro/continuous/node18/test.cfg
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/handwritten/spanner/.kokoro/docs.sh b/handwritten/spanner/.kokoro/docs.sh
new file mode 100755
index 00000000000..85901242b5e
--- /dev/null
+++ b/handwritten/spanner/.kokoro/docs.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+export NPM_CONFIG_PREFIX=${HOME}/.npm-global
+
+cd $(dirname $0)/..
+
+npm install
+
+npm run docs-test
diff --git a/handwritten/spanner/.kokoro/lint.sh b/handwritten/spanner/.kokoro/lint.sh
new file mode 100755
index 00000000000..c7ffa6438b0
--- /dev/null
+++ b/handwritten/spanner/.kokoro/lint.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+export NPM_CONFIG_PREFIX=${HOME}/.npm-global
+export PATH="${NPM_CONFIG_PREFIX}/bin:${PATH}"
+
+# Ensure the npm global directory is writable, otherwise rebuild `npm`
+mkdir -p ${NPM_CONFIG_PREFIX}/lib
+npm config -g ls || npm i -g npm@`npm --version`
+
+cd $(dirname $0)/..
+
+npm install
+
+# Install and link samples
+if [ -f samples/package.json ]; then
+ cd samples/
+ npm link ../
+ npm install
+ cd ..
+fi
+
+npm run lint
diff --git a/handwritten/spanner/.kokoro/populate-secrets.sh b/handwritten/spanner/.kokoro/populate-secrets.sh
new file mode 100755
index 00000000000..deb2b199eb4
--- /dev/null
+++ b/handwritten/spanner/.kokoro/populate-secrets.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+# Copyright 2020 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is called in the early stage of `trampoline_v2.sh` to
+# populate secrets needed for the CI builds.
+
+set -eo pipefail
+
+function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;}
+function msg { println "$*" >&2 ;}
+function println { printf '%s\n' "$(now) $*" ;}
+
+# Populates requested secrets set in SECRET_MANAGER_KEYS
+
+# In Kokoro CI builds, we use the service account attached to the
+# Kokoro VM. This means we need to setup auth on other CI systems.
+# For local run, we just use the gcloud command for retrieving the
+# secrets.
+
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ GCLOUD_COMMANDS=(
+ "docker"
+ "run"
+ "--entrypoint=gcloud"
+ "--volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR}"
+ "gcr.io/google.com/cloudsdktool/cloud-sdk"
+ )
+ if [[ "${TRAMPOLINE_CI:-}" == "kokoro" ]]; then
+ SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager"
+ else
+ echo "Authentication for this CI system is not implemented yet."
+ exit 2
+ # TODO: Determine appropriate SECRET_LOCATION and the GCLOUD_COMMANDS.
+ fi
+else
+  # For local runs, use /dev/shm or a temporary directory for
+  # KOKORO_GFILE_DIR.
+ if [[ -d "/dev/shm" ]]; then
+ export KOKORO_GFILE_DIR=/dev/shm
+ else
+ export KOKORO_GFILE_DIR=$(mktemp -d -t ci-XXXXXXXX)
+ fi
+ SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager"
+ GCLOUD_COMMANDS=("gcloud")
+fi
+
+msg "Creating folder on disk for secrets: ${SECRET_LOCATION}"
+mkdir -p ${SECRET_LOCATION}
+
+for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g")
+do
+ msg "Retrieving secret ${key}"
+ "${GCLOUD_COMMANDS[@]}" \
+ secrets versions access latest \
+ --project cloud-devrel-kokoro-resources \
+ --secret $key > \
+ "$SECRET_LOCATION/$key"
+ if [[ $? == 0 ]]; then
+ msg "Secret written to ${SECRET_LOCATION}/${key}"
+ else
+ msg "Error retrieving secret ${key}"
+ exit 2
+ fi
+done
diff --git a/handwritten/spanner/.kokoro/presubmit/node18/common.cfg b/handwritten/spanner/.kokoro/presubmit/node18/common.cfg
new file mode 100644
index 00000000000..f115a8b7026
--- /dev/null
+++ b/handwritten/spanner/.kokoro/presubmit/node18/common.cfg
@@ -0,0 +1,24 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/trampoline_v2.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/node:18-user"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/test.sh"
+}
diff --git a/handwritten/spanner/.kokoro/presubmit/node18/samples-test.cfg b/handwritten/spanner/.kokoro/presubmit/node18/samples-test.cfg
new file mode 100644
index 00000000000..3c4bbe887e3
--- /dev/null
+++ b/handwritten/spanner/.kokoro/presubmit/node18/samples-test.cfg
@@ -0,0 +1,12 @@
+# Download resources for system tests (service account key, etc.)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs"
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/samples-test.sh"
+}
+
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "long-door-651-kokoro-system-test-service-account"
+}
\ No newline at end of file
diff --git a/handwritten/spanner/.kokoro/presubmit/node18/system-test-regular-session.cfg b/handwritten/spanner/.kokoro/presubmit/node18/system-test-regular-session.cfg
new file mode 100644
index 00000000000..ffed97284db
--- /dev/null
+++ b/handwritten/spanner/.kokoro/presubmit/node18/system-test-regular-session.cfg
@@ -0,0 +1,27 @@
+# Download resources (service account key, etc.) for system tests run with multiplexed sessions disabled (regular sessions)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs"
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/system-test.sh"
+}
+
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "long-door-651-kokoro-system-test-service-account"
+}
+
+env_vars: {
+ key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"
+ value: "false"
+}
+
+env_vars: {
+ key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS"
+ value: "false"
+}
+
+env_vars: {
+ key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW"
+ value: "false"
+}
\ No newline at end of file
diff --git a/handwritten/spanner/.kokoro/presubmit/node18/system-test.cfg b/handwritten/spanner/.kokoro/presubmit/node18/system-test.cfg
new file mode 100644
index 00000000000..b50a1195edc
--- /dev/null
+++ b/handwritten/spanner/.kokoro/presubmit/node18/system-test.cfg
@@ -0,0 +1,12 @@
+# Download resources for system tests (service account key, etc.)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs"
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/system-test.sh"
+}
+
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "long-door-651-kokoro-system-test-service-account"
+}
\ No newline at end of file
diff --git a/handwritten/spanner/.kokoro/presubmit/node18/test.cfg b/handwritten/spanner/.kokoro/presubmit/node18/test.cfg
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/handwritten/spanner/.kokoro/presubmit/windows/common.cfg b/handwritten/spanner/.kokoro/presubmit/windows/common.cfg
new file mode 100644
index 00000000000..d6e25e0b1b8
--- /dev/null
+++ b/handwritten/spanner/.kokoro/presubmit/windows/common.cfg
@@ -0,0 +1,2 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
diff --git a/handwritten/spanner/.kokoro/presubmit/windows/test.cfg b/handwritten/spanner/.kokoro/presubmit/windows/test.cfg
new file mode 100644
index 00000000000..f6f23ee43a1
--- /dev/null
+++ b/handwritten/spanner/.kokoro/presubmit/windows/test.cfg
@@ -0,0 +1,2 @@
+# Use the test file directly
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/test.bat"
diff --git a/handwritten/spanner/.kokoro/publish.sh b/handwritten/spanner/.kokoro/publish.sh
new file mode 100755
index 00000000000..ca1d47af347
--- /dev/null
+++ b/handwritten/spanner/.kokoro/publish.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+export NPM_CONFIG_PREFIX=${HOME}/.npm-global
+
+# Start the releasetool reporter
+python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script
+
+cd $(dirname $0)/..
+
+NPM_TOKEN=$(cat $KOKORO_KEYSTORE_DIR/73713_google-cloud-npm-token-1)
+echo "//wombat-dressing-room.appspot.com/:_authToken=${NPM_TOKEN}" > ~/.npmrc
+
+npm install
+npm pack .
+# npm provides no way to specify, observe, or predict the name of the tarball
+# file it generates. We have to look in the current directory for the freshest
+# .tgz file.
+TARBALL=$(ls -1 -t *.tgz | head -1)
+
+npm publish --access=public --registry=https://wombat-dressing-room.appspot.com "$TARBALL"
+
+# Kokoro collects *.tgz and package-lock.json files and stores them in Placer
+# so we can generate SBOMs and attestations.
+# However, we *don't* want Kokoro to collect package-lock.json and *.tgz files
+# that happened to be installed with dependencies.
+find node_modules -name package-lock.json -o -name "*.tgz" | xargs rm -f
\ No newline at end of file
diff --git a/handwritten/spanner/.kokoro/release/cleanup.cfg b/handwritten/spanner/.kokoro/release/cleanup.cfg
new file mode 100644
index 00000000000..fd3d30eb3a0
--- /dev/null
+++ b/handwritten/spanner/.kokoro/release/cleanup.cfg
@@ -0,0 +1,27 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Download resources for system tests (service account key, etc.)
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs"
+
+# Use the trampoline script to run in docker.
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/trampoline.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/node:8-user"
+}
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/cleanup.sh"
+}
diff --git a/handwritten/spanner/.kokoro/release/common.cfg b/handwritten/spanner/.kokoro/release/common.cfg
new file mode 100644
index 00000000000..3ba2eb095fe
--- /dev/null
+++ b/handwritten/spanner/.kokoro/release/common.cfg
@@ -0,0 +1,8 @@
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "yoshi-automation-github-key"
+ }
+ }
+}
diff --git a/handwritten/spanner/.kokoro/release/docs-devsite.cfg b/handwritten/spanner/.kokoro/release/docs-devsite.cfg
new file mode 100644
index 00000000000..e4a1172942c
--- /dev/null
+++ b/handwritten/spanner/.kokoro/release/docs-devsite.cfg
@@ -0,0 +1,26 @@
+# service account used to publish up-to-date docs.
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "docuploader_service_account"
+ }
+ }
+}
+
+# doc publications use a Python image.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/node:18-user"
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/trampoline_v2.sh"
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/release/docs-devsite.sh"
+}
diff --git a/handwritten/spanner/.kokoro/release/docs-devsite.sh b/handwritten/spanner/.kokoro/release/docs-devsite.sh
new file mode 100755
index 00000000000..81a89f6c172
--- /dev/null
+++ b/handwritten/spanner/.kokoro/release/docs-devsite.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+if [[ -z "$CREDENTIALS" ]]; then
+  # CREDENTIALS is unset, so assume we're running under CI: configure the
+  # npm prefix and cd to the package root. (Set CREDENTIALS to test locally.)
+ export NPM_CONFIG_PREFIX=${HOME}/.npm-global
+ export PATH="$PATH:${NPM_CONFIG_PREFIX}/bin"
+ cd $(dirname $0)/../..
+fi
+
+npm install
+npm install --no-save @google-cloud/cloud-rad@^0.4.0
+# publish docs to devsite
+npx @google-cloud/cloud-rad . cloud-rad
diff --git a/handwritten/spanner/.kokoro/release/docs.cfg b/handwritten/spanner/.kokoro/release/docs.cfg
new file mode 100644
index 00000000000..3e74f70d200
--- /dev/null
+++ b/handwritten/spanner/.kokoro/release/docs.cfg
@@ -0,0 +1,26 @@
+# service account used to publish up-to-date docs.
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "docuploader_service_account"
+ }
+ }
+}
+
+# doc publications use the Node 18 image (Python tooling is preinstalled on it).
+env_vars: {
+  key: "TRAMPOLINE_IMAGE"
+  value: "gcr.io/cloud-devrel-kokoro-resources/node:18-user"
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/trampoline_v2.sh"
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/release/docs.sh"
+}
diff --git a/handwritten/spanner/.kokoro/release/docs.sh b/handwritten/spanner/.kokoro/release/docs.sh
new file mode 100755
index 00000000000..e9079a60530
--- /dev/null
+++ b/handwritten/spanner/.kokoro/release/docs.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+# build jsdocs (Python is installed on the Node 18 docker image).
+if [[ -z "$CREDENTIALS" ]]; then
+  # CREDENTIALS is unset, so assume we're running under CI: configure the
+  # npm prefix and cd to the package root. (Set CREDENTIALS to test locally.)
+ export NPM_CONFIG_PREFIX=${HOME}/.npm-global
+ export PATH="$PATH:${NPM_CONFIG_PREFIX}/bin"
+ cd $(dirname $0)/../..
+fi
+npm install
+npm run docs
+
+# create docs.metadata, based on package.json and .repo-metadata.json.
+npm i json@9.0.6 -g
+python3 -m docuploader create-metadata \
+ --name=$(cat .repo-metadata.json | json name) \
+ --version=$(cat package.json | json version) \
+ --language=$(cat .repo-metadata.json | json language) \
+ --distribution-name=$(cat .repo-metadata.json | json distribution_name) \
+ --product-page=$(cat .repo-metadata.json | json product_documentation) \
+ --github-repository=$(cat .repo-metadata.json | json repo) \
+ --issue-tracker=$(cat .repo-metadata.json | json issue_tracker)
+cp docs.metadata ./docs/docs.metadata
+
+# deploy the docs.
+if [[ -z "$CREDENTIALS" ]]; then
+ CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account
+fi
+if [[ -z "$BUCKET" ]]; then
+ BUCKET=docs-staging
+fi
+python3 -m docuploader upload ./docs --credentials $CREDENTIALS --staging-bucket $BUCKET
diff --git a/handwritten/spanner/.kokoro/release/publish.cfg b/handwritten/spanner/.kokoro/release/publish.cfg
new file mode 100644
index 00000000000..29842f29a88
--- /dev/null
+++ b/handwritten/spanner/.kokoro/release/publish.cfg
@@ -0,0 +1,51 @@
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "docuploader_service_account"
+ }
+ }
+}
+
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "google-cloud-npm-token-1"
+ }
+ }
+}
+
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
+}
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "nodejs-spanner/handwritten/spanner/.kokoro/trampoline_v2.sh"
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/node:18-user"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/nodejs-spanner/handwritten/spanner/.kokoro/publish.sh"
+}
+
+# Store the packages we uploaded to npmjs.com and their corresponding
+# package-lock.jsons in Placer. That way, we have a record of exactly
+# what we published, and which version of which tools we used to publish
+# it, which we can use to generate SBOMs and attestations.
+action {
+ define_artifacts {
+ regex: "github/**/*.tgz"
+ regex: "github/**/package-lock.json"
+ strip_prefix: "github"
+ }
+}
diff --git a/handwritten/spanner/.kokoro/samples-test.sh b/handwritten/spanner/.kokoro/samples-test.sh
new file mode 100755
index 00000000000..a05dabce172
--- /dev/null
+++ b/handwritten/spanner/.kokoro/samples-test.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+# Ensure the npm global directory is writable, otherwise rebuild `npm`
+mkdir -p $NPM_CONFIG_PREFIX
+npm config -g ls || npm i -g npm@`npm --version`
+
+# Setup service account credentials.
+export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/secret_manager/long-door-651-kokoro-system-test-service-account
+export GCLOUD_PROJECT=long-door-651
+
+cd $(dirname $0)/..
+
+# Run a pre-test hook, if a pre-samples-test.sh is in the project
+if [ -f .kokoro/pre-samples-test.sh ]; then
+ set +x
+ . .kokoro/pre-samples-test.sh
+ set -x
+fi
+
+if [ -f samples/package.json ]; then
+ npm install
+
+ # Install and link samples
+ cd samples/
+ npm link ../
+ npm install
+ cd ..
+ # If tests are running against main branch, configure flakybot
+ # to open issues on failures:
+ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then
+ export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml
+ export MOCHA_REPORTER=xunit
+ cleanup() {
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ }
+ trap cleanup EXIT HUP
+ else
+    # Presubmits only: skip the sample tests when nothing under "samples" changed.
+ if git diff --quiet HEAD main -- samples/; then
+ echo "No changes detected in the samples directory. Skipping sample tests."
+ exit 0
+ fi
+ fi
+
+ npm run samples-test
+fi
+
+# codecov combines coverage across integration and unit tests. Include
+# the logic below for any environment you wish to collect coverage for:
+COVERAGE_NODE=18
+if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then
+ NYC_BIN=./node_modules/nyc/bin/nyc.js
+ if [ -f "$NYC_BIN" ]; then
+ $NYC_BIN report || true
+ fi
+ bash $KOKORO_GFILE_DIR/codecov.sh
+else
+ echo "coverage is only reported for Node $COVERAGE_NODE"
+fi
diff --git a/handwritten/spanner/.kokoro/system-test.sh b/handwritten/spanner/.kokoro/system-test.sh
new file mode 100755
index 00000000000..a90d5cfec89
--- /dev/null
+++ b/handwritten/spanner/.kokoro/system-test.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+export NPM_CONFIG_PREFIX=${HOME}/.npm-global
+
+# Setup service account credentials.
+export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/secret_manager/long-door-651-kokoro-system-test-service-account
+export GCLOUD_PROJECT=long-door-651
+
+cd $(dirname $0)/..
+
+# Run a pre-test hook, if a pre-system-test.sh is in the project
+if [ -f .kokoro/pre-system-test.sh ]; then
+ set +x
+ . .kokoro/pre-system-test.sh
+ set -x
+fi
+
+npm install
+
+# If tests are running against main branch, configure flakybot
+# to open issues on failures:
+if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then
+ export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml
+ export MOCHA_REPORTER=xunit
+ cleanup() {
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ }
+ trap cleanup EXIT HUP
+fi
+
+npm run system-test
+
+# codecov combines coverage across integration and unit tests. Include
+# the logic below for any environment you wish to collect coverage for:
+COVERAGE_NODE=18
+if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then
+ NYC_BIN=./node_modules/nyc/bin/nyc.js
+ if [ -f "$NYC_BIN" ]; then
+ $NYC_BIN report || true
+ fi
+ bash $KOKORO_GFILE_DIR/codecov.sh
+else
+ echo "coverage is only reported for Node $COVERAGE_NODE"
+fi
diff --git a/handwritten/spanner/.kokoro/test.bat b/handwritten/spanner/.kokoro/test.bat
new file mode 100644
index 00000000000..caf825656c2
--- /dev/null
+++ b/handwritten/spanner/.kokoro/test.bat
@@ -0,0 +1,33 @@
+@rem Copyright 2018 Google LLC. All rights reserved.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@echo "Starting Windows build"
+
+cd /d %~dp0
+cd ..
+
+@rem npm path is not currently set in our image, we should fix this next time
+@rem we upgrade Node.js in the image:
+SET PATH=%PATH%;/cygdrive/c/Program Files/nodejs/npm
+
+call nvm use 18
+call which node
+
+call npm install || goto :error
+call npm run test || goto :error
+
+goto :EOF
+
+:error
+exit /b 1
diff --git a/handwritten/spanner/.kokoro/test.sh b/handwritten/spanner/.kokoro/test.sh
new file mode 100755
index 00000000000..0d9f6392a75
--- /dev/null
+++ b/handwritten/spanner/.kokoro/test.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+export NPM_CONFIG_PREFIX=${HOME}/.npm-global
+
+cd $(dirname $0)/..
+
+npm install
+# If tests are running against main branch, configure flakybot
+# to open issues on failures:
+if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then
+ export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml
+ export MOCHA_REPORTER=xunit
+ cleanup() {
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ }
+ trap cleanup EXIT HUP
+fi
+# Unit tests exercise the entire API surface, which may include
+# deprecation warnings:
+export MOCHA_THROW_DEPRECATION=false
+npm test
+
+# codecov combines coverage across integration and unit tests. Include
+# the logic below for any environment you wish to collect coverage for:
+COVERAGE_NODE=18
+if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then
+ NYC_BIN=./node_modules/nyc/bin/nyc.js
+ if [ -f "$NYC_BIN" ]; then
+ $NYC_BIN report || true
+ fi
+ bash $KOKORO_GFILE_DIR/codecov.sh
+else
+ echo "coverage is only reported for Node $COVERAGE_NODE"
+fi
diff --git a/handwritten/spanner/.kokoro/trampoline.sh b/handwritten/spanner/.kokoro/trampoline.sh
new file mode 100755
index 00000000000..f693a1ce7aa
--- /dev/null
+++ b/handwritten/spanner/.kokoro/trampoline.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is not used any more, but we keep this file for making it
+# easy to roll back.
+# TODO: Remove this file from the template.
+
+set -eo pipefail
+
+# Always run the cleanup script, regardless of the success of bouncing into
+# the container.
+function cleanup() {
+ chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ echo "cleanup";
+}
+trap cleanup EXIT
+
+$(dirname $0)/populate-secrets.sh # Secret Manager secrets.
+python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py"
diff --git a/handwritten/spanner/.kokoro/trampoline_v2.sh b/handwritten/spanner/.kokoro/trampoline_v2.sh
new file mode 100755
index 00000000000..2ed993bbdff
--- /dev/null
+++ b/handwritten/spanner/.kokoro/trampoline_v2.sh
@@ -0,0 +1,513 @@
+#!/usr/bin/env bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# trampoline_v2.sh
+#
+# If you want to make a change to this file, consider doing so at:
+# https://github.com/googlecloudplatform/docker-ci-helper
+#
+# This script is for running CI builds. For Kokoro builds, we
+# set this script to `build_file` field in the Kokoro configuration.
+
+# This script does 3 things.
+#
+# 1. Prepare the Docker image for the test
+# 2. Run the Docker with appropriate flags to run the test
+# 3. Upload the newly built Docker image
+#
+# in a way that is somewhat compatible with trampoline_v1.
+#
+# These environment variables are required:
+# TRAMPOLINE_IMAGE: The docker image to use.
+# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile.
+#
+# You can optionally change these environment variables:
+# TRAMPOLINE_IMAGE_UPLOAD:
+# (true|false): Whether to upload the Docker image after the
+# successful builds.
+# TRAMPOLINE_BUILD_FILE: The script to run in the docker container.
+# TRAMPOLINE_WORKSPACE: The workspace path in the docker container.
+# Defaults to /workspace.
+# Potentially there are some repo specific envvars in .trampolinerc in
+# the project root.
+#
+# Here is an example for running this script.
+# TRAMPOLINE_IMAGE=gcr.io/cloud-devrel-kokoro-resources/node:18-user \
+# TRAMPOLINE_BUILD_FILE=.kokoro/system-test.sh \
+# .kokoro/trampoline_v2.sh
+
+set -euo pipefail
+
+TRAMPOLINE_VERSION="2.0.7"
+
+if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then
+ readonly IO_COLOR_RED="$(tput setaf 1)"
+ readonly IO_COLOR_GREEN="$(tput setaf 2)"
+ readonly IO_COLOR_YELLOW="$(tput setaf 3)"
+ readonly IO_COLOR_RESET="$(tput sgr0)"
+else
+ readonly IO_COLOR_RED=""
+ readonly IO_COLOR_GREEN=""
+ readonly IO_COLOR_YELLOW=""
+ readonly IO_COLOR_RESET=""
+fi
+
+function function_exists {
+ [ $(LC_ALL=C type -t $1)"" == "function" ]
+}
+
+# Logs a message using the given color. The first argument must be one
+# of the IO_COLOR_* variables defined above, such as
+# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the
+# given color. The log message will also have an RFC-3339 timestamp
+# prepended (in UTC). You can disable the color output by setting
+# TERM=vt100.
+function log_impl() {
+ local color="$1"
+ shift
+ local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")"
+ echo "================================================================"
+ echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}"
+ echo "================================================================"
+}
+
+# Logs the given message with normal coloring and a timestamp.
+function log() {
+ log_impl "${IO_COLOR_RESET}" "$@"
+}
+
+# Logs the given message in green with a timestamp.
+function log_green() {
+ log_impl "${IO_COLOR_GREEN}" "$@"
+}
+
+# Logs the given message in yellow with a timestamp.
+function log_yellow() {
+ log_impl "${IO_COLOR_YELLOW}" "$@"
+}
+
+# Logs the given message in red with a timestamp.
+function log_red() {
+ log_impl "${IO_COLOR_RED}" "$@"
+}
+
+readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX)
+readonly tmphome="${tmpdir}/h"
+mkdir -p "${tmphome}"
+
+function cleanup() {
+ rm -rf "${tmpdir}"
+}
+trap cleanup EXIT
+
+RUNNING_IN_CI="${RUNNING_IN_CI:-false}"
+
+# The workspace in the container, defaults to /workspace.
+TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}"
+
+pass_down_envvars=(
+ # TRAMPOLINE_V2 variables.
+ # Tells scripts whether they are running as part of CI or not.
+ "RUNNING_IN_CI"
+ # Indicates which CI system we're in.
+ "TRAMPOLINE_CI"
+ # Indicates the version of the script.
+ "TRAMPOLINE_VERSION"
+ # Contains path to build artifacts being executed.
+ "KOKORO_BUILD_ARTIFACTS_SUBDIR"
+)
+
+log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}"
+
+# Detect which CI systems we're in. If we're in any of the CI systems
+# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be
+# the name of the CI system. Both envvars will be passed down to the
+# container for telling which CI system we're in.
+if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then
+ # descriptive env var for indicating it's on CI.
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="kokoro"
+ if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then
+ if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then
+ log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting."
+ exit 1
+ fi
+ # This service account will be activated later.
+ TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json"
+ else
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ gcloud auth list
+ fi
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+ fi
+ pass_down_envvars+=(
+ # KOKORO dynamic variables.
+ "KOKORO_BUILD_NUMBER"
+ "KOKORO_BUILD_ID"
+ "KOKORO_JOB_NAME"
+ "KOKORO_GIT_COMMIT"
+ "KOKORO_GITHUB_COMMIT"
+ "KOKORO_GITHUB_PULL_REQUEST_NUMBER"
+ "KOKORO_GITHUB_PULL_REQUEST_COMMIT"
+ # For flakybot
+ "KOKORO_GITHUB_COMMIT_URL"
+ "KOKORO_GITHUB_PULL_REQUEST_URL"
+ "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"
+ "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS"
+ "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW"
+ )
+elif [[ "${TRAVIS:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="travis"
+ pass_down_envvars+=(
+ "TRAVIS_BRANCH"
+ "TRAVIS_BUILD_ID"
+ "TRAVIS_BUILD_NUMBER"
+ "TRAVIS_BUILD_WEB_URL"
+ "TRAVIS_COMMIT"
+ "TRAVIS_COMMIT_MESSAGE"
+ "TRAVIS_COMMIT_RANGE"
+ "TRAVIS_JOB_NAME"
+ "TRAVIS_JOB_NUMBER"
+ "TRAVIS_JOB_WEB_URL"
+ "TRAVIS_PULL_REQUEST"
+ "TRAVIS_PULL_REQUEST_BRANCH"
+ "TRAVIS_PULL_REQUEST_SHA"
+ "TRAVIS_PULL_REQUEST_SLUG"
+ "TRAVIS_REPO_SLUG"
+ "TRAVIS_SECURE_ENV_VARS"
+ "TRAVIS_TAG"
+ )
+elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="github-workflow"
+ pass_down_envvars+=(
+ "GITHUB_WORKFLOW"
+ "GITHUB_RUN_ID"
+ "GITHUB_RUN_NUMBER"
+ "GITHUB_ACTION"
+ "GITHUB_ACTIONS"
+ "GITHUB_ACTOR"
+ "GITHUB_REPOSITORY"
+ "GITHUB_EVENT_NAME"
+ "GITHUB_EVENT_PATH"
+ "GITHUB_SHA"
+ "GITHUB_REF"
+ "GITHUB_HEAD_REF"
+ "GITHUB_BASE_REF"
+ )
+elif [[ "${CIRCLECI:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="circleci"
+ pass_down_envvars+=(
+ "CIRCLE_BRANCH"
+ "CIRCLE_BUILD_NUM"
+ "CIRCLE_BUILD_URL"
+ "CIRCLE_COMPARE_URL"
+ "CIRCLE_JOB"
+ "CIRCLE_NODE_INDEX"
+ "CIRCLE_NODE_TOTAL"
+ "CIRCLE_PREVIOUS_BUILD_NUM"
+ "CIRCLE_PROJECT_REPONAME"
+ "CIRCLE_PROJECT_USERNAME"
+ "CIRCLE_REPOSITORY_URL"
+ "CIRCLE_SHA1"
+ "CIRCLE_STAGE"
+ "CIRCLE_USERNAME"
+ "CIRCLE_WORKFLOW_ID"
+ "CIRCLE_WORKFLOW_JOB_ID"
+ "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS"
+ "CIRCLE_WORKFLOW_WORKSPACE_ID"
+ )
+fi
+
+# Find the repository root by walking up until a .git directory is found.
+function repo_root() {
+ local dir="$1"
+ while [[ ! -d "${dir}/.git" ]]; do
+ dir="$(dirname "$dir")"
+ done
+ echo "${dir}"
+}
+
+# Detect the project root. In CI builds, we assume the script is in
+# the git tree and traverse from there, otherwise, traverse from `pwd`
+# to find `.git` directory.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ PROGRAM_PATH="$(realpath "$0")"
+ PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")"
+ PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")/handwritten/spanner"
+else
+ PROJECT_ROOT="$(repo_root $(pwd))/handwritten/spanner"
+fi
+
+log_yellow "Changing to the project root: ${PROJECT_ROOT}."
+cd "${PROJECT_ROOT}"
+
+# Auto-injected conditional check
+# Check if the package directory has changes. If not, skip tests.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ # The package path is hardcoded during migration
+ RELATIVE_PKG_PATH="handwritten/spanner"
+
+ echo "Checking for changes in ${RELATIVE_PKG_PATH}..."
+
+ # Determine the diff range based on the CI system/event
+ # Safe default: HEAD~1..HEAD
+ DIFF_RANGE="HEAD~1..HEAD"
+
+ if git diff --quiet "${DIFF_RANGE}" -- "${RELATIVE_PKG_PATH}"; then
+ echo "No changes detected in ${RELATIVE_PKG_PATH}. Skipping tests."
+ exit 0
+ else
+ echo "Changes detected in ${RELATIVE_PKG_PATH}. Proceeding with tests."
+ fi
+fi
+
+# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need
+# to use this environment variable in `PROJECT_ROOT`.
+if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then
+
+ mkdir -p "${tmpdir}/gcloud"
+ gcloud_config_dir="${tmpdir}/gcloud"
+
+ log_yellow "Using isolated gcloud config: ${gcloud_config_dir}."
+ export CLOUDSDK_CONFIG="${gcloud_config_dir}"
+
+ log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication."
+ gcloud auth activate-service-account \
+ --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}"
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+fi
+
+required_envvars=(
+ # The basic trampoline configurations.
+ "TRAMPOLINE_IMAGE"
+ "TRAMPOLINE_BUILD_FILE"
+)
+
+if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then
+ source "${PROJECT_ROOT}/.trampolinerc"
+fi
+
+log_yellow "Checking environment variables."
+for e in "${required_envvars[@]}"
+do
+ if [[ -z "${!e:-}" ]]; then
+ log "Missing ${e} env var. Aborting."
+ exit 1
+ fi
+done
+
+# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1
+# script: e.g. "github/repo-name/.kokoro/run_tests.sh"
+TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}"
+log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}"
+
+# ignore error on docker operations and test execution
+set +e
+
+log_yellow "Preparing Docker image."
+# We only download the docker image in CI builds.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ # Download the docker image specified by `TRAMPOLINE_IMAGE`
+
+ # We may want to add --max-concurrent-downloads flag.
+
+ log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ if docker pull "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="true"
+ else
+ log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="false"
+ fi
+else
+ # For local run, check if we have the image.
+ if docker images "${TRAMPOLINE_IMAGE}" | grep "${TRAMPOLINE_IMAGE%:*}"; then
+ has_image="true"
+ else
+ has_image="false"
+ fi
+fi
+
+
+# The default user for a Docker container has uid 0 (root). To avoid
+# creating root-owned files in the build directory we tell docker to
+# use the current user ID.
+user_uid="$(id -u)"
+user_gid="$(id -g)"
+user_name="$(id -un)"
+
+# To allow docker in docker, we add the user to the docker group in
+# the host os.
+docker_gid=$(cut -d: -f3 < <(getent group docker))
+
+update_cache="false"
+if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then
+ # Build the Docker image from the source.
+ context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}")
+ docker_build_flags=(
+ "-f" "${TRAMPOLINE_DOCKERFILE}"
+ "-t" "${TRAMPOLINE_IMAGE}"
+ "--build-arg" "UID=${user_uid}"
+ "--build-arg" "USERNAME=${user_name}"
+ )
+ if [[ "${has_image}" == "true" ]]; then
+ docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}")
+ fi
+
+ log_yellow "Start building the docker image."
+ if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then
+ echo "docker build" "${docker_build_flags[@]}" "${context_dir}"
+ fi
+
+ # On CI systems, we want to suppress docker build logs, only
+ # output the logs when it fails.
+ if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ if docker build "${docker_build_flags[@]}" "${context_dir}" \
+ > "${tmpdir}/docker_build.log" 2>&1; then
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ cat "${tmpdir}/docker_build.log"
+ fi
+
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ log_yellow "Dumping the build logs:"
+ cat "${tmpdir}/docker_build.log"
+ exit 1
+ fi
+ else
+ if docker build "${docker_build_flags[@]}" "${context_dir}"; then
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ exit 1
+ fi
+ fi
+else
+ if [[ "${has_image}" != "true" ]]; then
+ log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting."
+ exit 1
+ fi
+fi
+
+# We use an array for the flags so they are easier to document.
+docker_flags=(
+ # Remove the container after it exits.
+ "--rm"
+
+ # Use the host network.
+ "--network=host"
+
+ # Run in privileged mode. We are not using docker for sandboxing or
+ # isolation, just for packaging our dev tools.
+ "--privileged"
+
+ # Run the docker script with the user id. Because the docker image gets to
+ # write in ${PWD} you typically want this to be your user id.
+ # To allow docker in docker, we need to use docker gid on the host.
+ "--user" "${user_uid}:${docker_gid}"
+
+ # Pass down the USER.
+ "--env" "USER=${user_name}"
+
+ # Mount the project directory inside the Docker container.
+ "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}"
+ "--workdir" "${TRAMPOLINE_WORKSPACE}"
+ "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}"
+
+ # Mount the temporary home directory.
+ "--volume" "${tmphome}:/h"
+ "--env" "HOME=/h"
+
+ # Allow docker in docker.
+ "--volume" "/var/run/docker.sock:/var/run/docker.sock"
+
+ # Mount the /tmp so that docker in docker can mount the files
+ # there correctly.
+ "--volume" "/tmp:/tmp"
+ # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR
+ # TODO(tmatsuo): This part is not portable.
+ "--env" "TRAMPOLINE_SECRET_DIR=/secrets"
+ "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile"
+ "--env" "KOKORO_GFILE_DIR=/secrets/gfile"
+ "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore"
+ "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore"
+)
+
+# Add an option for nicer output if the build gets a tty.
+if [[ -t 0 ]]; then
+ docker_flags+=("-it")
+fi
+
+# Passing down env vars
+for e in "${pass_down_envvars[@]}"
+do
+ if [[ -n "${!e:-}" ]]; then
+ docker_flags+=("--env" "${e}=${!e}")
+ fi
+done
+
+# If arguments are given, all arguments will become the commands run
+# in the container, otherwise run TRAMPOLINE_BUILD_FILE.
+if [[ $# -ge 1 ]]; then
+ log_yellow "Running the given commands '" "${@:1}" "' in the container."
+ readonly commands=("${@:1}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+else
+ log_yellow "Running the tests in a Docker container."
+ docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+fi
+
+
+test_retval=$?
+
+if [[ ${test_retval} -eq 0 ]]; then
+ log_green "Build finished with ${test_retval}"
+else
+ log_red "Build finished with ${test_retval}"
+fi
+
+# Only upload it when the test passes.
+if [[ "${update_cache}" == "true" ]] && \
+ [[ $test_retval == 0 ]] && \
+ [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then
+ log_yellow "Uploading the Docker image."
+ if docker push "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished uploading the Docker image."
+ else
+ log_red "Failed uploading the Docker image."
+ fi
+ # Call trampoline_after_upload_hook if it's defined.
+ if function_exists trampoline_after_upload_hook; then
+ trampoline_after_upload_hook
+ fi
+
+fi
+
+exit "${test_retval}"
diff --git a/handwritten/spanner/.mocharc.js b/handwritten/spanner/.mocharc.js
new file mode 100644
index 00000000000..2431859019f
--- /dev/null
+++ b/handwritten/spanner/.mocharc.js
@@ -0,0 +1,29 @@
+// Copyright 2026 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+const config = {
+ "enable-source-maps": true,
+ "throw-deprecation": true,
+ "timeout": 10000,
+ "recursive": true
+}
+if (process.env.MOCHA_THROW_DEPRECATION === 'false') {
+ delete config['throw-deprecation'];
+}
+if (process.env.MOCHA_REPORTER) {
+ config.reporter = process.env.MOCHA_REPORTER;
+}
+if (process.env.MOCHA_REPORTER_OUTPUT) {
+ config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`;
+}
+module.exports = config
diff --git a/handwritten/spanner/.nycrc b/handwritten/spanner/.nycrc
new file mode 100644
index 00000000000..b18d5472b62
--- /dev/null
+++ b/handwritten/spanner/.nycrc
@@ -0,0 +1,24 @@
+{
+ "report-dir": "./.coverage",
+ "reporter": ["text", "lcov"],
+ "exclude": [
+ "**/*-test",
+ "**/.coverage",
+ "**/apis",
+ "**/benchmark",
+ "**/conformance",
+ "**/docs",
+ "**/samples",
+ "**/scripts",
+ "**/protos",
+ "**/test",
+ "**/*.d.ts",
+ ".jsdoc.js",
+ "**/.jsdoc.js",
+ "karma.conf.js",
+ "webpack-tests.config.js",
+ "webpack.config.js"
+ ],
+ "exclude-after-remap": false,
+ "all": true
+}
diff --git a/handwritten/spanner/.prettierignore b/handwritten/spanner/.prettierignore
new file mode 100644
index 00000000000..9340ad9b86d
--- /dev/null
+++ b/handwritten/spanner/.prettierignore
@@ -0,0 +1,6 @@
+**/node_modules
+**/coverage
+test/fixtures
+build/
+docs/
+protos/
diff --git a/handwritten/spanner/.prettierrc.js b/handwritten/spanner/.prettierrc.js
new file mode 100644
index 00000000000..d2eddc2ed89
--- /dev/null
+++ b/handwritten/spanner/.prettierrc.js
@@ -0,0 +1,17 @@
+// Copyright 2026 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+module.exports = {
+ ...require('gts/.prettierrc.json')
+}
diff --git a/handwritten/spanner/.readme-partials.yml b/handwritten/spanner/.readme-partials.yml
new file mode 100644
index 00000000000..e5b253f6b79
--- /dev/null
+++ b/handwritten/spanner/.readme-partials.yml
@@ -0,0 +1,49 @@
+introduction: |-
+ [Cloud Spanner](https://cloud.google.com/spanner/docs/) is a fully managed, mission-critical, relational database service that
+ offers transactional consistency at global scale, schemas, SQL (ANSI 2011 with extensions),
+ and automatic, synchronous replication for high availability.
+
+body: |-
+ ## Metrics
+
+ Cloud Spanner client supports [client-side metrics](https://cloud.google.com/spanner/docs/view-manage-client-side-metrics) that you can use along with server-side metrics to optimize performance and troubleshoot performance issues if they occur.
+
+ Client-side metrics are measured from the time a request leaves your application to the time your application receives the response.
+ In contrast, server-side metrics are measured from the time Spanner receives a request until the last byte of data is sent to the client.
+
+ These metrics are enabled by default. You can opt out of using client-side metrics with the following code:
+
+ ```javascript
+ const spanner = new Spanner({
+ disableBuiltInMetrics: true
+ });
+ ```
+
+ You can also disable these metrics by setting `SPANNER_DISABLE_BUILTIN_METRICS` to `true`.
+
+ > Note: Client-side metrics need the `monitoring.timeSeries.create` IAM permission to export metrics data. Ask your administrator to grant your service account the [Monitoring Metric Writer](https://cloud.google.com/iam/docs/roles-permissions/monitoring#monitoring.metricWriter) (roles/monitoring.metricWriter) IAM role on the project.
+
+ ## Traces
+ Refer to the Observability README to know more about tracing support in the Cloud Spanner client.
+
+ ## Multiplexed Sessions
+
+ Spanner's Multiplexed Sessions feature is now the default session mode in the Node.js client. This feature helps reduce
+ session management overhead and minimize session-related errors.
+
+ For a detailed explanation on multiplexed sessions, please refer to the [official documentation](https://cloud.google.com/spanner/docs/sessions#multiplexed_sessions).
+
+ ## Regular Sessions
+
+ To use regular sessions, disable the multiplexed sessions and set the following environment variables to `false`:
+
+ * **For Read-Only Transactions:**
+ - `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS`
+ * **For Partitioned Operations:**
+ - `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS`
+ - `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS`
+ * **For Read-Write Transactions:**
+ - `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS`
+ - `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW`
+
+ For a detailed explanation on session modes and env configurations, please refer to the [official documentation](https://cloud.google.com/spanner/docs/sessions).
diff --git a/handwritten/spanner/.repo-metadata.json b/handwritten/spanner/.repo-metadata.json
new file mode 100644
index 00000000000..a6f35590c2c
--- /dev/null
+++ b/handwritten/spanner/.repo-metadata.json
@@ -0,0 +1,16 @@
+{
+ "name": "spanner",
+ "name_pretty": "Cloud Spanner",
+ "product_documentation": "https://cloud.google.com/spanner/docs/",
+ "client_documentation": "https://cloud.google.com/nodejs/docs/reference/spanner/latest",
+ "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open",
+ "release_level": "stable",
+ "language": "nodejs",
+ "repo": "googleapis/google-cloud-node",
+ "distribution_name": "@google-cloud/spanner",
+ "api_id": "spanner.googleapis.com",
+ "requires_billing": true,
+ "codeowner_team": "@googleapis/spanner-team",
+ "api_shortname": "spanner",
+ "library_type": "GAPIC_COMBO"
+}
diff --git a/handwritten/spanner/.trampolinerc b/handwritten/spanner/.trampolinerc
new file mode 100644
index 00000000000..dea7e9541e6
--- /dev/null
+++ b/handwritten/spanner/.trampolinerc
@@ -0,0 +1,52 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Template for .trampolinerc
+
+# Add required env vars here.
+required_envvars+=(
+)
+
+# Add env vars which are passed down into the container here.
+pass_down_envvars+=(
+ "AUTORELEASE_PR"
+ "VERSION"
+)
+
+# Prevent unintentional override on the default image.
+if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \
+ [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image."
+ exit 1
+fi
+
+# Define the default value if it makes sense.
+if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then
+ TRAMPOLINE_IMAGE_UPLOAD=""
+fi
+
+if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ TRAMPOLINE_IMAGE=""
+fi
+
+if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then
+ TRAMPOLINE_DOCKERFILE=""
+fi
+
+if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then
+ TRAMPOLINE_BUILD_FILE=""
+fi
+
+# Secret Manager secrets.
+source ${PROJECT_ROOT}/handwritten/spanner/.kokoro/populate-secrets.sh
diff --git a/handwritten/spanner/CHANGELOG.md b/handwritten/spanner/CHANGELOG.md
new file mode 100644
index 00000000000..c3b79867501
--- /dev/null
+++ b/handwritten/spanner/CHANGELOG.md
@@ -0,0 +1,1788 @@
+# Changelog
+
+[npm history][1]
+
+[1]: https://www.npmjs.com/package/@google-cloud/spanner?activeTab=versions
+
+## [8.6.0](https://github.com/googleapis/nodejs-spanner/compare/v8.5.0...v8.6.0) (2026-01-28)
+
+
+### Features
+
+* Refactor and deprecate databaseRole and Session Labels SessionPoolOptions ([#2511](https://github.com/googleapis/nodejs-spanner/issues/2511)) ([163534c](https://github.com/googleapis/nodejs-spanner/commit/163534c8545a3f1378b001eb118b12f98b4cc1e5))
+
+
+### Bug Fixes
+
+* Ensure all data is consumed before emitting end event in PartialResultStream ([#2516](https://github.com/googleapis/nodejs-spanner/issues/2516)) ([210ef50](https://github.com/googleapis/nodejs-spanner/commit/210ef506cd80f4604e4a135fdfe937190eab5750))
+
+## [8.5.0](https://github.com/googleapis/nodejs-spanner/compare/v8.4.0...v8.5.0) (2026-01-22)
+
+
+### Features
+
+* Added OUTPUT_ONLY annotations to create_time and update_time in InternalRange to reflect existing service behavior ([#2505](https://github.com/googleapis/nodejs-spanner/issues/2505)) ([1058683](https://github.com/googleapis/nodejs-spanner/commit/105868339b1d2b7d7701a6b7591b85e3a1ca4098))
+
+
+### Bug Fixes
+
+* UUID type backward compatibility ([#2509](https://github.com/googleapis/nodejs-spanner/issues/2509)) ([7abb33c](https://github.com/googleapis/nodejs-spanner/commit/7abb33ca523b612f171def64c1ceb0cb7d162e82))
+
+## [8.4.0](https://github.com/googleapis/nodejs-spanner/compare/v8.3.1...v8.4.0) (2026-01-09)
+
+
+### Features
+
+* Add a ClientContext field to Spanner requests ([#2493](https://github.com/googleapis/nodejs-spanner/issues/2493)) ([37504ad](https://github.com/googleapis/nodejs-spanner/commit/37504adcc37a7e95acfb2530313ff783d0c1fe7d))
+* Exposing total CPU related fields in AutoscalingConfig ([#2490](https://github.com/googleapis/nodejs-spanner/issues/2490)) ([508f0ff](https://github.com/googleapis/nodejs-spanner/commit/508f0ff95636b004f4200522018a199263eda8ca))
+* **spanner:** Support for type UUID ([#2482](https://github.com/googleapis/nodejs-spanner/issues/2482)) ([0047e94](https://github.com/googleapis/nodejs-spanner/commit/0047e9407d86521571626c69011b70307f83f8ba))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v5.0.6 ([#2452](https://github.com/googleapis/nodejs-spanner/issues/2452)) ([f9e6b86](https://github.com/googleapis/nodejs-spanner/commit/f9e6b86ff4da03110642c17e5ebc8fac8d903d3a))
+* Flaky metric test ([#2472](https://github.com/googleapis/nodejs-spanner/issues/2472)) ([e169cc5](https://github.com/googleapis/nodejs-spanner/commit/e169cc5344d38812b1ebf20c7a987715a73d6f79))
+* Memory leak and deadlock due to error event in multiplexed session ([#2477](https://github.com/googleapis/nodejs-spanner/issues/2477)) ([c624619](https://github.com/googleapis/nodejs-spanner/commit/c624619a3960892b1d2d412ff79faa5a74de45df))
+* Presubmit failure for samples backups test ([#2492](https://github.com/googleapis/nodejs-spanner/issues/2492)) ([01eb3d5](https://github.com/googleapis/nodejs-spanner/commit/01eb3d5801ddb21517f185b9d585fbce4fa1475c))
+* Type check for key in deleteRows ([#2486](https://github.com/googleapis/nodejs-spanner/issues/2486)) ([7347a16](https://github.com/googleapis/nodejs-spanner/commit/7347a1628ad8635b8f84b36ad1d3850b78862ac7))
+* Type mismatch in Snapshot.run error handler ([#2487](https://github.com/googleapis/nodejs-spanner/issues/2487)) ([4ac0360](https://github.com/googleapis/nodejs-spanner/commit/4ac036047e3a03c073300f288c746389a38d8e42))
+
+## [8.3.1](https://github.com/googleapis/nodejs-spanner/compare/v8.3.0...v8.3.1) (2025-11-19)
+
+
+### Bug Fixes
+
+* Remove Console logging ([e673acd](https://github.com/googleapis/nodejs-spanner/commit/e673acdba717380d25eca6d978272c43950eb1e8))
+
+## [8.3.0](https://github.com/googleapis/nodejs-spanner/compare/v8.2.2...v8.3.0) (2025-11-11)
+
+
+### Features
+
+* Exposing AutoscalingConfig in InstancePartition ([#2449](https://github.com/googleapis/nodejs-spanner/issues/2449)) ([2defcc8](https://github.com/googleapis/nodejs-spanner/commit/2defcc81de4191293cd2d4aa493a0a9dc68f37ce))
+* Multiplexed session as default session mode ([#2451](https://github.com/googleapis/nodejs-spanner/issues/2451)) ([9ef0565](https://github.com/googleapis/nodejs-spanner/commit/9ef0565ca2cc9af18477503b43a506fd7c0c3c33))
+
+
+### Bug Fixes
+
+* Configure gRPC keep alive as 120 sec ([#2445](https://github.com/googleapis/nodejs-spanner/issues/2445)) ([f280e7c](https://github.com/googleapis/nodejs-spanner/commit/f280e7ca7cc60a67da8a5a0b3ada5f414c9e9a5f))
+* **deps:** Update dependency google-gax to v5.0.4 ([#2430](https://github.com/googleapis/nodejs-spanner/issues/2430)) ([4cbed94](https://github.com/googleapis/nodejs-spanner/commit/4cbed94dd74567f12620a493cebace5b2c9196bc))
+
+## [8.2.2](https://github.com/googleapis/nodejs-spanner/compare/v8.2.1...v8.2.2) (2025-10-07)
+
+
+### Bug Fixes
+
+* Correctly determine project ID for metrics export ([#2427](https://github.com/googleapis/nodejs-spanner/issues/2427)) ([0d63312](https://github.com/googleapis/nodejs-spanner/commit/0d633126a87c1274abfd59550cb94052a819fcaa))
+* Metrics Export Error log ([#2425](https://github.com/googleapis/nodejs-spanner/issues/2425)) ([110923e](https://github.com/googleapis/nodejs-spanner/commit/110923ea1dc6f6c891e0f70406b3839224a25b9e))
+
+## [8.2.1](https://github.com/googleapis/nodejs-spanner/compare/v8.2.0...v8.2.1) (2025-09-12)
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v5.0.3 ([#2371](https://github.com/googleapis/nodejs-spanner/issues/2371)) ([8a175e2](https://github.com/googleapis/nodejs-spanner/commit/8a175e2e5cc8d0ed81faee7b24b59b5026758a59))
+* Disable afe_connectivity_error_count metric ([af72d70](https://github.com/googleapis/nodejs-spanner/commit/af72d707c8857d5596bd2b93830e52c8e152967f))
+
+## [8.2.0](https://github.com/googleapis/nodejs-spanner/compare/v8.1.0...v8.2.0) (2025-08-26)
+
+
+### Features
+
+* **spanner:** Add support for multiplexed session for r/w transactions ([#2351](https://github.com/googleapis/nodejs-spanner/issues/2351)) ([6a9f1a2](https://github.com/googleapis/nodejs-spanner/commit/6a9f1a2b2c0dad955593571c71e9d4b6c9e7eeee))
+* **spanner:** Support setting read lock mode ([#2388](https://github.com/googleapis/nodejs-spanner/issues/2388)) ([bd66f61](https://github.com/googleapis/nodejs-spanner/commit/bd66f61f3ecac65678d31cbc841c11cd0fb7c3da))
+
+
+### Bug Fixes
+
+* **deps:** Add uuid to dependencies ([#2376](https://github.com/googleapis/nodejs-spanner/issues/2376)) ([0b2060b](https://github.com/googleapis/nodejs-spanner/commit/0b2060b4ad7302ab23ac757e79fe760e34e81083))
+* **deps:** Update dependency @grpc/proto-loader to ^0.8.0 ([#2354](https://github.com/googleapis/nodejs-spanner/issues/2354)) ([75dc4da](https://github.com/googleapis/nodejs-spanner/commit/75dc4daf114cbc4eb4669ed6cb042af051cdce63))
+* **deps:** Update dependency google-gax to v5.0.1 ([#2362](https://github.com/googleapis/nodejs-spanner/issues/2362)) ([9223470](https://github.com/googleapis/nodejs-spanner/commit/922347014ac3966ec4a48116b61ba4850edf0b50))
+* Provide option to disable built in metrics ([#2380](https://github.com/googleapis/nodejs-spanner/issues/2380)) ([b378e2e](https://github.com/googleapis/nodejs-spanner/commit/b378e2ed6739acf76f3f3f27090311129dd83473))
+* Race condition among transactions when running parallely ([#2369](https://github.com/googleapis/nodejs-spanner/issues/2369)) ([f8b6f63](https://github.com/googleapis/nodejs-spanner/commit/f8b6f6340f4f04e04213fdf0a9665d643f474eeb))
+
+## [8.1.0](https://github.com/googleapis/nodejs-spanner/compare/v8.0.0...v8.1.0) (2025-07-28)
+
+
+### Features
+
+* Add Custom OpenTelemetry Exporter in for Service Metrics ([#2272](https://github.com/googleapis/nodejs-spanner/issues/2272)) ([610d1b9](https://github.com/googleapis/nodejs-spanner/commit/610d1b989ba186c0758791343deaa7f683c4bd26))
+* Add methods from gax to cache proto root and process custom error details ([#2330](https://github.com/googleapis/nodejs-spanner/issues/2330)) ([1b3931a](https://github.com/googleapis/nodejs-spanner/commit/1b3931a799bdd052adc91703e59e1d0c83270065))
+* Add metrics tracers ([#2319](https://github.com/googleapis/nodejs-spanner/issues/2319)) ([192bf2b](https://github.com/googleapis/nodejs-spanner/commit/192bf2bb603bca4ac481fcfd1f04974173adc6a1))
+* Add support for AFE latency metrics ([#2348](https://github.com/googleapis/nodejs-spanner/issues/2348)) ([0666f05](https://github.com/googleapis/nodejs-spanner/commit/0666f05d589e2f229b44dffae8e9649220bccf8b))
+* Add throughput_mode to UpdateDatabaseDdlRequest to be used by Spanner Migration Tool. See https://github.com/GoogleCloudPlatform/spanner-migration-tool ([#2304](https://github.com/googleapis/nodejs-spanner/issues/2304)) ([a29af56](https://github.com/googleapis/nodejs-spanner/commit/a29af56ae3c31f07115cb938bcf3f0f77241b725))
+* Operation, Attempt, and GFE metrics ([#2328](https://github.com/googleapis/nodejs-spanner/issues/2328)) ([646e6ea](https://github.com/googleapis/nodejs-spanner/commit/646e6ea6f1dc5fa1937e512ae9e81ae4d2637ed0))
+* Proto changes for an internal api ([#2356](https://github.com/googleapis/nodejs-spanner/issues/2356)) ([380e770](https://github.com/googleapis/nodejs-spanner/commit/380e7705a23a692168db386ba5426c91bf1587b6))
+* **spanner:** A new field `snapshot_timestamp` is added to message `.google.spanner.v1.CommitResponse` ([#2350](https://github.com/googleapis/nodejs-spanner/issues/2350)) ([0875cd8](https://github.com/googleapis/nodejs-spanner/commit/0875cd82e99fa6c95ab38807e09c5921303775f8))
+* **spanner:** Add new change_stream.proto ([#2315](https://github.com/googleapis/nodejs-spanner/issues/2315)) ([57d67be](https://github.com/googleapis/nodejs-spanner/commit/57d67be2e3b6d6ac2a8a903acf8613b27a049c3b))
+* **spanner:** Add tpc support ([#2333](https://github.com/googleapis/nodejs-spanner/issues/2333)) ([a381cab](https://github.com/googleapis/nodejs-spanner/commit/a381cab92c31373a6a10edca0f8a8bdfc4415e4b))
+* Track precommit token in r/w apis(multiplexed session) ([#2312](https://github.com/googleapis/nodejs-spanner/issues/2312)) ([3676bfa](https://github.com/googleapis/nodejs-spanner/commit/3676bfa60725c43f85a04ead87943be92e4a99f0))
+
+
+### Bug Fixes
+
+* Docs-test ([#2297](https://github.com/googleapis/nodejs-spanner/issues/2297)) ([61c571c](https://github.com/googleapis/nodejs-spanner/commit/61c571c729c2a065df6ff166db784a6e6eaef74d))
+* Ensure context propagation works in Node.js 22 with async/await ([#2326](https://github.com/googleapis/nodejs-spanner/issues/2326)) ([e8cdbed](https://github.com/googleapis/nodejs-spanner/commit/e8cdbedd55f049b8c7766e97388ed045fedd1b4e))
+* Pass the Span correctly ([#2332](https://github.com/googleapis/nodejs-spanner/issues/2332)) ([edaee77](https://github.com/googleapis/nodejs-spanner/commit/edaee7791b2d814f749ed35119dd705924984a78))
+* System test against emulator ([#2339](https://github.com/googleapis/nodejs-spanner/issues/2339)) ([2a6af4c](https://github.com/googleapis/nodejs-spanner/commit/2a6af4c36484f44929a2fac80d8f225dad5d702c))
+* Unhandled exceptions from gax ([#2338](https://github.com/googleapis/nodejs-spanner/issues/2338)) ([6428bcd](https://github.com/googleapis/nodejs-spanner/commit/6428bcd2980852c1bdbc4c3d0ab210a139e5f193))
+
+
+### Performance Improvements
+
+* Skip gRPC trailers for StreamingRead & ExecuteStreamingSql ([#2313](https://github.com/googleapis/nodejs-spanner/issues/2313)) ([8bd0781](https://github.com/googleapis/nodejs-spanner/commit/8bd0781e8b434a421f0e0f3395439a5a86c7847c))
+
+## [8.0.0](https://github.com/googleapis/nodejs-spanner/compare/v7.21.0...v8.0.0) (2025-05-12)
+
+
+### ⚠ BREAKING CHANGES
+
+* remove the arrify package ([#2292](https://github.com/googleapis/nodejs-spanner/issues/2292))
+* migrate to Node 18 ([#2271](https://github.com/googleapis/nodejs-spanner/issues/2271))
+
+### Features
+
+* Add promise based signatures for createQueryPartitions ([#2284](https://github.com/googleapis/nodejs-spanner/issues/2284)) ([255d8a6](https://github.com/googleapis/nodejs-spanner/commit/255d8a6a5749b6a05cd87dd7444cab7dd75d3e42))
+* Add promise based signatures on createReadPartitions ([#2300](https://github.com/googleapis/nodejs-spanner/issues/2300)) ([7b8a1f7](https://github.com/googleapis/nodejs-spanner/commit/7b8a1f70f0de3aa5886a2cde9325c9a36222a311))
+* Support promise based signatures for execute method ([#2301](https://github.com/googleapis/nodejs-spanner/issues/2301)) ([bb857e1](https://github.com/googleapis/nodejs-spanner/commit/bb857e18459f717d67b9b3d144c2b022178363cb))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency @google-cloud/kms to v5 ([#2289](https://github.com/googleapis/nodejs-spanner/issues/2289)) ([1ccb505](https://github.com/googleapis/nodejs-spanner/commit/1ccb505935e70b6f576f06e566325146ee68f3ff))
+* **deps:** Update dependency @google-cloud/precise-date to v5 ([#2290](https://github.com/googleapis/nodejs-spanner/issues/2290)) ([44f7575](https://github.com/googleapis/nodejs-spanner/commit/44f7575efd3751d0595beef2ec4eb9f39bc426d7))
+* **deps:** Update dependency big.js to v7 ([#2286](https://github.com/googleapis/nodejs-spanner/issues/2286)) ([0911297](https://github.com/googleapis/nodejs-spanner/commit/0911297cc33aec93c09ef2be42413f20c75fc2bf))
+
+
+### Miscellaneous Chores
+
+* Migrate to Node 18 ([#2271](https://github.com/googleapis/nodejs-spanner/issues/2271)) ([cab3f22](https://github.com/googleapis/nodejs-spanner/commit/cab3f229ccb2189bd5af0c25a3006b553f8a5453))
+* Remove the arrify package ([#2292](https://github.com/googleapis/nodejs-spanner/issues/2292)) ([e8f5ca1](https://github.com/googleapis/nodejs-spanner/commit/e8f5ca15125d570949769e6e66f0d911cb21f58d))
+
+## [7.21.0](https://github.com/googleapis/nodejs-spanner/compare/v7.20.0...v7.21.0) (2025-04-15)
+
+
+### Features
+
+* Adding sample for pre-split feature ([#2274](https://github.com/googleapis/nodejs-spanner/issues/2274)) ([3d5f080](https://github.com/googleapis/nodejs-spanner/commit/3d5f08065fdf40a1c441d97a049d7dacf1a5be93))
+
+
+### Bug Fixes
+
+* Adding span attributes for request tag and transaction tag ([#2236](https://github.com/googleapis/nodejs-spanner/issues/2236)) ([3f69dad](https://github.com/googleapis/nodejs-spanner/commit/3f69dad36cfdeb4effd191e0d38079ead1bd6654))
+
+## [7.20.0](https://github.com/googleapis/nodejs-spanner/compare/v7.19.1...v7.20.0) (2025-04-11)
+
+
+### Features
+
+* Add support for Interval ([#2192](https://github.com/googleapis/nodejs-spanner/issues/2192)) ([8c886cb](https://github.com/googleapis/nodejs-spanner/commit/8c886cbc0d7523fb99e65cfc5d8f565b630e26f0))
+* **debugging:** Implement x-goog-spanner-request-id propagation per request ([#2205](https://github.com/googleapis/nodejs-spanner/issues/2205)) ([e42caea](https://github.com/googleapis/nodejs-spanner/commit/e42caeaaa656c395d240f4af412ddb947f29c59b))
+* **spanner:** Add support for snapshot isolation ([#2245](https://github.com/googleapis/nodejs-spanner/issues/2245)) ([b60a683](https://github.com/googleapis/nodejs-spanner/commit/b60a683c0e1ddbf704766eb99f102fed925a348c))
+* **spanner:** Support for Multiplexed Session Partitioned Ops ([#2252](https://github.com/googleapis/nodejs-spanner/issues/2252)) ([e7ce471](https://github.com/googleapis/nodejs-spanner/commit/e7ce471332f6e73614638b96ed54c87095d785a2))
+
+## [7.19.1](https://github.com/googleapis/nodejs-spanner/compare/v7.19.0...v7.19.1) (2025-03-13)
+
+
+### Bug Fixes
+
+* CreateQueryPartition with query params ([91f5afd](https://github.com/googleapis/nodejs-spanner/commit/91f5afda53bd9c46fcd1a1fe33f579b6aed5223a))
+
+## [7.19.0](https://github.com/googleapis/nodejs-spanner/compare/v7.18.1...v7.19.0) (2025-02-26)
+
+
+### Features
+
+* Add AddSplitPoints API ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* Paging changes for bigquery ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* **spanner:** A new enum `IsolationLevel` is added ([#2225](https://github.com/googleapis/nodejs-spanner/issues/2225)) ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* **spanner:** A new field `isolation_level` is added to message `.google.spanner.v1.TransactionOptions` ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* **spanner:** Add instance partitions field in backup proto ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* **spanner:** Add support for Multiplexed Session for Read Only Tran… ([#2214](https://github.com/googleapis/nodejs-spanner/issues/2214)) ([3a7a51b](https://github.com/googleapis/nodejs-spanner/commit/3a7a51bee00730c2daf1b9791b45f75531c14a2c))
+* **x-goog-spanner-request-id:** Add bases ([#2211](https://github.com/googleapis/nodejs-spanner/issues/2211)) ([0008038](https://github.com/googleapis/nodejs-spanner/commit/000803812e670ce0f4bac4a6460351f2b08ec660))
+
+
+### Bug Fixes
+
+* Add x-goog-request params to headers for LRO-polling methods ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* Error from fill method should not be emitted ([#2233](https://github.com/googleapis/nodejs-spanner/issues/2233)) ([2cc44cf](https://github.com/googleapis/nodejs-spanner/commit/2cc44cf238bd18f5a456c76ddb8280c2252c2e87)), closes [#2103](https://github.com/googleapis/nodejs-spanner/issues/2103)
+* Finalize fixing typings for headers in generator ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* Fix typings for headers in generator ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* Remove extra protos in ESM & capture ESM in headers ([e4d389a](https://github.com/googleapis/nodejs-spanner/commit/e4d389a23ff4b73b2d0774ad31a84c9a6c19e306))
+* Rollback with no id ([#2231](https://github.com/googleapis/nodejs-spanner/issues/2231)) ([a6919b1](https://github.com/googleapis/nodejs-spanner/commit/a6919b15bd01ed93c62d32533d78181cbd333f5e)), closes [#2103](https://github.com/googleapis/nodejs-spanner/issues/2103)
+
+## [7.18.1](https://github.com/googleapis/nodejs-spanner/compare/v7.18.0...v7.18.1) (2025-02-05)
+
+
+### Bug Fixes
+
+* Fix NodeJS release ([#2229](https://github.com/googleapis/nodejs-spanner/issues/2229)) ([f830fc8](https://github.com/googleapis/nodejs-spanner/commit/f830fc82ce666902db3cddc667326dc2731c14a1))
+
+## [7.18.0](https://github.com/googleapis/nodejs-spanner/compare/v7.17.1...v7.18.0) (2025-01-29)
+
+
+### Features
+
+* Add gcp client attributes for Opentelemetry traces ([#2215](https://github.com/googleapis/nodejs-spanner/issues/2215)) ([d2ff046](https://github.com/googleapis/nodejs-spanner/commit/d2ff046854b4139af6e3a6f0d2122619cdf83131))
+
+## [7.17.1](https://github.com/googleapis/nodejs-spanner/compare/v7.17.0...v7.17.1) (2025-01-03)
+
+
+### Bug Fixes
+
+* Remove default global trace context propagator ([#2209](https://github.com/googleapis/nodejs-spanner/issues/2209)) ([7898e0c](https://github.com/googleapis/nodejs-spanner/commit/7898e0ce0477e2d4327822ac26a2674203b47a64)), closes [#2208](https://github.com/googleapis/nodejs-spanner/issues/2208)
+
+## [7.17.0](https://github.com/googleapis/nodejs-spanner/compare/v7.16.0...v7.17.0) (2024-12-27)
+
+
+### Features
+
+* Add the last statement option to ExecuteSqlRequest and ExecuteBatchDmlRequest ([#2196](https://github.com/googleapis/nodejs-spanner/issues/2196)) ([223f167](https://github.com/googleapis/nodejs-spanner/commit/223f167c1c9bc4da26155637eabbcabce5487ede))
+* Enable e2e tracing ([#2202](https://github.com/googleapis/nodejs-spanner/issues/2202)) ([3cc257e](https://github.com/googleapis/nodejs-spanner/commit/3cc257e99925594776b9a1886f0173ce2dfe904f))
+
+
+### Bug Fixes
+
+* Span events Issue 2166 ([#2184](https://github.com/googleapis/nodejs-spanner/issues/2184)) ([97ed577](https://github.com/googleapis/nodejs-spanner/commit/97ed5776dbdf5e90f8398fffea08e2a968045f9b))
+
+## [7.16.0](https://github.com/googleapis/nodejs-spanner/compare/v7.15.0...v7.16.0) (2024-11-09)
+
+
+### Features
+
+* **spanner:** Add support for Cloud Spanner Default Backup Schedules ([#2135](https://github.com/googleapis/nodejs-spanner/issues/2135)) ([19f137c](https://github.com/googleapis/nodejs-spanner/commit/19f137c870796d60902be8d9d3a82f4abcfc693f))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v4.4.1 ([#2100](https://github.com/googleapis/nodejs-spanner/issues/2100)) ([2e94bcd](https://github.com/googleapis/nodejs-spanner/commit/2e94bcd06d99a98c7767281e8035d000e186692b))
+
+## [7.15.0](https://github.com/googleapis/nodejs-spanner/compare/v7.14.0...v7.15.0) (2024-10-30)
+
+
+### Features
+
+* (observability, samples): add tracing end-to-end sample ([#2130](https://github.com/googleapis/nodejs-spanner/issues/2130)) ([66d99e8](https://github.com/googleapis/nodejs-spanner/commit/66d99e836cd2bfbb3b0f78980ec2b499f9e5e563))
+* (observability) add spans for BatchTransaction and Table ([#2115](https://github.com/googleapis/nodejs-spanner/issues/2115)) ([d51aae9](https://github.com/googleapis/nodejs-spanner/commit/d51aae9c9c3c0e6319d81c2809573ae54675acf3)), closes [#2114](https://github.com/googleapis/nodejs-spanner/issues/2114)
+* (observability) Add support for OpenTelemetry traces and allow observability options to be passed. ([#2131](https://github.com/googleapis/nodejs-spanner/issues/2131)) ([5237e11](https://github.com/googleapis/nodejs-spanner/commit/5237e118befb4b7fe4aea76a80a91e822d7a22e4)), closes [#2079](https://github.com/googleapis/nodejs-spanner/issues/2079)
+* (observability) propagate database name for every span generated to aid in quick debugging ([#2155](https://github.com/googleapis/nodejs-spanner/issues/2155)) ([0342e74](https://github.com/googleapis/nodejs-spanner/commit/0342e74721a0684d8195a6299c3a634eefc2b522))
+* (observability) trace Database.batchCreateSessions + SessionPool.createSessions ([#2145](https://github.com/googleapis/nodejs-spanner/issues/2145)) ([f489c94](https://github.com/googleapis/nodejs-spanner/commit/f489c9479fa5402f0c960cf896fd3be0e946f182))
+* (observability): trace Database.runPartitionedUpdate ([#2176](https://github.com/googleapis/nodejs-spanner/issues/2176)) ([701e226](https://github.com/googleapis/nodejs-spanner/commit/701e22660d5ac9f0b3e940ad656b9ca6c479251d)), closes [#2079](https://github.com/googleapis/nodejs-spanner/issues/2079)
+* (observability): trace Database.runTransactionAsync ([#2167](https://github.com/googleapis/nodejs-spanner/issues/2167)) ([d0fe178](https://github.com/googleapis/nodejs-spanner/commit/d0fe178623c1c48245d11bcea97fcd340b6615af)), closes [#207](https://github.com/googleapis/nodejs-spanner/issues/207)
+* Allow multiple KMS keys to create CMEK database/backup ([#2099](https://github.com/googleapis/nodejs-spanner/issues/2099)) ([51bc8a7](https://github.com/googleapis/nodejs-spanner/commit/51bc8a7445ab8b3d2239493b69d9c271c1086dde))
+* **observability:** Fix bugs found from product review + negative cases ([#2158](https://github.com/googleapis/nodejs-spanner/issues/2158)) ([cbc86fa](https://github.com/googleapis/nodejs-spanner/commit/cbc86fa80498af6bd745eebb9443612936e26d4e))
+* **observability:** Trace Database methods ([#2119](https://github.com/googleapis/nodejs-spanner/issues/2119)) ([1f06871](https://github.com/googleapis/nodejs-spanner/commit/1f06871f7aca386756e8691013602b069697bb87)), closes [#2114](https://github.com/googleapis/nodejs-spanner/issues/2114)
+* **observability:** Trace Database.batchWriteAtLeastOnce ([#2157](https://github.com/googleapis/nodejs-spanner/issues/2157)) ([2a19ef1](https://github.com/googleapis/nodejs-spanner/commit/2a19ef1af4f6fd1b81d08afc15db76007859a0b9)), closes [#2079](https://github.com/googleapis/nodejs-spanner/issues/2079)
+* **observability:** Trace Transaction ([#2122](https://github.com/googleapis/nodejs-spanner/issues/2122)) ([a464bdb](https://github.com/googleapis/nodejs-spanner/commit/a464bdb5cbb7856b7a08dac3ff48132948b65792)), closes [#2114](https://github.com/googleapis/nodejs-spanner/issues/2114)
+
+
+### Bug Fixes
+
+* Exact staleness timebound ([#2143](https://github.com/googleapis/nodejs-spanner/issues/2143)) ([f01516e](https://github.com/googleapis/nodejs-spanner/commit/f01516ec6ba44730622cfb050c52cd93f30bba7a)), closes [#2129](https://github.com/googleapis/nodejs-spanner/issues/2129)
+* GetMetadata for Session ([#2124](https://github.com/googleapis/nodejs-spanner/issues/2124)) ([2fd63ac](https://github.com/googleapis/nodejs-spanner/commit/2fd63acb87ce06a02d7fdfa78d836dbd7ad59a26)), closes [#2123](https://github.com/googleapis/nodejs-spanner/issues/2123)
+
+## [7.14.0](https://github.com/googleapis/nodejs-spanner/compare/v7.13.0...v7.14.0) (2024-08-14)
+
+
+### Features
+
+* **spanner:** Add resource reference annotation to backup schedules ([#2093](https://github.com/googleapis/nodejs-spanner/issues/2093)) ([df539e6](https://github.com/googleapis/nodejs-spanner/commit/df539e665fe5d8fe01084b8d8cf6094c89b13d48))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v4.3.9 ([#2094](https://github.com/googleapis/nodejs-spanner/issues/2094)) ([487efc0](https://github.com/googleapis/nodejs-spanner/commit/487efc091e0e143d3c59ac63d66005133b1ef2e5))
+
+## [7.13.0](https://github.com/googleapis/nodejs-spanner/compare/v7.12.0...v7.13.0) (2024-08-09)
+
+
+### Features
+
+* **spanner:** Add support for Cloud Spanner Incremental Backups ([#2085](https://github.com/googleapis/nodejs-spanner/issues/2085)) ([33b9645](https://github.com/googleapis/nodejs-spanner/commit/33b9645d6096e0d77d30fab6aadf5d92da973a67))
+
+
+### Bug Fixes
+
+* Unhandled exception error catch ([#2091](https://github.com/googleapis/nodejs-spanner/issues/2091)) ([e277752](https://github.com/googleapis/nodejs-spanner/commit/e277752fad961908e37e37d88d7b6a61d61a078e))
+
+## [7.12.0](https://github.com/googleapis/nodejs-spanner/compare/v7.11.0...v7.12.0) (2024-08-02)
+
+
+### Features
+
+* Grpc keep alive settings ([#2086](https://github.com/googleapis/nodejs-spanner/issues/2086)) ([7712c35](https://github.com/googleapis/nodejs-spanner/commit/7712c35be21863015bb709f5f89d9ef0bb656024))
+
+## [7.11.0](https://github.com/googleapis/nodejs-spanner/compare/v7.10.0...v7.11.0) (2024-07-29)
+
+
+### Features
+
+* Add support for blind writes ([#2065](https://github.com/googleapis/nodejs-spanner/issues/2065)) ([62fc0a4](https://github.com/googleapis/nodejs-spanner/commit/62fc0a47327017c115466b9e89e53dbd778579af))
+* **spanner:** Add samples for instance partitions ([#2083](https://github.com/googleapis/nodejs-spanner/issues/2083)) ([b91e284](https://github.com/googleapis/nodejs-spanner/commit/b91e2849056df9894e0590cb71e21c13319e6d70))
+
+## [7.10.0](https://github.com/googleapis/nodejs-spanner/compare/v7.9.1...v7.10.0) (2024-07-19)
+
+
+### Features
+
+* Add field lock_hint in spanner.proto ([47520e9](https://github.com/googleapis/nodejs-spanner/commit/47520e927b0fdcc60cb67378b8b49f44329f210b))
+* Add field order_by in spanner.proto ([47520e9](https://github.com/googleapis/nodejs-spanner/commit/47520e927b0fdcc60cb67378b8b49f44329f210b))
+* Add QueryCancellationAction message in executor protos ([47520e9](https://github.com/googleapis/nodejs-spanner/commit/47520e927b0fdcc60cb67378b8b49f44329f210b))
+* Add support for change streams transaction exclusion option for Batch Write ([#2070](https://github.com/googleapis/nodejs-spanner/issues/2070)) ([2a9e443](https://github.com/googleapis/nodejs-spanner/commit/2a9e44328acda310db2d0d65d32ad82d77a9fcb0))
+* **spanner:** Add support for Cloud Spanner Scheduled Backups ([#2045](https://github.com/googleapis/nodejs-spanner/issues/2045)) ([47520e9](https://github.com/googleapis/nodejs-spanner/commit/47520e927b0fdcc60cb67378b8b49f44329f210b))
+* Update Nodejs generator to send API versions in headers for GAPICs ([47520e9](https://github.com/googleapis/nodejs-spanner/commit/47520e927b0fdcc60cb67378b8b49f44329f210b))
+
+
+### Bug Fixes
+
+* Callback in getDatabaseDialect ([#2078](https://github.com/googleapis/nodejs-spanner/issues/2078)) ([7e4a8e9](https://github.com/googleapis/nodejs-spanner/commit/7e4a8e9ad4f785b15b68aaa06b6480098d7995ba))
+* **deps:** Update dependency google-gax to v4.3.8 ([#2077](https://github.com/googleapis/nodejs-spanner/issues/2077)) ([e927880](https://github.com/googleapis/nodejs-spanner/commit/e927880ff786a2528a2bbb063a244af3c42ff69c))
+
+## [7.9.1](https://github.com/googleapis/nodejs-spanner/compare/v7.9.0...v7.9.1) (2024-06-26)
+
+
+### Bug Fixes
+
+* Retry with timeout ([#2071](https://github.com/googleapis/nodejs-spanner/issues/2071)) ([a943257](https://github.com/googleapis/nodejs-spanner/commit/a943257a0402b26fd80196057a9724fd28fc5c1b))
+
+## [7.9.0](https://github.com/googleapis/nodejs-spanner/compare/v7.8.0...v7.9.0) (2024-06-21)
+
+
+### Features
+
+* **spanner:** Add support for batchWrite ([#2054](https://github.com/googleapis/nodejs-spanner/issues/2054)) ([06aab6e](https://github.com/googleapis/nodejs-spanner/commit/06aab6e39bbce9e3786f1ac631c80e8909197e92))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v4.3.4 ([#2051](https://github.com/googleapis/nodejs-spanner/issues/2051)) ([80abf06](https://github.com/googleapis/nodejs-spanner/commit/80abf06ba8ef9497318ffc597b83fb63e4408f9c))
+* **deps:** Update dependency google-gax to v4.3.5 ([#2055](https://github.com/googleapis/nodejs-spanner/issues/2055)) ([702c9b0](https://github.com/googleapis/nodejs-spanner/commit/702c9b0f34e6cc34233c5aa52b97601b19f70980))
+* **deps:** Update dependency google-gax to v4.3.6 ([#2057](https://github.com/googleapis/nodejs-spanner/issues/2057)) ([74ebf1e](https://github.com/googleapis/nodejs-spanner/commit/74ebf1e45cddf614c180295f3a761a8f84c5cb32))
+* **deps:** Update dependency google-gax to v4.3.7 ([#2068](https://github.com/googleapis/nodejs-spanner/issues/2068)) ([28fec6c](https://github.com/googleapis/nodejs-spanner/commit/28fec6ca505d78d725efc123950be978e0c84ab7))
+
+## [7.8.0](https://github.com/googleapis/nodejs-spanner/compare/v7.7.0...v7.8.0) (2024-05-24)
+
+
+### Features
+
+* Add `RESOURCE_EXHAUSTED` to the list of retryable error codes ([#2032](https://github.com/googleapis/nodejs-spanner/issues/2032)) ([a4623c5](https://github.com/googleapis/nodejs-spanner/commit/a4623c560c16fa1f37a06cb57a5e47a1d6759d27))
+* Add support for multi region encryption config ([81fa610](https://github.com/googleapis/nodejs-spanner/commit/81fa610895fe709cbb7429896493a67407a6343c))
+* Add support for Proto columns ([#1991](https://github.com/googleapis/nodejs-spanner/issues/1991)) ([ae59c7f](https://github.com/googleapis/nodejs-spanner/commit/ae59c7f957660e08cd5965b5e67694fa1ccc0057))
+* **spanner:** Add support for change streams transaction exclusion option ([#2049](https://github.com/googleapis/nodejs-spanner/issues/2049)) ([d95cab5](https://github.com/googleapis/nodejs-spanner/commit/d95cab5abe50cdb56cbc1d6d935aee29526e1096))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v4.3.3 ([#2038](https://github.com/googleapis/nodejs-spanner/issues/2038)) ([d86c1b0](https://github.com/googleapis/nodejs-spanner/commit/d86c1b0c21c7c95e3110221b3ca6ff9ff3b4a088))
+* Drop table statement ([#2036](https://github.com/googleapis/nodejs-spanner/issues/2036)) ([f31d7b2](https://github.com/googleapis/nodejs-spanner/commit/f31d7b205d74d4a783f0d5159dd5b62efe968fe6))
+
+## [7.7.0](https://github.com/googleapis/nodejs-spanner/compare/v7.6.0...v7.7.0) (2024-04-17)
+
+
+### Features
+
+* OptimisticLock option for getTransaction method ([#2028](https://github.com/googleapis/nodejs-spanner/issues/2028)) ([dacf869](https://github.com/googleapis/nodejs-spanner/commit/dacf8697b20752041684710982035b4c97837d28))
+* **spanner:** Adding `EXPECTED_FULFILLMENT_PERIOD` to the indicate instance creation times (with `FULFILLMENT_PERIOD_NORMAL` or `FULFILLMENT_PERIOD_EXTENDED` ENUM) with the extended instance creation time triggered by On-Demand Capacity Feature ([#2024](https://github.com/googleapis/nodejs-spanner/issues/2024)) ([5292e03](https://github.com/googleapis/nodejs-spanner/commit/5292e035c5278ba6806f9e1eb84809ed893b1e37))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v4.3.2 ([#2026](https://github.com/googleapis/nodejs-spanner/issues/2026)) ([0ee9831](https://github.com/googleapis/nodejs-spanner/commit/0ee98319f291f552a0afc52629d12af9969d1d10))
+
+## [7.6.0](https://github.com/googleapis/nodejs-spanner/compare/v7.5.0...v7.6.0) (2024-03-26)
+
+
+### Features
+
+* Add instance partition support to spanner instance proto ([#2001](https://github.com/googleapis/nodejs-spanner/issues/2001)) ([4381047](https://github.com/googleapis/nodejs-spanner/commit/43810478e81d3a234e7fa94af90fd49ca379dd98))
+* Managed Autoscaler ([#2015](https://github.com/googleapis/nodejs-spanner/issues/2015)) ([547ca1b](https://github.com/googleapis/nodejs-spanner/commit/547ca1b0da8c5c5e28f85fbd4ea16af21e20c980))
+* **spanner:** Add a sample for max commit delays ([#1993](https://github.com/googleapis/nodejs-spanner/issues/1993)) ([91c7204](https://github.com/googleapis/nodejs-spanner/commit/91c7204e2c8f62e229d7a2b2a0ff059d421dd984))
+* **spanner:** Add support for float32 ([#2020](https://github.com/googleapis/nodejs-spanner/issues/2020)) ([99e2c1d](https://github.com/googleapis/nodejs-spanner/commit/99e2c1d4791a5ca86fdccb3f600aa4592efe0a45))
+
+## [7.5.0](https://github.com/googleapis/nodejs-spanner/compare/v7.4.0...v7.5.0) (2024-03-04)
+
+
+### Features
+
+* **spanner:** Add emulator support for the admin client autogenerated API samples ([#1994](https://github.com/googleapis/nodejs-spanner/issues/1994)) ([e2fe5b7](https://github.com/googleapis/nodejs-spanner/commit/e2fe5b748c3077078fa43e4bfa427fef603656a9))
+
+
+### Bug Fixes
+
+* Revert untyped param type feature ([#2012](https://github.com/googleapis/nodejs-spanner/issues/2012)) ([49fa60d](https://github.com/googleapis/nodejs-spanner/commit/49fa60dd0735fe66db33f7b9137dba0821eb5184))
+
+## [7.4.0](https://github.com/googleapis/nodejs-spanner/compare/v7.3.0...v7.4.0) (2024-02-23)
+
+
+### Features
+
+* **spanner:** Add PG.OID support ([#1948](https://github.com/googleapis/nodejs-spanner/issues/1948)) ([cf9df7a](https://github.com/googleapis/nodejs-spanner/commit/cf9df7a54c21ac995bbea9ad82c3544e4aff41b6))
+* Untyped param types ([#1869](https://github.com/googleapis/nodejs-spanner/issues/1869)) ([6ef44c3](https://github.com/googleapis/nodejs-spanner/commit/6ef44c383a90bf6ae95de531c83e21d2d58da159))
+* Update TransactionOptions to include new option exclude_txn_from_change_streams ([#1998](https://github.com/googleapis/nodejs-spanner/issues/1998)) ([937a7a1](https://github.com/googleapis/nodejs-spanner/commit/937a7a13f8c7660e21d34ebbaecad426b2bacd99))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v4.3.1 ([#1995](https://github.com/googleapis/nodejs-spanner/issues/1995)) ([bed4832](https://github.com/googleapis/nodejs-spanner/commit/bed4832445e72c7116fe5495c79d989664220b38))
+* Only reset pending value with resume token ([#2000](https://github.com/googleapis/nodejs-spanner/issues/2000)) ([f337089](https://github.com/googleapis/nodejs-spanner/commit/f337089567d7d92c9467e311be7d72b0a7dc8047)), closes [#1959](https://github.com/googleapis/nodejs-spanner/issues/1959)
+
+## [7.3.0](https://github.com/googleapis/nodejs-spanner/compare/v7.2.0...v7.3.0) (2024-02-08)
+
+
+### Features
+
+* **spanner:** Add maxCommitDelay support ([#1992](https://github.com/googleapis/nodejs-spanner/issues/1992)) ([9f84408](https://github.com/googleapis/nodejs-spanner/commit/9f8440843fd8926a37ec300a318dad33b83b4f97))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency google-gax to v4.1.0 ([#1981](https://github.com/googleapis/nodejs-spanner/issues/1981)) ([2a36150](https://github.com/googleapis/nodejs-spanner/commit/2a36150cb61e9abeef073724189cc651d29d8776))
+* **deps:** Update dependency google-gax to v4.2.0 ([#1988](https://github.com/googleapis/nodejs-spanner/issues/1988)) ([005589a](https://github.com/googleapis/nodejs-spanner/commit/005589a7727ee87948a55a6c7710f5150fc1c6a7))
+* **deps:** Update dependency google-gax to v4.2.1 ([#1989](https://github.com/googleapis/nodejs-spanner/issues/1989)) ([d2ae995](https://github.com/googleapis/nodejs-spanner/commit/d2ae9952e7449ce2321e69a6be36c9d50d863095))
+* **deps:** Update dependency google-gax to v4.3.0 ([#1990](https://github.com/googleapis/nodejs-spanner/issues/1990)) ([e625753](https://github.com/googleapis/nodejs-spanner/commit/e625753a37393f32d9e449aa7324763082f6c923))
+
+## [7.2.0](https://github.com/googleapis/nodejs-spanner/compare/v7.1.0...v7.2.0) (2024-01-11)
+
+
+### Features
+
+* Support for Directed Reads ([#1966](https://github.com/googleapis/nodejs-spanner/issues/1966)) ([c0a4363](https://github.com/googleapis/nodejs-spanner/commit/c0a43638c81dd769cc55e021cc4cf1d93db8a72a))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency @google-cloud/precise-date to v4 ([#1903](https://github.com/googleapis/nodejs-spanner/issues/1903)) ([7464c8b](https://github.com/googleapis/nodejs-spanner/commit/7464c8b2412a9b718cd8981363cb982aebbe3723))
+* **deps:** Update dependency @types/stack-trace to v0.0.33 ([#1952](https://github.com/googleapis/nodejs-spanner/issues/1952)) ([45ab751](https://github.com/googleapis/nodejs-spanner/commit/45ab751da1f0f73bc06c8b8e0007b457fa75518f))
+* **deps:** Update dependency retry-request to v7 ([#1934](https://github.com/googleapis/nodejs-spanner/issues/1934)) ([c575c80](https://github.com/googleapis/nodejs-spanner/commit/c575c80b17e5fdf2cbba24c806fa21f26c2010dc))
+
+## [7.1.0](https://github.com/googleapis/nodejs-spanner/compare/v7.0.0...v7.1.0) (2023-11-16)
+
+
+### Features
+
+* Add PG.OID type code annotation ([69192b5](https://github.com/googleapis/nodejs-spanner/commit/69192b50ead0bde98676cb647ba4bf8a3112bb02))
+* **spanner:** Add autoscaling config to the instance proto ([#1935](https://github.com/googleapis/nodejs-spanner/issues/1935)) ([fe285c6](https://github.com/googleapis/nodejs-spanner/commit/fe285c67074ba36aaf5b49ea867c0d5851d83717))
+* **spanner:** Add directed_read_option in spanner.proto ([69192b5](https://github.com/googleapis/nodejs-spanner/commit/69192b50ead0bde98676cb647ba4bf8a3112bb02))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency @types/stack-trace to v0.0.31 ([#1924](https://github.com/googleapis/nodejs-spanner/issues/1924)) ([96af405](https://github.com/googleapis/nodejs-spanner/commit/96af4051c6717dfcbbc6e117e3ecd7f8e9dd758a))
+* **deps:** Update dependency @types/stack-trace to v0.0.32 ([#1939](https://github.com/googleapis/nodejs-spanner/issues/1939)) ([cb66474](https://github.com/googleapis/nodejs-spanner/commit/cb66474e995a90c1288e70842f723c51f1ffd37d))
+* **deps:** Update dependency google-gax to v4.0.4 ([#1926](https://github.com/googleapis/nodejs-spanner/issues/1926)) ([361fe6a](https://github.com/googleapis/nodejs-spanner/commit/361fe6a812f56c6834f1f7c7db60fc1083243768))
+* **deps:** Update dependency google-gax to v4.0.5 ([#1937](https://github.com/googleapis/nodejs-spanner/issues/1937)) ([ab26075](https://github.com/googleapis/nodejs-spanner/commit/ab260759be2fcc9ff80342f710b4c807742da2c5))
+
+## [7.0.0](https://github.com/googleapis/nodejs-spanner/compare/v6.16.0...v7.0.0) (2023-08-30)
+
+
+### ⚠ BREAKING CHANGES
+
+* upgrade to Node 14 ([#1890](https://github.com/googleapis/nodejs-spanner/issues/1890))
+
+### Bug Fixes
+
+* Idwaiter with multiple requests ([#1910](https://github.com/googleapis/nodejs-spanner/issues/1910)) ([83dd1f8](https://github.com/googleapis/nodejs-spanner/commit/83dd1f8201d07898bd3ddff9e339dfbcef7d7ace))
+
+
+### Miscellaneous Chores
+
+* Upgrade to Node 14 ([#1890](https://github.com/googleapis/nodejs-spanner/issues/1890)) ([0024772](https://github.com/googleapis/nodejs-spanner/commit/0024772b750de404cd44771e320fe89cd430f064))
+
+## [6.16.0](https://github.com/googleapis/nodejs-spanner/compare/v6.15.0...v6.16.0) (2023-08-07)
+
+
+### Features
+
+* Bit reverse sequence ([#1846](https://github.com/googleapis/nodejs-spanner/issues/1846)) ([4154c02](https://github.com/googleapis/nodejs-spanner/commit/4154c02f4c5ac1aa23f4c7c61521ab6fbabadfb8))
+
+
+### Bug Fixes
+
+* Databoost tests ([#1870](https://github.com/googleapis/nodejs-spanner/issues/1870)) ([45e13c7](https://github.com/googleapis/nodejs-spanner/commit/45e13c70607abf717d533a8c5b1c58752a5439cb))
+
+## [6.15.0](https://github.com/googleapis/nodejs-spanner/compare/v6.14.0...v6.15.0) (2023-08-04)
+
+
+### Features
+
+* Enable leader aware routing by default. This update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. ([6852d99](https://github.com/googleapis/nodejs-spanner/commit/6852d99b858eb323ac3fc5e61905b8bf59486062))
+
+## [6.14.0](https://github.com/googleapis/nodejs-spanner/compare/v6.13.0...v6.14.0) (2023-07-21)
+
+
+### Features
+
+* Foreign key delete cascade testing, samples ([#1825](https://github.com/googleapis/nodejs-spanner/issues/1825)) ([74a54b0](https://github.com/googleapis/nodejs-spanner/commit/74a54b03f0d73a62edd524fa8d0248aea7ddf344))
+* Set LAR as False ([#1883](https://github.com/googleapis/nodejs-spanner/issues/1883)) ([ed510e8](https://github.com/googleapis/nodejs-spanner/commit/ed510e8545876e188e7bd782b6db80e677c3063c))
+
+## [6.13.0](https://github.com/googleapis/nodejs-spanner/compare/v6.12.0...v6.13.0) (2023-07-21)
+
+
+### Features
+
+* Enable leader aware routing by default. This update contains performance optimisations that will reduce the latency of read/write transactions that originate from a region other than the default leader region. ([87cd5e6](https://github.com/googleapis/nodejs-spanner/commit/87cd5e6ecdf6d888dd0e7fe712b7070c58b32d42))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency yargs to v17 ([#1866](https://github.com/googleapis/nodejs-spanner/issues/1866)) ([24e321f](https://github.com/googleapis/nodejs-spanner/commit/24e321f6327cfdfc191a84bb47d80a156eff5be9))
+
+## [6.12.0](https://github.com/googleapis/nodejs-spanner/compare/v6.11.0...v6.12.0) (2023-06-19)
+
+
+### Features
+
+* Databoostenabled for Query and Read partitions ([#1784](https://github.com/googleapis/nodejs-spanner/issues/1784)) ([66ff70c](https://github.com/googleapis/nodejs-spanner/commit/66ff70cd377d5e3f60a6796bc36bab3a39337f31))
+
+## [6.11.0](https://github.com/googleapis/nodejs-spanner/compare/v6.10.1...v6.11.0) (2023-06-06)
+
+
+### Features
+
+* **spanner:** Add DdlStatementActionInfo and add actions to UpdateDatabaseDdlMetadata ([#1860](https://github.com/googleapis/nodejs-spanner/issues/1860)) ([3e86f36](https://github.com/googleapis/nodejs-spanner/commit/3e86f369b927e3bf0a2046bd13d0b6a39a9bb076))
+* Testing for fgac in pg ([#1811](https://github.com/googleapis/nodejs-spanner/issues/1811)) ([c48945f](https://github.com/googleapis/nodejs-spanner/commit/c48945f536685d6e4ee4097cfac7d5f57853553e))
+
+## [6.10.1](https://github.com/googleapis/nodejs-spanner/compare/v6.10.0...v6.10.1) (2023-05-30)
+
+
+### Bug Fixes
+
+* Set database admin and instance as having handwritten layers (republish docs) ([3e3e624](https://github.com/googleapis/nodejs-spanner/commit/3e3e624187013d62a5ff479386fb8961f279b5ca))
+
+## [6.10.0](https://github.com/googleapis/nodejs-spanner/compare/v6.9.0...v6.10.0) (2023-05-17)
+
+
+### Features
+
+* Add support for UpdateDatabase ([#1802](https://github.com/googleapis/nodejs-spanner/issues/1802)) ([f4fbe71](https://github.com/googleapis/nodejs-spanner/commit/f4fbe71d819fde9a237f25b03af228b27cf58689))
+* Add support for UpdateDatabase in Cloud Spanner ([#1848](https://github.com/googleapis/nodejs-spanner/issues/1848)) ([dd9d505](https://github.com/googleapis/nodejs-spanner/commit/dd9d505e1480b9f45f0f4a09b0abca8282d5fceb))
+
+
+### Bug Fixes
+
+* Set grpc useragent ([#1847](https://github.com/googleapis/nodejs-spanner/issues/1847)) ([021e54e](https://github.com/googleapis/nodejs-spanner/commit/021e54ef469d7d95bae64c687b65489cbfc56cfa))
+
+## [6.9.0](https://github.com/googleapis/nodejs-spanner/compare/v6.8.0...v6.9.0) (2023-04-26)
+
+
+### Features
+
+* Leader aware routing ([#1783](https://github.com/googleapis/nodejs-spanner/issues/1783)) ([0703f41](https://github.com/googleapis/nodejs-spanner/commit/0703f4160c4a0b4c9f9f716174daca110ab8e50f))
+
+## [6.8.0](https://github.com/googleapis/nodejs-spanner/compare/v6.7.2...v6.8.0) (2023-04-06)
+
+
+### Features
+
+* Adding new fields for Serverless analytics ([#1816](https://github.com/googleapis/nodejs-spanner/issues/1816)) ([2a6ca6f](https://github.com/googleapis/nodejs-spanner/commit/2a6ca6f09215752f9451d625ac02837e9d70b66a))
+
+
+### Bug Fixes
+
+* Begin transaction does not handle error ([#1833](https://github.com/googleapis/nodejs-spanner/issues/1833)) ([6ecd366](https://github.com/googleapis/nodejs-spanner/commit/6ecd366da7183d502c710cb5c879984c276b12db))
+* Correcting the proto field Id for field data_boost_enabled ([#1827](https://github.com/googleapis/nodejs-spanner/issues/1827)) ([7f6d4cc](https://github.com/googleapis/nodejs-spanner/commit/7f6d4ccce9269197312f2d795ef854e1789e8fce))
+* Logic for retrying specified internal errors ([#1822](https://github.com/googleapis/nodejs-spanner/issues/1822)) ([f915bd1](https://github.com/googleapis/nodejs-spanner/commit/f915bd16cf7e817243e46a319b3e6f270b24bf68)), closes [#1808](https://github.com/googleapis/nodejs-spanner/issues/1808)
+
+## [6.7.2](https://github.com/googleapis/nodejs-spanner/compare/v6.7.1...v6.7.2) (2023-02-17)
+
+
+### Bug Fixes
+
+* Tests emit empty metadata before emitting unspecified error ([14ef031](https://github.com/googleapis/nodejs-spanner/commit/14ef0318db756e7debad8599b1e274b8877291e1))
+
+## [6.7.1](https://github.com/googleapis/nodejs-spanner/compare/v6.7.0...v6.7.1) (2023-01-23)
+
+
+### Bug Fixes
+
+* Change of tag for fgac ([#1780](https://github.com/googleapis/nodejs-spanner/issues/1780)) ([d75b6dd](https://github.com/googleapis/nodejs-spanner/commit/d75b6dd79ffc2442cbd7a14f1ea952edc6678a64))
+* **codec:** Use index to determine array struct member value ([#1775](https://github.com/googleapis/nodejs-spanner/issues/1775)) ([fc2b695](https://github.com/googleapis/nodejs-spanner/commit/fc2b695d9ea6b65df856b4b081a75165009413ee)), closes [#1774](https://github.com/googleapis/nodejs-spanner/issues/1774)
+
+## [6.7.0](https://github.com/googleapis/nodejs-spanner/compare/v6.6.0...v6.7.0) (2023-01-17)
+
+
+### Features
+
+* Added SuggestConversationSummary RPC ([#1744](https://github.com/googleapis/nodejs-spanner/issues/1744)) ([14346f3](https://github.com/googleapis/nodejs-spanner/commit/14346f3cf8ed0cb0a93c255dc520dc62887c0e1a))
+
+## [6.6.0](https://github.com/googleapis/nodejs-spanner/compare/v6.5.0...v6.6.0) (2022-12-16)
+
+
+### Features
+
+* Export data types in index.ts ([#1726](https://github.com/googleapis/nodejs-spanner/issues/1726)) ([844f57f](https://github.com/googleapis/nodejs-spanner/commit/844f57fa5e79e5e5a5ede80df5e117004427f201)), closes [#1720](https://github.com/googleapis/nodejs-spanner/issues/1720)
+* Fgac support and samples ([#1751](https://github.com/googleapis/nodejs-spanner/issues/1751)) ([0a394df](https://github.com/googleapis/nodejs-spanner/commit/0a394df9bfa193d79edc4c3a3d26238f361c0d45))
+
+
+### Bug Fixes
+
+* Add sleep after admin request intensive tests ([#1758](https://github.com/googleapis/nodejs-spanner/issues/1758)) ([7643ceb](https://github.com/googleapis/nodejs-spanner/commit/7643ceb7cde9f420539877b86fdb0d38b254348d))
+
+## [6.5.0](https://github.com/googleapis/nodejs-spanner/compare/v6.4.0...v6.5.0) (2022-11-30)
+
+
+### Features
+
+* Inline BeginTransaction with first statement ([#1692](https://github.com/googleapis/nodejs-spanner/issues/1692)) ([d1b95d2](https://github.com/googleapis/nodejs-spanner/commit/d1b95d21e2c8cb0eff88351265cad248870bb3ea))
+
+
+### Bug Fixes
+
+* Cleanup different types of session pools ([#1739](https://github.com/googleapis/nodejs-spanner/issues/1739)) ([6f55187](https://github.com/googleapis/nodejs-spanner/commit/6f551877ea0d4b67e3c734377bdadd5d570cf839))
+* **deps:** Use google-gax v3.5.2 ([#1732](https://github.com/googleapis/nodejs-spanner/issues/1732)) ([8341b1f](https://github.com/googleapis/nodejs-spanner/commit/8341b1fa5dfcf0b286892efb8b57c7ad694cdbb8))
+
+## [6.4.0](https://github.com/googleapis/nodejs-spanner/compare/v6.3.0...v6.4.0) (2022-10-27)
+
+
+### Features
+
+* Adding support and samples for Jsonb data type in spangres ([#1729](https://github.com/googleapis/nodejs-spanner/issues/1729)) ([f050354](https://github.com/googleapis/nodejs-spanner/commit/f0503547012ab0ac8a04524ecf7bc92807f35379))
+* Update result_set.proto to return undeclared parameters in ExecuteSql API ([eaa445e](https://github.com/googleapis/nodejs-spanner/commit/eaa445ed314190abefc17e3672bb5e200142618b))
+* Update transaction.proto to include different lock modes ([#1723](https://github.com/googleapis/nodejs-spanner/issues/1723)) ([eaa445e](https://github.com/googleapis/nodejs-spanner/commit/eaa445ed314190abefc17e3672bb5e200142618b))
+
+## [6.3.0](https://github.com/googleapis/nodejs-spanner/compare/v6.2.0...v6.3.0) (2022-10-03)
+
+
+### Features
+
+* Support customer managed instance configurations ([#1611](https://github.com/googleapis/nodejs-spanner/issues/1611)) ([bbe8f69](https://github.com/googleapis/nodejs-spanner/commit/bbe8f697e8838e358973cd4a5f2db9e2d4df5349))
+
+
+### Bug Fixes
+
+* **deps:** Update dependency @google-cloud/precise-date to v3 ([#1676](https://github.com/googleapis/nodejs-spanner/issues/1676)) ([3f20ec4](https://github.com/googleapis/nodejs-spanner/commit/3f20ec47bbf89e1f72546a8ebf41a8b4ba93832f))
+* Do not import the whole google-gax from proto JS ([#1553](https://github.com/googleapis/nodejs-spanner/issues/1553)) ([#1700](https://github.com/googleapis/nodejs-spanner/issues/1700)) ([f9c2640](https://github.com/googleapis/nodejs-spanner/commit/f9c2640e054659a2e8299b8f989fa7936d04b0d7))
+* use google-gax v3.3.0 ([f9c2640](https://github.com/googleapis/nodejs-spanner/commit/f9c2640e054659a2e8299b8f989fa7936d04b0d7))
+
+## [6.2.0](https://github.com/googleapis/nodejs-spanner/compare/v6.1.4...v6.2.0) (2022-09-16)
+
+
+### Features
+
+* Add custom instance config operations ([#1712](https://github.com/googleapis/nodejs-spanner/issues/1712)) ([4b7716b](https://github.com/googleapis/nodejs-spanner/commit/4b7716be5409698e21bb79edec5cdf1019047de8))
+
+
+### Bug Fixes
+
+* Allow passing gax instance to client constructor ([#1698](https://github.com/googleapis/nodejs-spanner/issues/1698)) ([588c1a2](https://github.com/googleapis/nodejs-spanner/commit/588c1a2e0c449cfcb86cac73da32dd5794ee2baa))
+* **deps:** Use grpc-gcp v1.0.0 ([#1710](https://github.com/googleapis/nodejs-spanner/issues/1710)) ([12eab9d](https://github.com/googleapis/nodejs-spanner/commit/12eab9d628b72b5a7fc88f3d5e932b7a4d70dce2))
+* Move runtime dependencies from dev dependencies to dependencies ([#1704](https://github.com/googleapis/nodejs-spanner/issues/1704)) ([b2c1c0f](https://github.com/googleapis/nodejs-spanner/commit/b2c1c0f93653af6cc7bd9893ca14394f2a631b68))
+* Preserve default values in x-goog-request-params header ([#1711](https://github.com/googleapis/nodejs-spanner/issues/1711)) ([f1ae513](https://github.com/googleapis/nodejs-spanner/commit/f1ae51301d4ea9b0ed1ad4d4762c249fef9f8d08))
+
+## [6.1.4](https://github.com/googleapis/nodejs-spanner/compare/v6.1.3...v6.1.4) (2022-09-06)
+
+
+### Bug Fixes
+
+* Add hashes to requirements.txt ([#1544](https://github.com/googleapis/nodejs-spanner/issues/1544)) ([#1697](https://github.com/googleapis/nodejs-spanner/issues/1697)) ([61a1468](https://github.com/googleapis/nodejs-spanner/commit/61a1468fb0282bad642e643fc98a19d63acdcd1c))
+* Better support for fallback mode ([#1694](https://github.com/googleapis/nodejs-spanner/issues/1694)) ([bbc8831](https://github.com/googleapis/nodejs-spanner/commit/bbc88317149a3d86c50ccfd98092d5bfb77a104e))
+* Change import long to require ([#1695](https://github.com/googleapis/nodejs-spanner/issues/1695)) ([9283f4b](https://github.com/googleapis/nodejs-spanner/commit/9283f4bdfaea0ceececacbb80df3c37bd522b657))
+* **deps:** Update dependency @google-cloud/projectify to v3 ([#1678](https://github.com/googleapis/nodejs-spanner/issues/1678)) ([e3c1499](https://github.com/googleapis/nodejs-spanner/commit/e3c1499d0bdcbe3b578c5e7dc1d725630a1a0a30))
+* **deps:** Update dependency protobufjs to v7 ([#1686](https://github.com/googleapis/nodejs-spanner/issues/1686)) ([2839d23](https://github.com/googleapis/nodejs-spanner/commit/2839d2317ca7368d288ee9d7feb806f0ac2069c6))
+* Target new spanner db admin service config ([#1685](https://github.com/googleapis/nodejs-spanner/issues/1685)) ([2495c07](https://github.com/googleapis/nodejs-spanner/commit/2495c0723be70cf679ffa9e86f45199dbcd8c77b))
+* Test case fix to avoid the latest typescript dependency issue ([#1703](https://github.com/googleapis/nodejs-spanner/issues/1703)) ([6282f64](https://github.com/googleapis/nodejs-spanner/commit/6282f64560510ae54be26d992d168091f7e943bc))
+
+## [6.1.3](https://github.com/googleapis/nodejs-spanner/compare/v6.1.2...v6.1.3) (2022-07-07)
+
+
+### Bug Fixes
+
+* **deps:** update dependency @google-cloud/common to v4 ([#1663](https://github.com/googleapis/nodejs-spanner/issues/1663)) ([487c58c](https://github.com/googleapis/nodejs-spanner/commit/487c58ce2a2dbf21cdc1b43ea53d68ea6edbfd81))
+
+## [6.1.2](https://github.com/googleapis/nodejs-spanner/compare/v6.1.1...v6.1.2) (2022-07-07)
+
+
+### Bug Fixes
+
+* **deps:** update dependency @google-cloud/kms to v3 ([#1664](https://github.com/googleapis/nodejs-spanner/issues/1664)) ([42f41e9](https://github.com/googleapis/nodejs-spanner/commit/42f41e99f3cba9c3bdb981f70d6423c48adbc0d6))
+
+## [6.1.1](https://github.com/googleapis/nodejs-spanner/compare/v6.1.0...v6.1.1) (2022-07-06)
+
+
+### Bug Fixes
+
+* call Promise.race without a long pending promise to prevent memory leak ([#1657](https://github.com/googleapis/nodejs-spanner/issues/1657)) ([768acb6](https://github.com/googleapis/nodejs-spanner/commit/768acb6279914dfe84e372afc1d83dd76ca3dd4d))
+* **deps:** update dependency yargs to v17 ([#1537](https://github.com/googleapis/nodejs-spanner/issues/1537)) ([1039f68](https://github.com/googleapis/nodejs-spanner/commit/1039f68c7b459c2abeef4388fd8541576d374b66))
+
+## [6.1.0](https://github.com/googleapis/nodejs-spanner/compare/v6.0.0...v6.1.0) (2022-07-04)
+
+
+### Features
+
+* add Session creator role ([91ef6d3](https://github.com/googleapis/nodejs-spanner/commit/91ef6d373a1ed2c7e191de4003571270bfc4e895))
+* Adding two new fields for Instance create_time and update_time ([#1641](https://github.com/googleapis/nodejs-spanner/issues/1641)) ([91ef6d3](https://github.com/googleapis/nodejs-spanner/commit/91ef6d373a1ed2c7e191de4003571270bfc4e895))
+
+
+### Bug Fixes
+
+* **deps:** update dependency @google-cloud/promisify to v3 ([#1629](https://github.com/googleapis/nodejs-spanner/issues/1629)) ([1467956](https://github.com/googleapis/nodejs-spanner/commit/1467956314c77f66034fa3db166ba68d7c2aba2d))
+* Improve spanner.date handling of years before 1000AD ([#1654](https://github.com/googleapis/nodejs-spanner/issues/1654)) ([fd89a29](https://github.com/googleapis/nodejs-spanner/commit/fd89a294dcab017dbbe7000bce613b0d4ed60f96))
+
+## [6.0.0](https://github.com/googleapis/nodejs-spanner/compare/v5.18.0...v6.0.0) (2022-06-07)
+
+
+### ⚠ BREAKING CHANGES
+
+* update library to use Node 12 (#1637)
+
+### Features
+
+* Adding IT for date and commit timestamp ([#1621](https://github.com/googleapis/nodejs-spanner/issues/1621)) ([1367aa7](https://github.com/googleapis/nodejs-spanner/commit/1367aa7dc9818be5610dfc5ae67d09652e7009e5))
+* AuditConfig for IAM v1 ([#1599](https://github.com/googleapis/nodejs-spanner/issues/1599)) ([c358d66](https://github.com/googleapis/nodejs-spanner/commit/c358d668ca2a25f99ab73a4b6c1ebbe09c34d4de))
+
+
+### Bug Fixes
+
+* **deps:** update dependency grpc-gcp to ^0.4.0 ([#1603](https://github.com/googleapis/nodejs-spanner/issues/1603)) ([f00b3c6](https://github.com/googleapis/nodejs-spanner/commit/f00b3c65e58c2f36e69a05fef0b3c9bc68a4ee55))
+* fixes for dynamic routing and streaming descriptors ([#1639](https://github.com/googleapis/nodejs-spanner/issues/1639)) ([977a543](https://github.com/googleapis/nodejs-spanner/commit/977a543d693ca2f2e8bb303be6df592aa4def1dd))
+* pin version for nodejs gax-node ([#1617](https://github.com/googleapis/nodejs-spanner/issues/1617)) ([fb0017f](https://github.com/googleapis/nodejs-spanner/commit/fb0017ffab3cfa41cd132df4511a47fb68439bf5))
+
+
+### Build System
+
+* update library to use Node 12 ([#1637](https://github.com/googleapis/nodejs-spanner/issues/1637)) ([994acf3](https://github.com/googleapis/nodejs-spanner/commit/994acf3edd7c261085f58722fa2f86f95b3a56f3))
+
+## [5.18.0](https://github.com/googleapis/nodejs-spanner/compare/v5.17.0...v5.18.0) (2022-04-03)
+
+
+### Features
+
+* add support for Cross region backup proto changes ([#1587](https://github.com/googleapis/nodejs-spanner/issues/1587)) ([9439ca4](https://github.com/googleapis/nodejs-spanner/commit/9439ca4cf260923a7cb90e0568864cb719ab8fc6))
+* integration testing for postgres dialect ([#1593](https://github.com/googleapis/nodejs-spanner/issues/1593)) ([ebe06a6](https://github.com/googleapis/nodejs-spanner/commit/ebe06a6cefbeacc37c40f2474b9d265b78c846e2))
+* Postgres Numeric and database support ([#1592](https://github.com/googleapis/nodejs-spanner/issues/1592)) ([7ca3975](https://github.com/googleapis/nodejs-spanner/commit/7ca3975c5e25e78983f77df9e921642c90874f90))
+* Spanner copy backup ([#1530](https://github.com/googleapis/nodejs-spanner/issues/1530)) ([cefb1b4](https://github.com/googleapis/nodejs-spanner/commit/cefb1b4c831997e5c52122c5e6c3fd9cd9cb2c76))
+
+
+### Bug Fixes
+
+* removing table_catalog from schema information ([#1595](https://github.com/googleapis/nodejs-spanner/issues/1595)) ([8bcbd95](https://github.com/googleapis/nodejs-spanner/commit/8bcbd95e7423f03aa16ffd500fca998f75c8d0cf))
+
+## [5.17.0](https://github.com/googleapis/nodejs-spanner/compare/v5.16.3...v5.17.0) (2022-03-09)
+
+
+### Features
+
+* Refactor create database options schema to accept array ([#1578](https://github.com/googleapis/nodejs-spanner/issues/1578)) ([b1c88ac](https://github.com/googleapis/nodejs-spanner/commit/b1c88accee3770bb94e6a7d73e767eaa426a86a3))
+
+### [5.16.3](https://github.com/googleapis/nodejs-spanner/compare/v5.16.3...v5.16.3) (2022-01-31)
+
+
+### Miscellaneous Chores
+
+* Release-As: 5.16.3 ([#1556](https://github.com/googleapis/nodejs-spanner/issues/1556)) ([31f5fbc](https://github.com/googleapis/nodejs-spanner/commit/31f5fbc6d890fd7296497d6992a7d186ea786476))
+
+### [5.16.3](https://github.com/googleapis/nodejs-spanner/compare/v5.16.3...v5.16.3) (2022-01-21)
+
+
+### Miscellaneous Chores
+
+* Release-As: 5.16.3 ([#1556](https://github.com/googleapis/nodejs-spanner/issues/1556)) ([31f5fbc](https://github.com/googleapis/nodejs-spanner/commit/31f5fbc6d890fd7296497d6992a7d186ea786476))
+
+### [5.16.3](https://github.com/googleapis/nodejs-spanner/compare/v5.16.2...v5.16.3) (2022-01-19)
+
+
+### Bug Fixes
+
+* for merging when array/struct chunks contain null ([#1541](https://github.com/googleapis/nodejs-spanner/issues/1541)) ([72871fc](https://github.com/googleapis/nodejs-spanner/commit/72871fca5b67aec3af633484ff70a73be55372be))
+
+### [5.16.2](https://github.com/googleapis/nodejs-spanner/compare/v5.16.1...v5.16.2) (2022-01-17)
+
+
+### Bug Fixes
+
+* fix for new @types/node version ([#1542](https://github.com/googleapis/nodejs-spanner/issues/1542)) ([4b56c58](https://github.com/googleapis/nodejs-spanner/commit/4b56c584fa85ab8b22867157566ce7245daa8d8d))
+
+### [5.16.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.16.0...v5.16.1) (2021-12-29)
+
+
+### Bug Fixes
+
+* change in region ([#1523](https://www.github.com/googleapis/nodejs-spanner/issues/1523)) ([6caefc4](https://www.github.com/googleapis/nodejs-spanner/commit/6caefc4a0aafaa786a1d4b3fe3501d005d03bf6e))
+
+## [5.16.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.15.2...v5.16.0) (2021-12-09)
+
+
+### Features
+
+* add eslintignore for sample generated code ([#1302](https://www.github.com/googleapis/nodejs-spanner/issues/1302)) ([#1520](https://www.github.com/googleapis/nodejs-spanner/issues/1520)) ([f835b72](https://www.github.com/googleapis/nodejs-spanner/commit/f835b721210d01b11d7f5751ee06f13518e7fe0f))
+
+
+### Bug Fixes
+
+* **build:** set default branch to main ([#1469](https://www.github.com/googleapis/nodejs-spanner/issues/1469)) ([152985a](https://www.github.com/googleapis/nodejs-spanner/commit/152985a1f783534e6b3e3ce332a1333dec67269d))
+* **cloud-rad:** move comments for TSDoc ([#1509](https://www.github.com/googleapis/nodejs-spanner/issues/1509)) ([1c49922](https://www.github.com/googleapis/nodejs-spanner/commit/1c49922c75bd56dbd0456318bee8a336eb088156))
+
+### [5.15.2](https://www.github.com/googleapis/nodejs-spanner/compare/v5.15.1...v5.15.2) (2021-09-10)
+
+
+### Bug Fixes
+
+* never try to create a negative number of sessions ([#1467](https://www.github.com/googleapis/nodejs-spanner/issues/1467)) ([13f5153](https://www.github.com/googleapis/nodejs-spanner/commit/13f51537ab13e0cd5a1fb9142f76796b1911809c))
+
+### [5.15.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.15.0...v5.15.1) (2021-09-08)
+
+
+### Bug Fixes
+
+* **deps:** google-gax v2.17.1 ([#1429](https://www.github.com/googleapis/nodejs-spanner/issues/1429)) ([3a1517c](https://www.github.com/googleapis/nodejs-spanner/commit/3a1517cff95bd00598935b30859c7991b4d4c4ca))
+
+## [5.15.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.14.0...v5.15.0) (2021-08-26)
+
+
+### Features
+
+* add support for JSON data type ([#1368](https://www.github.com/googleapis/nodejs-spanner/issues/1368)) ([b8d6fe5](https://www.github.com/googleapis/nodejs-spanner/commit/b8d6fe5b1e767576ba42d57d2e4e4597bca27883))
+
+## [5.14.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.13.1...v5.14.0) (2021-08-24)
+
+
+### Features
+
+* turns on self-signed JWT feature flag ([#1455](https://www.github.com/googleapis/nodejs-spanner/issues/1455)) ([2867a80](https://www.github.com/googleapis/nodejs-spanner/commit/2867a80319c07b4d40e88026409722f26db47631))
+
+### [5.13.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.13.0...v5.13.1) (2021-08-17)
+
+
+### Bug Fixes
+
+* **deps:** google-gax v2.24.1 ([#1452](https://www.github.com/googleapis/nodejs-spanner/issues/1452)) ([7379eb2](https://www.github.com/googleapis/nodejs-spanner/commit/7379eb260a8fe4a37b71ccf9ad9e2e17d9669c5f))
+
+## [5.13.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.12.0...v5.13.0) (2021-08-04)
+
+
+### Features
+
+* add GetInstanceConfig function ([#1438](https://www.github.com/googleapis/nodejs-spanner/issues/1438)) ([24b3524](https://www.github.com/googleapis/nodejs-spanner/commit/24b35242d3ce52e16a5fa04ff949ca44a6608396))
+
+
+### Bug Fixes
+
+* adding option to skip back up tests ([#1445](https://www.github.com/googleapis/nodejs-spanner/issues/1445)) ([e189e5a](https://www.github.com/googleapis/nodejs-spanner/commit/e189e5a03c6e40d3adc64233e72d9d9748e0bc0a))
+
+## [5.12.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.11.1...v5.12.0) (2021-07-08)
+
+
+### Features
+
+* add tagging support ([#1419](https://www.github.com/googleapis/nodejs-spanner/issues/1419)) ([4770dab](https://www.github.com/googleapis/nodejs-spanner/commit/4770dab607e50e81d79c8c4c8fafbb278cb08954))
+
+### [5.11.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.11.0...v5.11.1) (2021-07-07)
+
+
+### Bug Fixes
+
+* add close method to Spanner client ([#1416](https://www.github.com/googleapis/nodejs-spanner/issues/1416)) ([69cd0b4](https://www.github.com/googleapis/nodejs-spanner/commit/69cd0b474ab6c836724813fd8bea88ec2e1ac9f5)), closes [#1306](https://www.github.com/googleapis/nodejs-spanner/issues/1306)
+
+## [5.11.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.10.0...v5.11.0) (2021-07-01)
+
+
+### Features
+
+* **spanner:** add leader_options to InstanceConfig and default_leader to Database ([#1414](https://www.github.com/googleapis/nodejs-spanner/issues/1414)) ([e67adc2](https://www.github.com/googleapis/nodejs-spanner/commit/e67adc281d603d741af49d957eff05fd4184d38e))
+
+## [5.10.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.9.3...v5.10.0) (2021-06-30)
+
+
+### Features
+
+* create instances with processing units ([#1279](https://www.github.com/googleapis/nodejs-spanner/issues/1279)) ([05c2135](https://www.github.com/googleapis/nodejs-spanner/commit/05c213522a32627186ad9b474b416c1b9996df1f))
+
+
+### Bug Fixes
+
+* replace projectId placeholder in formatted names ([#1407](https://www.github.com/googleapis/nodejs-spanner/issues/1407)) ([4364d2b](https://www.github.com/googleapis/nodejs-spanner/commit/4364d2b25638384a6a1bea2e283b1219b4e5cdf3)), closes [#1375](https://www.github.com/googleapis/nodejs-spanner/issues/1375)
+
+### [5.9.3](https://www.github.com/googleapis/nodejs-spanner/compare/v5.9.2...v5.9.3) (2021-06-29)
+
+
+### Bug Fixes
+
+* **deps:** require google-gax v2.17.0 ([#1409](https://www.github.com/googleapis/nodejs-spanner/issues/1409)) ([080d82f](https://www.github.com/googleapis/nodejs-spanner/commit/080d82f455324d2010187904532032e7905e14ac))
+
+### [5.9.2](https://www.github.com/googleapis/nodejs-spanner/compare/v5.9.1...v5.9.2) (2021-06-25)
+
+
+### Bug Fixes
+
+* reset buffered chunked value before retry ([#1397](https://www.github.com/googleapis/nodejs-spanner/issues/1397)) ([da2ca7b](https://www.github.com/googleapis/nodejs-spanner/commit/da2ca7b15539119fada7869c206ad24460d8edfa)), closes [#1392](https://www.github.com/googleapis/nodejs-spanner/issues/1392)
+
+### [5.9.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.9.0...v5.9.1) (2021-06-24)
+
+
+### Bug Fixes
+
+* make request optional in all cases ([#1400](https://www.github.com/googleapis/nodejs-spanner/issues/1400)) ([0b78770](https://www.github.com/googleapis/nodejs-spanner/commit/0b78770bfef6f463abb0f336999f7dfd61b5b2fe))
+
+## [5.9.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.8.1...v5.9.0) (2021-06-14)
+
+
+### Features
+
+* **spanner:** add processing_units to Instance resource ([#1398](https://www.github.com/googleapis/nodejs-spanner/issues/1398)) ([878cd3f](https://www.github.com/googleapis/nodejs-spanner/commit/878cd3f1596526b6e4e2457babd3dc2c2add11ad))
+
+### [5.8.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.8.0...v5.8.1) (2021-06-10)
+
+
+### Bug Fixes
+
+* unknown errors should not be retried ([#1388](https://www.github.com/googleapis/nodejs-spanner/issues/1388)) ([1d6f4e2](https://www.github.com/googleapis/nodejs-spanner/commit/1d6f4e2923bc1ac20c0a73c342332ec2ae259812)), closes [#1387](https://www.github.com/googleapis/nodejs-spanner/issues/1387)
+
+## [5.8.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.7.0...v5.8.0) (2021-06-07)
+
+
+### Features
+
+* support setting `optimizerStatisticsPackage` ([#1225](https://www.github.com/googleapis/nodejs-spanner/issues/1225)) ([dadc6dc](https://www.github.com/googleapis/nodejs-spanner/commit/dadc6dcf5c01e1bb380555fa9ea2ba9182af049c))
+
+
+### Bug Fixes
+
+* ensure table funcs accept gaxOptions directly ([#1371](https://www.github.com/googleapis/nodejs-spanner/issues/1371)) ([2c57c16](https://www.github.com/googleapis/nodejs-spanner/commit/2c57c1631a93d545bab52e309a5acd7641a747f3))
+* lint issue ([#1372](https://www.github.com/googleapis/nodejs-spanner/issues/1372)) ([3be0b4b](https://www.github.com/googleapis/nodejs-spanner/commit/3be0b4b51c8a76c7682101851d94e0611a87bc24))
+
+## [5.7.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.6.1...v5.7.0) (2021-04-21)
+
+
+### Features
+
+* regenerate protos with new types ([#1335](https://www.github.com/googleapis/nodejs-spanner/issues/1335)) ([cc6980e](https://www.github.com/googleapis/nodejs-spanner/commit/cc6980e364ea641f55b4ff1a765b22333352419a))
+* support RPC priority ([#1282](https://www.github.com/googleapis/nodejs-spanner/issues/1282)) ([8c82694](https://www.github.com/googleapis/nodejs-spanner/commit/8c8269437291a96aaed97db6684f7c8907f1fe43))
+
+
+### Bug Fixes
+
+* prevent unhandled promise rejection while projectId or credential not found ([#1340](https://www.github.com/googleapis/nodejs-spanner/issues/1340)) ([47ce076](https://www.github.com/googleapis/nodejs-spanner/commit/47ce0765cce4bdf8513917c86ec1db9c53f97618))
+* prevent unhandled promise rejections while creating session ([#1332](https://www.github.com/googleapis/nodejs-spanner/issues/1332)) ([b62bf5e](https://www.github.com/googleapis/nodejs-spanner/commit/b62bf5e1a96c495f73512a97419ecf98915b457e))
+
+### [5.6.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.6.0...v5.6.1) (2021-03-30)
+
+
+### Bug Fixes
+
+* remove acquire timeout listener on return of session ([#1327](https://www.github.com/googleapis/nodejs-spanner/issues/1327)) ([72c7cce](https://www.github.com/googleapis/nodejs-spanner/commit/72c7cce0cc00631a0ce46cdb2bf66a0ee48d615b)), closes [#1324](https://www.github.com/googleapis/nodejs-spanner/issues/1324)
+
+## [5.6.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.5.0...v5.6.0) (2021-03-20)
+
+
+### Features
+
+* customer-managed encryption keys ([#1274](https://www.github.com/googleapis/nodejs-spanner/issues/1274)) ([51cabc7](https://www.github.com/googleapis/nodejs-spanner/commit/51cabc7a6d8c96a86acbbeea3a357c261248ddb4))
+
+
+### Bug Fixes
+
+* remove common protos ([#1320](https://www.github.com/googleapis/nodejs-spanner/issues/1320)) ([a73f9fc](https://www.github.com/googleapis/nodejs-spanner/commit/a73f9fc534019186e262a3e5ac6a78f156e7a56d))
+* run [spanner_batch_client] independently ([#1318](https://www.github.com/googleapis/nodejs-spanner/issues/1318)) ([3844ff8](https://www.github.com/googleapis/nodejs-spanner/commit/3844ff89cb53aeeefe7309ce82f31200a0de3ae2))
+
+## [5.5.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.4.0...v5.5.0) (2021-02-19)
+
+
+### Features
+
+* add option for returning Spanner commit stats ([#1297](https://www.github.com/googleapis/nodejs-spanner/issues/1297)) ([bc286e2](https://www.github.com/googleapis/nodejs-spanner/commit/bc286e24b1d6f83cd09bdaad2023e8347ab1d12e))
+* adds PITR fields to backup and database ([#1299](https://www.github.com/googleapis/nodejs-spanner/issues/1299)) ([d7556c8](https://www.github.com/googleapis/nodejs-spanner/commit/d7556c89b92e2a9ab65f1f928faf8c452bf24a7c))
+* adds style enumeration ([#1292](https://www.github.com/googleapis/nodejs-spanner/issues/1292)) ([dcf7013](https://www.github.com/googleapis/nodejs-spanner/commit/dcf7013907f8e232d0b99303a6d30be598944db9))
+* CommitStats in CommitResponse ([#1254](https://www.github.com/googleapis/nodejs-spanner/issues/1254)) ([e3730d2](https://www.github.com/googleapis/nodejs-spanner/commit/e3730d219fb395d1ce8416b1e7cdecb0c8ad995e))
+* Point In Time Recovery (PITR) ([#1250](https://www.github.com/googleapis/nodejs-spanner/issues/1250)) ([c53f677](https://www.github.com/googleapis/nodejs-spanner/commit/c53f677fe33ca5ed6fc65e8ee350f365d03a7642))
+* return ResultSetMetadata for query ([#1308](https://www.github.com/googleapis/nodejs-spanner/issues/1308)) ([6625ef2](https://www.github.com/googleapis/nodejs-spanner/commit/6625ef2168ade370596ffefe92fc75640cf9f6f1))
+
+
+### Bug Fixes
+
+* **deps:** update dependency google-auth-library to v7 ([#1305](https://www.github.com/googleapis/nodejs-spanner/issues/1305)) ([329c901](https://www.github.com/googleapis/nodejs-spanner/commit/329c901927885c6fd34f99c71abead15ff10f7d8))
+* **sample-test:** ensure instance is created before proceeding with tests ([#1291](https://www.github.com/googleapis/nodejs-spanner/issues/1291)) ([577357a](https://www.github.com/googleapis/nodejs-spanner/commit/577357ab8f8cdac70f5483312c7618c6e403fd26))
+* wrong gaxOptions argument in sample ([#1294](https://www.github.com/googleapis/nodejs-spanner/issues/1294)) ([8fec23a](https://www.github.com/googleapis/nodejs-spanner/commit/8fec23a28accbaa28cfb980bac406c50b1935e32))
+
+## [5.4.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.3.0...v5.4.0) (2020-12-02)
+
+
+### Features
+
+* support callbacks with database getRestoreInfo(), getState(), getOperations() ([#1230](https://www.github.com/googleapis/nodejs-spanner/issues/1230)) ([b56758b](https://www.github.com/googleapis/nodejs-spanner/commit/b56758b0e832c6471d14bd88b4580d21d5696fdd))
+
+
+### Bug Fixes
+
+* **browser:** check for fetch on window ([32ac608](https://www.github.com/googleapis/nodejs-spanner/commit/32ac6082383d5265f5022d97f23173e8786f4a82))
+* do not modify options object, use defaultScopes ([#1264](https://www.github.com/googleapis/nodejs-spanner/issues/1264)) ([6628c6a](https://www.github.com/googleapis/nodejs-spanner/commit/6628c6a81d427b8bb8bb1a42ae63f991b1cf73c9))
+* **deps:** update dependency big.js to v6 ([#1244](https://www.github.com/googleapis/nodejs-spanner/issues/1244)) ([259a51e](https://www.github.com/googleapis/nodejs-spanner/commit/259a51ee0726aa8f0b7717acd5253ecb77b16038))
+* do not create sessions after getDatabases call ([#1228](https://www.github.com/googleapis/nodejs-spanner/issues/1228)) ([53d5f37](https://www.github.com/googleapis/nodejs-spanner/commit/53d5f371d54c64dd095ac9ec721d05adf2c7d064))
+* **deps:** update dependency @google-cloud/precise-date to v2 ([#1240](https://www.github.com/googleapis/nodejs-spanner/issues/1240)) ([38dfec2](https://www.github.com/googleapis/nodejs-spanner/commit/38dfec22dd00f4d69750fc20e66c2395a9c6d3b3))
+* **deps:** update dependency yargs to v16 ([#1233](https://www.github.com/googleapis/nodejs-spanner/issues/1233)) ([75fd09a](https://www.github.com/googleapis/nodejs-spanner/commit/75fd09acb5a9a728a8b6403f44351e4b9b44b723))
+
+## [5.3.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.2.1...v5.3.0) (2020-09-04)
+
+
+### Features
+
+* accept gaxOptions in Database and Session #delete() ([#1206](https://www.github.com/googleapis/nodejs-spanner/issues/1206)) ([2444e8e](https://www.github.com/googleapis/nodejs-spanner/commit/2444e8e45f409e783fcbf059d487d50eb4e12d88))
+* add resource header prefix ([#1196](https://www.github.com/googleapis/nodejs-spanner/issues/1196)) ([99744b6](https://www.github.com/googleapis/nodejs-spanner/commit/99744b6e3c7e61e185949913279edae35a234dce))
+* return full nextQuery object in paginated calls ([#1199](https://www.github.com/googleapis/nodejs-spanner/issues/1199)) ([445015f](https://www.github.com/googleapis/nodejs-spanner/commit/445015f82243611fc8de3213957fda352fff4783))
+* set metadata during getBackups and getMetadata calls ([#1198](https://www.github.com/googleapis/nodejs-spanner/issues/1198)) ([aa500a5](https://www.github.com/googleapis/nodejs-spanner/commit/aa500a517a4f825f4440936158f72a0bcc63cd35))
+* spanner NUMERIC support ([#1163](https://www.github.com/googleapis/nodejs-spanner/issues/1163)) ([4724ba3](https://www.github.com/googleapis/nodejs-spanner/commit/4724ba3937de9481356084fc5e8254d8691583e5))
+* Spanner numeric type, add Node 8 tests ([#1189](https://www.github.com/googleapis/nodejs-spanner/issues/1189)) ([c2bc40e](https://www.github.com/googleapis/nodejs-spanner/commit/c2bc40e9b71b658d68c7377d5598cc1a0ef0f75d))
+
+
+### Bug Fixes
+
+* `Database not found` could be returned after create() ([#1220](https://www.github.com/googleapis/nodejs-spanner/issues/1220)) ([1fddbb9](https://www.github.com/googleapis/nodejs-spanner/commit/1fddbb9847068a607e72993c2c4c09b796b70089)), closes [#1219](https://www.github.com/googleapis/nodejs-spanner/issues/1219) [#1219](https://www.github.com/googleapis/nodejs-spanner/issues/1219)
+* batch transaction should use a session from the pool ([#1207](https://www.github.com/googleapis/nodejs-spanner/issues/1207)) ([0708baa](https://www.github.com/googleapis/nodejs-spanner/commit/0708baab276def6cad0f0f3c4b7589084380c2d8)), closes [#1200](https://www.github.com/googleapis/nodejs-spanner/issues/1200)
+* handle potential errors when creating stream ([#1208](https://www.github.com/googleapis/nodejs-spanner/issues/1208)) ([fcf35f5](https://www.github.com/googleapis/nodejs-spanner/commit/fcf35f5749b47b47425285eca5da47b987b0e7cf)), closes [#1078](https://www.github.com/googleapis/nodejs-spanner/issues/1078)
+* retry PDML on Aborted and Internal errors ([#1205](https://www.github.com/googleapis/nodejs-spanner/issues/1205)) ([2b97bac](https://www.github.com/googleapis/nodejs-spanner/commit/2b97bacf4188f2344f23971ec667d3e20f04d420)), closes [#1197](https://www.github.com/googleapis/nodejs-spanner/issues/1197)
+* update minimum gax version to 2.7.0 ([#1213](https://www.github.com/googleapis/nodejs-spanner/issues/1213)) ([224de8f](https://www.github.com/googleapis/nodejs-spanner/commit/224de8f0e69e2e951be6596d83079012736cdc20)), closes [#1209](https://www.github.com/googleapis/nodejs-spanner/issues/1209)
+* **spanner:** update UpdateBackup to be retryable ([#1194](https://www.github.com/googleapis/nodejs-spanner/issues/1194)) ([e53a247](https://www.github.com/googleapis/nodejs-spanner/commit/e53a2471beff22a61c8e17098ba1a9b6cff3caf0))
+* typo in nodejs .gitattributes ([#1178](https://www.github.com/googleapis/nodejs-spanner/issues/1178)) ([439d5af](https://www.github.com/googleapis/nodejs-spanner/commit/439d5af2124cc02f24e097d8101f9d6843de9b20))
+
+### [5.2.1](https://www.github.com/googleapis/nodejs-spanner/compare/v5.2.0...v5.2.1) (2020-07-07)
+
+
+### Bug Fixes
+
+* remove error listener to prevent memory leak ([#1168](https://www.github.com/googleapis/nodejs-spanner/issues/1168)) ([523bd67](https://www.github.com/googleapis/nodejs-spanner/commit/523bd67ed6d5ecbfe9abce0f1b6ed4cce2c07b30))
+
+
+### Performance Improvements
+
+* increase default min sessions to 25 ([#1167](https://www.github.com/googleapis/nodejs-spanner/issues/1167)) ([e4aba27](https://www.github.com/googleapis/nodejs-spanner/commit/e4aba27d307e5932b223121af5bea37a7418bb20))
+
+## [5.2.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.1.0...v5.2.0) (2020-06-30)
+
+
+### Features
+
+* add code sample for creating an instance ([#1073](https://www.github.com/googleapis/nodejs-spanner/issues/1073)) ([ab6dc62](https://www.github.com/googleapis/nodejs-spanner/commit/ab6dc62061e6893ec170b07fc3ffebbd2a4179f8))
+* **secrets:** begin migration to secret manager from keystore ([#1092](https://www.github.com/googleapis/nodejs-spanner/issues/1092)) ([2031652](https://www.github.com/googleapis/nodejs-spanner/commit/2031652e062004d06a18605dc45fdd00bd52989f))
+
+
+### Bug Fixes
+
+* handle fallback option properly ([#1146](https://www.github.com/googleapis/nodejs-spanner/issues/1146)) ([70d3f2c](https://www.github.com/googleapis/nodejs-spanner/commit/70d3f2c1cd89f71d777b6bc06b48931b8e075417))
+* **samples-test:** race condition in deleteData sample ([#1156](https://www.github.com/googleapis/nodejs-spanner/issues/1156)) ([39d8f0c](https://www.github.com/googleapis/nodejs-spanner/commit/39d8f0cf28f0d7df76d3d0f0d967d1ce574df3ce))
+* race condition in "should transfer value from one record to another using DML statements within a transaction test" ([#1159](https://www.github.com/googleapis/nodejs-spanner/issues/1159)) ([0c46714](https://www.github.com/googleapis/nodejs-spanner/commit/0c4671460b265965482afb1894deacdc60900ee7))
+* set displayName in CreateInstance sample ([#1145](https://www.github.com/googleapis/nodejs-spanner/issues/1145)) ([f9e47d9](https://www.github.com/googleapis/nodejs-spanner/commit/f9e47d9e1b9067b7488ad73b67b656efc4bb93d7))
+* set instanceId to the given id ([#1094](https://www.github.com/googleapis/nodejs-spanner/issues/1094)) ([8973cbc](https://www.github.com/googleapis/nodejs-spanner/commit/8973cbcc9158b37650af2edb6015c575da1cc3ec)), closes [#1093](https://www.github.com/googleapis/nodejs-spanner/issues/1093)
+* unskip PDML tests when run against emulator ([#1150](https://www.github.com/googleapis/nodejs-spanner/issues/1150)) ([8465482](https://www.github.com/googleapis/nodejs-spanner/commit/8465482fad3b40b524f0d3e255983ddd75440e3e))
+* update DELETE samples to match docs ([#1072](https://www.github.com/googleapis/nodejs-spanner/issues/1072)) ([3336e04](https://www.github.com/googleapis/nodejs-spanner/commit/3336e04f6ff75539712d62ce2afabc70d8738150))
+* update node issue template ([#1157](https://www.github.com/googleapis/nodejs-spanner/issues/1157)) ([27d0699](https://www.github.com/googleapis/nodejs-spanner/commit/27d0699851c24b49a77ad7e2751804a7911d698c))
+
+
+### Performance Improvements
+
+* use write fraction when resizing pool ([#1031](https://www.github.com/googleapis/nodejs-spanner/issues/1031)) ([58f773b](https://www.github.com/googleapis/nodejs-spanner/commit/58f773b17459a96abcd3b5345aaaf497a2386840))
+
+## [5.1.0](https://www.github.com/googleapis/nodejs-spanner/compare/v5.0.0...v5.1.0) (2020-06-04)
+
+
+### Features
+
+* expose displayName in createInstance ([#798](https://www.github.com/googleapis/nodejs-spanner/issues/798)) ([39efda1](https://www.github.com/googleapis/nodejs-spanner/commit/39efda194d2d11a578f209e6c149b0ae2974ee27))
+* increase sessions in the pool in batches ([#963](https://www.github.com/googleapis/nodejs-spanner/issues/963)) ([91c53cb](https://www.github.com/googleapis/nodejs-spanner/commit/91c53cb6f6504f48ee3c974dbb8fb2821c226325))
+* support callbacks for exists(), getState(), getExpireTime() methods ([#1070](https://www.github.com/googleapis/nodejs-spanner/issues/1070)) ([7736080](https://www.github.com/googleapis/nodejs-spanner/commit/7736080f0e2a46c7ef8c44c278bff2bec2f28953))
+
+
+### Bug Fixes
+
+* always clean up stale instances if any ([#1030](https://www.github.com/googleapis/nodejs-spanner/issues/1030)) ([87c7edc](https://www.github.com/googleapis/nodejs-spanner/commit/87c7edcb2c3ba729b1e278bb191b695b0cd376cb))
+* pause request stream on backpressure ([#936](https://www.github.com/googleapis/nodejs-spanner/issues/936)) ([558692f](https://www.github.com/googleapis/nodejs-spanner/commit/558692f55cc551db2bd72464b130051a9b28378f)), closes [#934](https://www.github.com/googleapis/nodejs-spanner/issues/934)
+
+## [5.0.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.8.0...v5.0.0) (2020-05-14)
+
+
+### ⚠ BREAKING CHANGES
+
+* **types:** properly format listing methods with gaxOptions (#925)
+* **types:** types for createInstance (#805)
+* add typings for top level object (#781)
+* **deps:** update dependency @google-cloud/common to v3 (#875)
+* drop Node.js 8 support.
+
+### Features
+
+* add typings for top level object ([#781](https://www.github.com/googleapis/nodejs-spanner/issues/781)) ([c2b6f68](https://www.github.com/googleapis/nodejs-spanner/commit/c2b6f685c2f36866ddaa434c923be417de0f89ec))
+* check status of long running operation by its name ([#937](https://www.github.com/googleapis/nodejs-spanner/issues/937)) ([5035e11](https://www.github.com/googleapis/nodejs-spanner/commit/5035e11f55a28def0d524a8e6ea7671367cd345e))
+* run and runStream can return query stats ([#857](https://www.github.com/googleapis/nodejs-spanner/issues/857)) ([1656e4f](https://www.github.com/googleapis/nodejs-spanner/commit/1656e4f14f0dd24f530f36ecf1ccf34b51e726fb))
+* spanner backup and restore support ([#855](https://www.github.com/googleapis/nodejs-spanner/issues/855)) ([967903c](https://www.github.com/googleapis/nodejs-spanner/commit/967903c4152e283f5a09dbd1b8ab3c9bc66728d3))
+
+
+### Bug Fixes
+
+* **deps:** update dependency @google-cloud/common to v3 ([#875](https://www.github.com/googleapis/nodejs-spanner/issues/875)) ([f3da343](https://www.github.com/googleapis/nodejs-spanner/commit/f3da3430645ae277ae40410b6494ea8477937610))
+* **deps:** update dependency @google-cloud/paginator to v3 ([#871](https://www.github.com/googleapis/nodejs-spanner/issues/871)) ([d3b2f2c](https://www.github.com/googleapis/nodejs-spanner/commit/d3b2f2c48a21e41700877677bb2041b368773e36))
+* **deps:** update dependency @google-cloud/precise-date to v2 ([#873](https://www.github.com/googleapis/nodejs-spanner/issues/873)) ([8e8b29c](https://www.github.com/googleapis/nodejs-spanner/commit/8e8b29c389d68d0f872726655c9022d899a3ea3c))
+* **deps:** update dependency @google-cloud/projectify to v2 ([#870](https://www.github.com/googleapis/nodejs-spanner/issues/870)) ([e77460b](https://www.github.com/googleapis/nodejs-spanner/commit/e77460b6cc005049833f206f3fa74fc722ee3536))
+* **deps:** update dependency @google-cloud/promisify to v2 ([#868](https://www.github.com/googleapis/nodejs-spanner/issues/868)) ([afe4b15](https://www.github.com/googleapis/nodejs-spanner/commit/afe4b1518aadee91fb339512470e550295f89c4d))
+* **types:** fix type of ReadRequest ([#876](https://www.github.com/googleapis/nodejs-spanner/issues/876)) ([990fec2](https://www.github.com/googleapis/nodejs-spanner/commit/990fec20c482e11f48b8b7fbaacae8f395e93db9))
+* **types:** properly format listing methods with gaxOptions ([#925](https://www.github.com/googleapis/nodejs-spanner/issues/925)) ([23958ae](https://www.github.com/googleapis/nodejs-spanner/commit/23958ae48f49306cf38755831db091fef16998fb))
+* delete old instances then create new instance ([#955](https://www.github.com/googleapis/nodejs-spanner/issues/955)) ([96813f8](https://www.github.com/googleapis/nodejs-spanner/commit/96813f81913322f6c9a84aa9c7757029ce5f48eb))
+* remove eslint, update gax, fix generated protos, run the generator ([#897](https://www.github.com/googleapis/nodejs-spanner/issues/897)) ([7cfba21](https://www.github.com/googleapis/nodejs-spanner/commit/7cfba215b436e997919a9816bd076c62cce90bbf))
+* remove src/common-grpc/operation.ts ([#879](https://www.github.com/googleapis/nodejs-spanner/issues/879)) ([a30d2b4](https://www.github.com/googleapis/nodejs-spanner/commit/a30d2b47b2ccbbdf0d473281a4f76584c4850659)), closes [#878](https://www.github.com/googleapis/nodejs-spanner/issues/878)
+* remove typescript conversion leftovers ([#901](https://www.github.com/googleapis/nodejs-spanner/issues/901)) ([ccf1b61](https://www.github.com/googleapis/nodejs-spanner/commit/ccf1b61bcf060a72c35712c5d0e529fad9684724))
+* skip some tests when run against the emulator ([#933](https://www.github.com/googleapis/nodejs-spanner/issues/933)) ([2d91757](https://www.github.com/googleapis/nodejs-spanner/commit/2d917575dade110cbb3418d5d48c6fd0e77fae63))
+* update spanner package in sample ([#930](https://www.github.com/googleapis/nodejs-spanner/issues/930)) ([5624b7b](https://www.github.com/googleapis/nodejs-spanner/commit/5624b7bafb585adcbd0a7c9d53f728ff77afb1fa))
+* use DELETE FROM for consistency ([#923](https://www.github.com/googleapis/nodejs-spanner/issues/923)) ([0854c70](https://www.github.com/googleapis/nodejs-spanner/commit/0854c70d810ea05e5077d4c1a801040347415cfd))
+* **types:** types for createInstance ([#805](https://www.github.com/googleapis/nodejs-spanner/issues/805)) ([67b0f54](https://www.github.com/googleapis/nodejs-spanner/commit/67b0f54c65b51ff9cec313f67c4ea54dab7c8123))
+
+
+### Build System
+
+* drop node8 and convert to TypeScript ([#888](https://www.github.com/googleapis/nodejs-spanner/issues/888)) ([4116f81](https://www.github.com/googleapis/nodejs-spanner/commit/4116f81ec9715fde14b48e0daa6930bb8c502dbe))
+
+## [4.8.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.7.0...v4.8.0) (2020-03-12)
+
+
+### Features
+
+* add backups API ([#851](https://www.github.com/googleapis/nodejs-spanner/issues/851)) ([faf224d](https://www.github.com/googleapis/nodejs-spanner/commit/faf224dc4010337829eb0b6ecd7df274f1de5fff))
+* add support for QueryOptions ([#846](https://www.github.com/googleapis/nodejs-spanner/issues/846)) ([c1098c5](https://www.github.com/googleapis/nodejs-spanner/commit/c1098c5f4509918cacd3942b8f09354c88a85bb9))
+
+## [4.7.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.6.2...v4.7.0) (2020-02-27)
+
+
+### Features
+
+* export protos in src/index.ts ([0fa0f93](https://www.github.com/googleapis/nodejs-spanner/commit/0fa0f933ac9655278a4684bfa9e07cc912442fbf))
+
+### [4.6.2](https://www.github.com/googleapis/nodejs-spanner/compare/v4.6.1...v4.6.2) (2020-02-14)
+
+
+### Bug Fixes
+
+* Correctly parse metadata from BatchDML response + fix flaky system test ([#825](https://www.github.com/googleapis/nodejs-spanner/issues/825)) ([8b95da7](https://www.github.com/googleapis/nodejs-spanner/commit/8b95da7c68694d7a4ce5644e82d8485d01efb434))
+* retry 'Session not found' errors on getSnapshot ([#819](https://www.github.com/googleapis/nodejs-spanner/issues/819)) ([59bafbf](https://www.github.com/googleapis/nodejs-spanner/commit/59bafbfbcfe1fc3c45291a6fab6a343299123905))
+* retry 'Session not found' for r/w tx ([#824](https://www.github.com/googleapis/nodejs-spanner/issues/824)) ([1b393c4](https://www.github.com/googleapis/nodejs-spanner/commit/1b393c4c940d196232f8fc6ac99dbb5d1fd61e4d))
+
+### [4.6.1](https://www.github.com/googleapis/nodejs-spanner/compare/v4.6.0...v4.6.1) (2020-01-29)
+
+
+### Bug Fixes
+
+* enum, bytes, and Long types now accept strings ([#816](https://www.github.com/googleapis/nodejs-spanner/issues/816)) ([e63914d](https://www.github.com/googleapis/nodejs-spanner/commit/e63914d19813400daa2b08abfba43e1d87abfe7b))
+* fixed wrong return type of Database.run(..) ([#810](https://www.github.com/googleapis/nodejs-spanner/issues/810)) ([10c31d8](https://www.github.com/googleapis/nodejs-spanner/commit/10c31d80fdde2b91f2498ffcbc90a20a83c1a454)), closes [#809](https://www.github.com/googleapis/nodejs-spanner/issues/809)
+* retry Session not found for Database.run(..) ([#812](https://www.github.com/googleapis/nodejs-spanner/issues/812)) ([6a48fd6](https://www.github.com/googleapis/nodejs-spanner/commit/6a48fd61bc6f424865e5a265bc1d7bac81454a7d))
+* use PreciseDate instead of Date for min read timestamp ([#807](https://www.github.com/googleapis/nodejs-spanner/issues/807)) ([da8c2f8](https://www.github.com/googleapis/nodejs-spanner/commit/da8c2f85b55346d99a9eacc4249e6d6cd1a14556))
+
+## [4.6.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.5.2...v4.6.0) (2020-01-16)
+
+
+### Features
+
+* add fieldNames option in instance#getMetadata() ([#760](https://www.github.com/googleapis/nodejs-spanner/issues/760)) ([fa3154e](https://www.github.com/googleapis/nodejs-spanner/commit/fa3154ebe6754ecce1dd7b32442bda0eb27842f6))
+
+### [4.5.2](https://www.github.com/googleapis/nodejs-spanner/compare/v4.5.1...v4.5.2) (2020-01-15)
+
+
+### Bug Fixes
+
+* max backoff should be 32 seconds ([#792](https://www.github.com/googleapis/nodejs-spanner/issues/792)) ([c697240](https://www.github.com/googleapis/nodejs-spanner/commit/c697240c0f1c5d55bee63732b7346e7c95f25dcc))
+* retry executeStreamingSql when error code is retryable ([#795](https://www.github.com/googleapis/nodejs-spanner/issues/795)) ([1491858](https://www.github.com/googleapis/nodejs-spanner/commit/149185809fe32e05504d398849f7eadfe864fb6b)), closes [#620](https://www.github.com/googleapis/nodejs-spanner/issues/620)
+
+### [4.5.1](https://www.github.com/googleapis/nodejs-spanner/compare/v4.5.0...v4.5.1) (2020-01-08)
+
+
+### Bug Fixes
+
+* session pool should only create session if pending<=waiters ([#791](https://www.github.com/googleapis/nodejs-spanner/issues/791)) ([75345b1](https://www.github.com/googleapis/nodejs-spanner/commit/75345b18d37937b60a89c6b039c6b3a39b0ea6b7)), closes [#790](https://www.github.com/googleapis/nodejs-spanner/issues/790)
+
+## [4.5.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.4.1...v4.5.0) (2020-01-06)
+
+
+### Features
+
+* include potential leaked session stacktraces in error ([#759](https://www.github.com/googleapis/nodejs-spanner/issues/759)) ([1c0cf27](https://www.github.com/googleapis/nodejs-spanner/commit/1c0cf27b6c502fe7cc03f9b4c9191d746761b8b6))
+
+
+### Bug Fixes
+
+* clear stack of session while preparing new r/w tx ([#768](https://www.github.com/googleapis/nodejs-spanner/issues/768)) ([c852709](https://www.github.com/googleapis/nodejs-spanner/commit/c852709cf509d174ff140ad946fbbc20e5594aba))
+* delete env var after test if it was not set ([#774](https://www.github.com/googleapis/nodejs-spanner/issues/774)) ([7a1f40d](https://www.github.com/googleapis/nodejs-spanner/commit/7a1f40d5041e217363722d5a8b45c181f7a1510a))
+* end readWrite transaction in sample ([#766](https://www.github.com/googleapis/nodejs-spanner/issues/766)) ([f419e27](https://www.github.com/googleapis/nodejs-spanner/commit/f419e27e24eb8df78633d8245c364a709e58d007))
+* return different databases for different pool options ([#754](https://www.github.com/googleapis/nodejs-spanner/issues/754)) ([106c7a5](https://www.github.com/googleapis/nodejs-spanner/commit/106c7a513052631cf08f1db23ed099d2e3178635))
+* session pool should use push/pop and return sessions lifo ([#776](https://www.github.com/googleapis/nodejs-spanner/issues/776)) ([384bde1](https://www.github.com/googleapis/nodejs-spanner/commit/384bde1848f2fc72f52601f5b57af17dadebca69))
+* transaction runner should not timeout before first attempt ([#789](https://www.github.com/googleapis/nodejs-spanner/issues/789)) ([c75076e](https://www.github.com/googleapis/nodejs-spanner/commit/c75076e01b8480386289c237bc01d0ea2b42c85c)), closes [#786](https://www.github.com/googleapis/nodejs-spanner/issues/786)
+* year zero was not accepted for SpannerDate ([#783](https://www.github.com/googleapis/nodejs-spanner/issues/783)) ([0ceb862](https://www.github.com/googleapis/nodejs-spanner/commit/0ceb862beac57f3732e0097d808b62dbdfa5d3a9))
+
+### [4.4.1](https://www.github.com/googleapis/nodejs-spanner/compare/v4.4.0...v4.4.1) (2019-12-16)
+
+
+### Bug Fixes
+
+* get stacktrace before any async method call ([#756](https://www.github.com/googleapis/nodejs-spanner/issues/756)) ([3091a78](https://www.github.com/googleapis/nodejs-spanner/commit/3091a7849985330828703018b43f6cfabc0e381a))
+
+## [4.4.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.3.0...v4.4.0) (2019-12-10)
+
+
+### Features
+
+* add plural and singular resource descriptor ([#737](https://www.github.com/googleapis/nodejs-spanner/issues/737)) ([11658bf](https://www.github.com/googleapis/nodejs-spanner/commit/11658bfae8467e6788bb492895e7afb9202c59f4))
+* add replica support ([#726](https://www.github.com/googleapis/nodejs-spanner/issues/726)) ([dea3e59](https://www.github.com/googleapis/nodejs-spanner/commit/dea3e599759f374773ed5e4180187e79f518a7b6))
+
+
+### Bug Fixes
+
+* changes to default rpc timeouts ([da066fc](https://www.github.com/googleapis/nodejs-spanner/commit/da066fc916df4468c5f7d0538aadc5677c5bdb33))
+* **deps:** pin TypeScript below 3.7.0 ([7b1e07b](https://www.github.com/googleapis/nodejs-spanner/commit/7b1e07b33f31f93adf125a19db03fa6d5baf0b6b))
+* **deps:** update dependency yargs to v15 ([#736](https://www.github.com/googleapis/nodejs-spanner/issues/736)) ([e289890](https://www.github.com/googleapis/nodejs-spanner/commit/e2898907511a3426c6c42204c80765716a3317a6))
+* **docs:** snippets are now replaced in jsdoc comments ([#731](https://www.github.com/googleapis/nodejs-spanner/issues/731)) ([843ce6f](https://www.github.com/googleapis/nodejs-spanner/commit/843ce6f1cf14f14ab05c9983f6f5b7a8428fa6c7))
+* createInstance should return a value ([#747](https://www.github.com/googleapis/nodejs-spanner/issues/747)) ([16c013f](https://www.github.com/googleapis/nodejs-spanner/commit/16c013f04a02cbc07222f1d571ff0b016646c672))
+* include long import in proto typescript declaration file ([#732](https://www.github.com/googleapis/nodejs-spanner/issues/732)) ([6fe0757](https://www.github.com/googleapis/nodejs-spanner/commit/6fe0757d659e7c0835fc8c40b4617c688ce69551))
+* restore SessionLeakError name after super call ([#745](https://www.github.com/googleapis/nodejs-spanner/issues/745)) ([d04609b](https://www.github.com/googleapis/nodejs-spanner/commit/d04609b40023b411c08052f503baa54610062994))
+
+## [4.3.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.2.0...v4.3.0) (2019-11-05)
+
+
+### Features
+
+* **database:** batch create sessions ([#692](https://www.github.com/googleapis/nodejs-spanner/issues/692)) ([21f83b1](https://www.github.com/googleapis/nodejs-spanner/commit/21f83b1b13e12fb413138267dd4dc1bdaa24ccb9))
+
+
+### Bug Fixes
+
+* **deps:** bump google-gax to 1.7.5 ([#712](https://www.github.com/googleapis/nodejs-spanner/issues/712)) ([03384d4](https://www.github.com/googleapis/nodejs-spanner/commit/03384d4b93a66c758f1db75fa5efa1572f5c1eaf))
+* don't wrap SpannerDate so timezone does not affect results ([#711](https://www.github.com/googleapis/nodejs-spanner/issues/711)) ([86c0ae5](https://www.github.com/googleapis/nodejs-spanner/commit/86c0ae5fbdddccd915689bbfff3af8834ec64d12))
+
+## [4.2.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.1.0...v4.2.0) (2019-10-02)
+
+
+### Bug Fixes
+
+* adjust timeout values ([#700](https://www.github.com/googleapis/nodejs-spanner/issues/700)) ([4571f15](https://www.github.com/googleapis/nodejs-spanner/commit/4571f15))
+* use compatible version of google-gax ([d312a8f](https://www.github.com/googleapis/nodejs-spanner/commit/d312a8f))
+
+
+### Features
+
+* .d.ts for protos ([4d3d649](https://www.github.com/googleapis/nodejs-spanner/commit/4d3d649))
+
+## [4.1.0](https://www.github.com/googleapis/nodejs-spanner/compare/v4.0.2...v4.1.0) (2019-09-16)
+
+
+### Bug Fixes
+
+* **deps:** update dependency yargs to v14 ([#680](https://www.github.com/googleapis/nodejs-spanner/issues/680)) ([add2f05](https://www.github.com/googleapis/nodejs-spanner/commit/add2f05))
+* **types:** import request types from teeny-request ([#682](https://www.github.com/googleapis/nodejs-spanner/issues/682)) ([a1ecd80](https://www.github.com/googleapis/nodejs-spanner/commit/a1ecd80))
+* set proper version # for x-goog-api-client ([#681](https://www.github.com/googleapis/nodejs-spanner/issues/681)) ([f300fad](https://www.github.com/googleapis/nodejs-spanner/commit/f300fad))
+
+
+### Features
+
+* load protos from JSON, grpc-fallback support ([0b3fb90](https://www.github.com/googleapis/nodejs-spanner/commit/0b3fb90))
+* support batch create sessions ([#685](https://www.github.com/googleapis/nodejs-spanner/issues/685)) ([7bc58cf](https://www.github.com/googleapis/nodejs-spanner/commit/7bc58cf))
+* use JSON proto for transaction-runner ([#690](https://www.github.com/googleapis/nodejs-spanner/issues/690)) ([279fc97](https://www.github.com/googleapis/nodejs-spanner/commit/279fc97))
+
+### [4.0.2](https://www.github.com/googleapis/nodejs-spanner/compare/v4.0.1...v4.0.2) (2019-08-09)
+
+
+### Bug Fixes
+
+* allow calls with no request, add JSON proto ([4a478a7](https://www.github.com/googleapis/nodejs-spanner/commit/4a478a7))
+* **deps:** use the latest extend ([#678](https://www.github.com/googleapis/nodejs-spanner/issues/678)) ([a094fdd](https://www.github.com/googleapis/nodejs-spanner/commit/a094fdd))
+
+### [4.0.1](https://www.github.com/googleapis/nodejs-spanner/compare/v4.0.0...v4.0.1) (2019-07-29)
+
+
+### Bug Fixes
+
+* **deps:** update dependency @google-cloud/paginator to v2 ([#668](https://www.github.com/googleapis/nodejs-spanner/issues/668)) ([86d3638](https://www.github.com/googleapis/nodejs-spanner/commit/86d3638))
+* **deps:** update dependency google-auth-library to v5 ([#669](https://www.github.com/googleapis/nodejs-spanner/issues/669)) ([c6d165e](https://www.github.com/googleapis/nodejs-spanner/commit/c6d165e))
+* **docs:** move docs under overloads to be picked up by JSDoc ([#666](https://www.github.com/googleapis/nodejs-spanner/issues/666)) ([be10eb1](https://www.github.com/googleapis/nodejs-spanner/commit/be10eb1))
+
+## [4.0.0](https://www.github.com/googleapis/nodejs-spanner/compare/v3.1.0...v4.0.0) (2019-07-19)
+
+
+### ⚠ BREAKING CHANGES
+
+* **deps:** this will ship async/await in the generated code
+* upgrade engines field to >=8.10.0 (#587)
+
+### Bug Fixes
+
+* **deps:** update dependency @google-cloud/common-grpc to v1 ([#607](https://www.github.com/googleapis/nodejs-spanner/issues/607)) ([084dc8c](https://www.github.com/googleapis/nodejs-spanner/commit/084dc8c))
+* **deps:** update dependency @google-cloud/paginator to ^0.2.0 ([#560](https://www.github.com/googleapis/nodejs-spanner/issues/560)) ([8fe33a1](https://www.github.com/googleapis/nodejs-spanner/commit/8fe33a1))
+* **deps:** update dependency @google-cloud/paginator to v1 ([#593](https://www.github.com/googleapis/nodejs-spanner/issues/593)) ([bfb2255](https://www.github.com/googleapis/nodejs-spanner/commit/bfb2255))
+* **deps:** update dependency @google-cloud/precise-date to v1 ([#600](https://www.github.com/googleapis/nodejs-spanner/issues/600)) ([f52494f](https://www.github.com/googleapis/nodejs-spanner/commit/f52494f))
+* **deps:** update dependency @google-cloud/projectify to v1 ([#591](https://www.github.com/googleapis/nodejs-spanner/issues/591)) ([22713c1](https://www.github.com/googleapis/nodejs-spanner/commit/22713c1))
+* **deps:** update dependency @google-cloud/promisify to v1 ([#592](https://www.github.com/googleapis/nodejs-spanner/issues/592)) ([cb76922](https://www.github.com/googleapis/nodejs-spanner/commit/cb76922))
+* **deps:** update dependency arrify to v2 ([#577](https://www.github.com/googleapis/nodejs-spanner/issues/577)) ([6e0ddc8](https://www.github.com/googleapis/nodejs-spanner/commit/6e0ddc8))
+* **deps:** update dependency google-auth-library to v4 ([#599](https://www.github.com/googleapis/nodejs-spanner/issues/599)) ([21b9995](https://www.github.com/googleapis/nodejs-spanner/commit/21b9995))
+* **deps:** update dependency google-gax to ^0.26.0 ([#586](https://www.github.com/googleapis/nodejs-spanner/issues/586)) ([0f88be2](https://www.github.com/googleapis/nodejs-spanner/commit/0f88be2))
+* **deps:** update dependency merge-stream to v2 ([#624](https://www.github.com/googleapis/nodejs-spanner/issues/624)) ([3aa676d](https://www.github.com/googleapis/nodejs-spanner/commit/3aa676d))
+* **deps:** update dependency p-queue to v4 ([#558](https://www.github.com/googleapis/nodejs-spanner/issues/558)) ([7547e21](https://www.github.com/googleapis/nodejs-spanner/commit/7547e21))
+* **deps:** update dependency p-queue to v5 ([#578](https://www.github.com/googleapis/nodejs-spanner/issues/578)) ([7827fb4](https://www.github.com/googleapis/nodejs-spanner/commit/7827fb4))
+* **deps:** update dependency p-queue to v6.0.2 ([#643](https://www.github.com/googleapis/nodejs-spanner/issues/643)) ([ace1359](https://www.github.com/googleapis/nodejs-spanner/commit/ace1359))
+* **deps:** upgrade to google-gax 1.x ([#651](https://www.github.com/googleapis/nodejs-spanner/issues/651)) ([a32e838](https://www.github.com/googleapis/nodejs-spanner/commit/a32e838))
+* **docs:** add google.type namespace ([#605](https://www.github.com/googleapis/nodejs-spanner/issues/605)) ([5cc6dc1](https://www.github.com/googleapis/nodejs-spanner/commit/5cc6dc1))
+* **docs:** link to reference docs section on googleapis.dev ([#654](https://www.github.com/googleapis/nodejs-spanner/issues/654)) ([2379dc2](https://www.github.com/googleapis/nodejs-spanner/commit/2379dc2))
+* **docs:** move to new client docs URL ([#647](https://www.github.com/googleapis/nodejs-spanner/issues/647)) ([7dec1bd](https://www.github.com/googleapis/nodejs-spanner/commit/7dec1bd))
+* **transaction:** set/update seqno for all sql requests ([#661](https://www.github.com/googleapis/nodejs-spanner/issues/661)) ([102cae1](https://www.github.com/googleapis/nodejs-spanner/commit/102cae1))
+* DEADLINE_EXCEEDED is no longer retried ([#598](https://www.github.com/googleapis/nodejs-spanner/issues/598)) ([1cac4fc](https://www.github.com/googleapis/nodejs-spanner/commit/1cac4fc))
+* include 'x-goog-request-params' header in requests ([#573](https://www.github.com/googleapis/nodejs-spanner/issues/573)) ([e0cb9dc](https://www.github.com/googleapis/nodejs-spanner/commit/e0cb9dc))
+* treat deadline errors as idempotent ([#602](https://www.github.com/googleapis/nodejs-spanner/issues/602)) ([b3d494a](https://www.github.com/googleapis/nodejs-spanner/commit/b3d494a))
+* update retry config ([#650](https://www.github.com/googleapis/nodejs-spanner/issues/650)) ([f1e8104](https://www.github.com/googleapis/nodejs-spanner/commit/f1e8104))
+
+
+### Build System
+
+* upgrade engines field to >=8.10.0 ([#587](https://www.github.com/googleapis/nodejs-spanner/issues/587)) ([970d335](https://www.github.com/googleapis/nodejs-spanner/commit/970d335))
+
+
+### Features
+
+* add .repo-metadata.json and move to new README template ([#636](https://www.github.com/googleapis/nodejs-spanner/issues/636)) ([11007cf](https://www.github.com/googleapis/nodejs-spanner/commit/11007cf))
+* support apiEndpoint override ([#634](https://www.github.com/googleapis/nodejs-spanner/issues/634)) ([6a5eb36](https://www.github.com/googleapis/nodejs-spanner/commit/6a5eb36))
+* support apiEndpoint override in client constructor ([#639](https://www.github.com/googleapis/nodejs-spanner/issues/639)) ([f6ebb27](https://www.github.com/googleapis/nodejs-spanner/commit/f6ebb27))
+
+
+### Miscellaneous Chores
+
+* **deps:** update dependency gts to v1 ([#584](https://www.github.com/googleapis/nodejs-spanner/issues/584)) ([135ac6d](https://www.github.com/googleapis/nodejs-spanner/commit/135ac6d))
+
+## v3.1.0
+
+03-06-2019 20:13 PST
+
+### New Features
+- feat(transaction): batch dml ([#550](https://github.com/googleapis/nodejs-spanner/pull/550))
+
+### Dependencies
+- chore(deps): update dependency @types/sinon to v7.0.9 ([#553](https://github.com/googleapis/nodejs-spanner/pull/553))
+- chore(deps): fix broken dep types ([#549](https://github.com/googleapis/nodejs-spanner/pull/549))
+
+### Documentation
+- docs: Update grammar ([#544](https://github.com/googleapis/nodejs-spanner/pull/544))
+
+### Internal / Testing Changes
+- chore: update proto docs and code style
+- chore(deps): use bundled p-queue types ([#547](https://github.com/googleapis/nodejs-spanner/pull/547))
+- build: update release configuration ([#545](https://github.com/googleapis/nodejs-spanner/pull/545))
+- build: use node10 to run samples-test, system-test etc ([#551](https://github.com/googleapis/nodejs-spanner/pull/551))
+
+## v3.0.0
+
+02-25-2019 12:38 PST
+
+### Breaking Changes
+- breaking: refactor(transaction): split logic into new classes ([#506](https://github.com/googleapis/nodejs-spanner/pull/506))
+- breaking: feat(timestamp): create new date/timestamp classes ([#517](https://github.com/googleapis/nodejs-spanner/pull/517))
+- fix: run generator to bring in streaming retry configs ([#448](https://github.com/googleapis/nodejs-spanner/pull/448))
+
+#### Read-only Transactions (Snapshots) are no longer runnable via `Database#runTransaction` ([#506](https://github.com/googleapis/nodejs-spanner/pull/506))
+
+`Database#runTransaction` is useful if you want to replay a Transaction in its entirety in case you run into an `ABORTED` error. This should never happen with Snapshots, so it felt like it was time to create a new method just for them. *This change also means that `runTransaction` will only ever return read-write transactions.*
+
+Before
+
+```js
+const bounds = {
+ readOnly: true,
+ strong: true,
+};
+
+database.runTransaction(bounds, (err, transaction) => {
+ // ...
+});
+```
+
+After
+
+```js
+const bounds = {
+ strong: true,
+};
+
+database.getSnapshot(bounds, (err, snapshot) => {
+ // ...
+});
+```
+
+#### Timestamp bounds now offer nanosecond precision ([#506](https://github.com/googleapis/nodejs-spanner/pull/506))
+
+This change allows you to specify a Snapshot read timestamp with more precision. Previously one could only specify in seconds, but now we support both milliseconds and nanoseconds.
+
+Before
+
+```js
+const bounds = {
+ exactStaleness: 5
+};
+
+const bounds = {
+ readTimestamp: Date.now()
+};
+```
+
+After
+
+```js
+const bounds = {
+ // millisecond precision for staleness
+ exactStaleness: 5000,
+
+ // or if you need nano/micro precision for staleness
+ exactStaleness: {seconds: 5, nanos: 321} // => 5000000321 nanoseconds
+};
+
+const bounds = {
+ readTimestamp: Spanner.timestamp('2019-01-12T00:30:35.381101032Z')
+};
+```
+
+#### Transaction#end changes. ([#506](https://github.com/googleapis/nodejs-spanner/pull/506))
+
+Transactions saw a sizeable refactor with this version, previously `end()` performed a number of asynchronous tasks when called, however this is no longer true. Because of this, there isn't much of a need to track when end is finished, so we've dropped the callback parameter.
+
+Additionally, `end()` will now be called automatically for failed calls to `Transaction#commit()` and `Transaction#rollback()`. If your code calls end after a failed commit/rollback, it will simply no-op.
+
+Before
+
+```js
+transaction.end(callback);
+```
+
+After
+
+```js
+transaction.end();
+callback();
+```
+
+#### Session#beginTransaction was removed ([#506](https://github.com/googleapis/nodejs-spanner/pull/506))
+
+Spanner supports 4 different types of Transactions:
+
+* ReadWrite
+* ReadOnly
+* PartitionedDml
+* Batch
+
+Using one method for all types became cumbersome when trying to manage the various options available to each; now each type has its own method.
+
+Before
+
+```js
+const transaction = await session.beginTransaction({readWrite: true});
+const snapshot = await session.beginTransaction({readOnly: true});
+```
+
+After
+
+```js
+const transaction = session.transaction();
+await transaction.begin();
+
+const snapshot = session.snapshot({strong: true});
+await snapshot.begin();
+```
+
+#### Timestamps now represented by [`@google-cloud/precise-date`](https://github.com/googleapis/nodejs-precise-date) ([#517](https://github.com/googleapis/nodejs-spanner/pull/517))
+
+While Spanner supports timestamps with nanosecond precision, JavaScript Dates do not. So we created the `PreciseDate` object which extends the native Date and adds both microsecond and nanosecond support.
+
+Before
+
+```js
+const timestamp = Spanner.timestamp('2019-01-12T00:30:35.381101032Z');
+// => {value: '2019-01-12T00:30:35.381Z'}
+```
+
+After
+
+```js
+// PreciseDate object
+const timestamp = Spanner.timestamp('2019-01-12T00:30:35.381101032Z');
+timestamp.toJSON(); // => '2019-01-12T00:30:35.381101032Z'
+timestamp.toFullTimeString(); // => '1547253035381101032' (nanoseconds)
+```
+
+#### SpannerDate now extends the native Date object. ([#517](https://github.com/googleapis/nodejs-spanner/pull/517))
+
+Since Timestamps saw an update, it made sense to give Spanner Date objects a similar update. The `Spanner.date()` method now returns a native Date object.
+
+Before
+
+```js
+const date = Spanner.date('3-22-2018');
+// => {value: '2018-3-22'}
+```
+
+After
+
+```js
+// Date object
+const date = Spanner.date('3-22-2018');
+date.toJSON(); // => '2018-3-22'
+```
+
+### New Features
+- refactor(types): enable noImplicitAny in session-pool.ts ([#508](https://github.com/googleapis/nodejs-spanner/pull/508))
+- refactor(table): improve typescript defs ([#495](https://github.com/googleapis/nodejs-spanner/pull/495))
+- refactor(ts): partial-result-stream types/refactor ([#488](https://github.com/googleapis/nodejs-spanner/pull/488))
+- refactor(codec): improve typescript defs ([#490](https://github.com/googleapis/nodejs-spanner/pull/490))
+- chore(SessionPool): improve typescript types ([#479](https://github.com/googleapis/nodejs-spanner/pull/479))
+- chore(typescript): add types for spanner gapic ([#487](https://github.com/googleapis/nodejs-spanner/pull/487))
+- refactor(ts): enable noImplicitAny on src/session.ts ([#457](https://github.com/googleapis/nodejs-spanner/pull/457))
+
+### Bug Fixes
+- fix: throw on invalid credentials ([#522](https://github.com/googleapis/nodejs-spanner/pull/522))
+- fix(transaction): re-use session in transaction runners ([#540](https://github.com/googleapis/nodejs-spanner/pull/540))
+
+### Dependencies
+- chore(deps): update dependency mocha to v6 ([#532](https://github.com/googleapis/nodejs-spanner/pull/532))
+- fix(deps): update dependency @google-cloud/promisify to ^0.4.0 ([#524](https://github.com/googleapis/nodejs-spanner/pull/524))
+- chore(deps): update dependency @types/p-retry to v3 ([#521](https://github.com/googleapis/nodejs-spanner/pull/521))
+- fix(deps): update dependency yargs to v13 ([#520](https://github.com/googleapis/nodejs-spanner/pull/520))
+- fix(deps): update dependency @google-cloud/common-grpc to ^0.10.0 ([#504](https://github.com/googleapis/nodejs-spanner/pull/504))
+- fix(deps): update dependency google-gax to ^0.25.0 ([#505](https://github.com/googleapis/nodejs-spanner/pull/505))
+- chore(deps): update dependency eslint-config-prettier to v4 ([#502](https://github.com/googleapis/nodejs-spanner/pull/502))
+- fix(deps): update dependency google-gax to ^0.24.0 ([#501](https://github.com/googleapis/nodejs-spanner/pull/501))
+- fix(deps): update dependency google-auth-library to v3 ([#498](https://github.com/googleapis/nodejs-spanner/pull/498))
+- fix(deps): update dependency google-gax to ^0.23.0 ([#496](https://github.com/googleapis/nodejs-spanner/pull/496))
+- chore(deps): update dependency concat-stream to v2 ([#489](https://github.com/googleapis/nodejs-spanner/pull/489))
+- refactor: removed async from dependency list ([#449](https://github.com/googleapis/nodejs-spanner/pull/449))
+- chore(deps): update dependency @types/sinon to v7 ([#480](https://github.com/googleapis/nodejs-spanner/pull/480))
+- fix(deps): update dependency p-retry to v3 ([#481](https://github.com/googleapis/nodejs-spanner/pull/481))
+- chore(deps): update dependency typescript to ~3.2.0 ([#459](https://github.com/googleapis/nodejs-spanner/pull/459))
+
+### Documentation
+- docs: fixed example for table.upsert() ([#533](https://github.com/googleapis/nodejs-spanner/pull/533))
+- docs: update links in contrib guide ([#525](https://github.com/googleapis/nodejs-spanner/pull/525))
+- docs: update contributing path in README ([#515](https://github.com/googleapis/nodejs-spanner/pull/515))
+- docs: add lint/fix example to contributing guide ([#512](https://github.com/googleapis/nodejs-spanner/pull/512))
+- docs: fix example comments ([#511](https://github.com/googleapis/nodejs-spanner/pull/511))
+- chore: update proto licenses
+- build: check broken links in generated docs ([#491](https://github.com/googleapis/nodejs-spanner/pull/491))
+- fix(docs): remove unused long running operations and IAM types
+- refactor: modernize sample tests ([#484](https://github.com/googleapis/nodejs-spanner/pull/484))
+- docs: fix links in docstrings ([#467](https://github.com/googleapis/nodejs-spanner/pull/467))
+- docs: fix typo ([#465](https://github.com/googleapis/nodejs-spanner/pull/465))
+- chore: update license file ([#464](https://github.com/googleapis/nodejs-spanner/pull/464))
+- docs: update readme badges ([#462](https://github.com/googleapis/nodejs-spanner/pull/462))
+- docs(samples): Add sample to delete using a mutation. ([#458](https://github.com/googleapis/nodejs-spanner/pull/458))
+
+### Internal / Testing Changes
+- chore: add spanner_grpc_config.json and enable grpc-gcp support for spanner ([#503](https://github.com/googleapis/nodejs-spanner/pull/503))
+- build: use linkinator for docs test ([#523](https://github.com/googleapis/nodejs-spanner/pull/523))
+- build: create docs test npm scripts ([#519](https://github.com/googleapis/nodejs-spanner/pull/519))
+- build: test using @grpc/grpc-js in CI ([#516](https://github.com/googleapis/nodejs-spanner/pull/516))
+- chore: move CONTRIBUTING.md to root ([#514](https://github.com/googleapis/nodejs-spanner/pull/514))
+- refactor: improve generated code style. ([#510](https://github.com/googleapis/nodejs-spanner/pull/510))
+- build: ignore googleapis.com in doc link check ([#500](https://github.com/googleapis/nodejs-spanner/pull/500))
+- fix: fix the sample tests ([#486](https://github.com/googleapis/nodejs-spanner/pull/486))
+- chore(build): inject yoshi automation key ([#478](https://github.com/googleapis/nodejs-spanner/pull/478))
+- chore: update nyc and eslint configs ([#477](https://github.com/googleapis/nodejs-spanner/pull/477))
+- chore: fix publish.sh permission +x ([#475](https://github.com/googleapis/nodejs-spanner/pull/475))
+- fix(build): fix Kokoro release script ([#474](https://github.com/googleapis/nodejs-spanner/pull/474))
+- build: add Kokoro configs for autorelease ([#473](https://github.com/googleapis/nodejs-spanner/pull/473))
+- chore: always nyc report before calling codecov ([#469](https://github.com/googleapis/nodejs-spanner/pull/469))
+- chore: nyc ignore build/test by default ([#468](https://github.com/googleapis/nodejs-spanner/pull/468))
+- fix(build): fix system key decryption ([#460](https://github.com/googleapis/nodejs-spanner/pull/460))
+- chore: temporarily disable gts ([#534](https://github.com/googleapis/nodejs-spanner/pull/534))
+
+## v2.2.1
+
+11-28-2018 10:43 PST
+
+
+### Implementation Changes
+- Update package.json to include the build directory ([#454](https://github.com/googleapis/nodejs-spanner/pull/454))
+
+## v2.2.0
+
+11-27-2018 09:13 PST
+
+
+### Implementation Changes
+- fix: transaction async error handling that not thrown the full error ([#447](https://github.com/googleapis/nodejs-spanner/pull/447))
+- fix(transaction): accept json options in run/runStream ([#446](https://github.com/googleapis/nodejs-spanner/pull/446))
+- refactor(transaction): error handling ([#360](https://github.com/googleapis/nodejs-spanner/pull/360))
+- refactor(ts): enable noImplicitThis in the tsconfig ([#411](https://github.com/googleapis/nodejs-spanner/pull/411))
+- refactor(ts): use import/export for local files ([#408](https://github.com/googleapis/nodejs-spanner/pull/408))
+- refactor(ts): add type packages for many things ([#406](https://github.com/googleapis/nodejs-spanner/pull/406))
+- refactor(ts): convert tests to typescript ([#404](https://github.com/googleapis/nodejs-spanner/pull/404))
+- refactor(typescript): rename src and system-test files to *.ts ([#402](https://github.com/googleapis/nodejs-spanner/pull/402))
+- refactor(typescript): perform initial TypeScript conversion ([#384](https://github.com/googleapis/nodejs-spanner/pull/384))
+- fix: Only run mutations inside of a transaction. ([#361](https://github.com/googleapis/nodejs-spanner/pull/361))
+
+### New Features
+- feat(session): add label support ([#373](https://github.com/googleapis/nodejs-spanner/pull/373))
+
+### Dependencies
+- chore(deps): update dependency @types/sinon to v5.0.7 ([#444](https://github.com/googleapis/nodejs-spanner/pull/444))
+- fix: Pin @types/sinon to last compatible version ([#443](https://github.com/googleapis/nodejs-spanner/pull/443))
+- chore(deps): update dependency @types/p-queue to v3 ([#440](https://github.com/googleapis/nodejs-spanner/pull/440))
+- fix(deps): update dependency google-gax to ^0.22.0 ([#435](https://github.com/googleapis/nodejs-spanner/pull/435))
+- chore(deps): update dependency gts to ^0.9.0 ([#434](https://github.com/googleapis/nodejs-spanner/pull/434))
+- chore(deps): update dependency @google-cloud/nodejs-repo-tools to v3 ([#429](https://github.com/googleapis/nodejs-spanner/pull/429))
+- chore(deps): update dependency @types/is to v0.0.21 ([#426](https://github.com/googleapis/nodejs-spanner/pull/426))
+- fix(deps): update dependency through2 to v3 ([#423](https://github.com/googleapis/nodejs-spanner/pull/423))
+- chore: remove unused google-proto-files dep ([#421](https://github.com/googleapis/nodejs-spanner/pull/421))
+- chore(deps): update dependency eslint-plugin-node to v8 ([#407](https://github.com/googleapis/nodejs-spanner/pull/407))
+- refactor: drop dependency on delay ([#383](https://github.com/googleapis/nodejs-spanner/pull/383))
+- fix(deps): update dependency google-proto-files to ^0.17.0 ([#369](https://github.com/googleapis/nodejs-spanner/pull/369))
+- chore(deps): update dependency sinon to v7 ([#371](https://github.com/googleapis/nodejs-spanner/pull/371))
+
+### Documentation
+- docs(samples): updated samples code to use async await ([#385](https://github.com/googleapis/nodejs-spanner/pull/385))
+- Add Cloud Spanner DML/PDML samples. ([#366](https://github.com/googleapis/nodejs-spanner/pull/366))
+
+### Internal / Testing Changes
+- chore: add synth.metadata
+- test: fix broken tests ([#441](https://github.com/googleapis/nodejs-spanner/pull/441))
+- refactor(samples): convert ava tests to mocha ([#400](https://github.com/googleapis/nodejs-spanner/pull/400))
+- chore: update eslintignore config ([#433](https://github.com/googleapis/nodejs-spanner/pull/433))
+- chore(build): fix lint rules and build for generated code ([#430](https://github.com/googleapis/nodejs-spanner/pull/430))
+- chore: drop contributors from multiple places ([#427](https://github.com/googleapis/nodejs-spanner/pull/427))
+- chore: use latest npm on Windows ([#425](https://github.com/googleapis/nodejs-spanner/pull/425))
+- fix: update source location for synth ([#422](https://github.com/googleapis/nodejs-spanner/pull/422))
+- fix: re-enable linting and formatting ([#420](https://github.com/googleapis/nodejs-spanner/pull/420))
+- chore: improve typescript config and types ([#417](https://github.com/googleapis/nodejs-spanner/pull/417))
+- chore: update CircleCI config ([#416](https://github.com/googleapis/nodejs-spanner/pull/416))
+- chore: run gts fix ([#413](https://github.com/googleapis/nodejs-spanner/pull/413))
+- chore: remove old issue template ([#397](https://github.com/googleapis/nodejs-spanner/pull/397))
+- chore: update issue templates ([#401](https://github.com/googleapis/nodejs-spanner/pull/401))
+- build: run tests on node11 ([#395](https://github.com/googleapis/nodejs-spanner/pull/395))
+- chores(build): do not collect sponge.xml from windows builds ([#389](https://github.com/googleapis/nodejs-spanner/pull/389))
+- chores(build): run codecov on continuous builds ([#386](https://github.com/googleapis/nodejs-spanner/pull/386))
+- chore: update new issue template ([#382](https://github.com/googleapis/nodejs-spanner/pull/382))
+- fix(tests): use unique label for tests ([#367](https://github.com/googleapis/nodejs-spanner/pull/367))
+- build: fix codecov uploading on Kokoro ([#372](https://github.com/googleapis/nodejs-spanner/pull/372))
+- build(kokoro): test with spanner key ([#364](https://github.com/googleapis/nodejs-spanner/pull/364))
+
+## v2.1.0
+
+### Implementation Changes
+- chore: use arrow functions ([#359](https://github.com/googleapis/nodejs-spanner/pull/359))
+- fix: change exists to return false on error code 5 ([#353](https://github.com/googleapis/nodejs-spanner/pull/353))
+- Switch to let/const ([#328](https://github.com/googleapis/nodejs-spanner/pull/328))
+- Minor: wrap the inner error on retried transactions and return when deadline exceeded ([#309](https://github.com/googleapis/nodejs-spanner/pull/309))
+- chore: convert index to es6 class ([#306](https://github.com/googleapis/nodejs-spanner/pull/306))
+- Fix p-retry is accepting function not object/promise ([#312](https://github.com/googleapis/nodejs-spanner/pull/312))
+
+### New Features
+- feat: dml/pdml support ([#348](https://github.com/googleapis/nodejs-spanner/pull/348))
+- feat(table): drop method and additional error handling to delete ([#358](https://github.com/googleapis/nodejs-spanner/pull/358))
+- feat(PartialResultStream): emit raw responses as event ([#357](https://github.com/googleapis/nodejs-spanner/pull/357))
+- feat(transaction): add backup backoff delay ([#350](https://github.com/googleapis/nodejs-spanner/pull/350))
+
+### Dependencies
+- chore(deps): update dependency eslint-plugin-prettier to v3 ([#351](https://github.com/googleapis/nodejs-spanner/pull/351))
+- fix(deps): update dependency @google-cloud/common-grpc to ^0.9.0 ([#339](https://github.com/googleapis/nodejs-spanner/pull/339))
+- fix(deps): update dependency google-gax to ^0.20.0 ([#327](https://github.com/googleapis/nodejs-spanner/pull/327))
+- fix(deps): update dependency delay to v4 ([#322](https://github.com/googleapis/nodejs-spanner/pull/322))
+- fix: upgrade to the latest common-grpc ([#320](https://github.com/googleapis/nodejs-spanner/pull/320))
+- fix(deps): update dependency google-auth-library to v2 ([#319](https://github.com/googleapis/nodejs-spanner/pull/319))
+- fix(deps): update dependency p-queue to v3 ([#317](https://github.com/googleapis/nodejs-spanner/pull/317))
+- chore(deps): update dependency nyc to v13 ([#314](https://github.com/googleapis/nodejs-spanner/pull/314))
+
+### Documentation
+- docs: add typedefs for commit timestamp ([#356](https://github.com/googleapis/nodejs-spanner/pull/356))
+- docs: various jsdoc fixes ([#352](https://github.com/googleapis/nodejs-spanner/pull/352))
+
+### Internal / Testing Changes
+- chore: update auto-generated config ([#362](https://github.com/googleapis/nodejs-spanner/pull/362))
+- chore: change queries to return expected values ([#355](https://github.com/googleapis/nodejs-spanner/pull/355))
+- Update CI config ([#354](https://github.com/googleapis/nodejs-spanner/pull/354))
+- chore: make sure workloadb benchmark runs properly ([#349](https://github.com/googleapis/nodejs-spanner/pull/349))
+- test: Add delay for system test. ([#16](https://github.com/googleapis/nodejs-spanner/pull/16))
+- Update QuickStart to use "new" syntax for creating Spanner client. ([#344](https://github.com/googleapis/nodejs-spanner/pull/344))
+- test: remove appveyor config ([#342](https://github.com/googleapis/nodejs-spanner/pull/342))
+- Update CI config ([#341](https://github.com/googleapis/nodejs-spanner/pull/341))
+- Fix the failing lint rules ([#338](https://github.com/googleapis/nodejs-spanner/pull/338))
+- Enable prefer-const in the eslint config ([#337](https://github.com/googleapis/nodejs-spanner/pull/337))
+- soften assertion in system tests ([#335](https://github.com/googleapis/nodejs-spanner/pull/335))
+- Update protos and comments ([#334](https://github.com/googleapis/nodejs-spanner/pull/334))
+- fix string comparison in system test ([#333](https://github.com/googleapis/nodejs-spanner/pull/333))
+- Enable no-var in eslint ([#331](https://github.com/googleapis/nodejs-spanner/pull/331))
+- Add synth templates ([#330](https://github.com/googleapis/nodejs-spanner/pull/330))
+- test: throw on deprecation ([#279](https://github.com/googleapis/nodejs-spanner/pull/279))
+- Retry npm install in CI ([#323](https://github.com/googleapis/nodejs-spanner/pull/323))
+- Re-generate library using /synth.py ([#316](https://github.com/googleapis/nodejs-spanner/pull/316))
+- Fix color highlighting in CHANGELOG.md ([#313](https://github.com/googleapis/nodejs-spanner/pull/313))
+- Update sample dependency @google-cloud/spanner to v2 ([#310](https://github.com/googleapis/nodejs-spanner/pull/310))
+- Re-generate library using /synth.py ([#308](https://github.com/googleapis/nodejs-spanner/pull/308))
+
+## v2.0.0
+
+### Breaking Changes
+- Drop support for Node.js v4.x.x and v9.x.x (#226)
+
+- Use es style imports (#302)
+ The import syntax for this library has changed to be [es module](https://nodejs.org/api/esm.html) compliant.
+
+ #### Old code
+ ```js
+ const spanner = require('@google-cloud/spanner')();
+ // or
+ const Spanner = require('@google-cloud/spanner');
+ const spanner = new Spanner();
+ ```
+
+ #### New code
+ ```js
+ const {Spanner} = require('@google-cloud/spanner');
+ const spanner = new Spanner();
+ ```
+
+### New Features
+- add runTransactionAsync method (#294)
+ ```js
+ const {Spanner} = require('@google-cloud/spanner');
+ const spanner = new Spanner();
+
+ const instance = spanner.instance('my-instance');
+ const database = instance.database('my-database');
+
+ await database.runTransactionAsync(async (transaction) => {
+ const [rows] = await transaction.run('SELECT * FROM MyTable');
+ const data = rows.map(row => row.thing);
+ await transaction.commit();
+ return data;
+ }).then(data => {
+ // ...
+ });
+ ```
+- feature(database): make session pool hot swappable (#243)
+
+### Implementation Changes
+- feat: use es style imports (#302)
+- fix: perform type check on grpc value (#300)
+- chore: use es classes in a few places (#297)
+- chore: do not use npm ci (#292)
+- chore: split the common module (#289)
+- test: fix strict equal assertions (#287)
+- chore: ignore package-lock.json (#286)
+- chore: use let and const (#283)
+- chore: update renovate config (#281)
+- Re-generate library using /synth.py (#282)
+- chore: use assert.deepStrictEqual instead of assert.deepEqual (#274)
+- chore: require node 8 for samples (#273)
+- test: use strictEqual in tests (#267)
+- use node_library not not internal generate method (#247)
+- Configure Renovate (#239)
+- fix: drop support for node.js 4.x and 9.x (#226)
+
+### Dependencies
+- fix(deps): update dependency google-gax to ^0.19.0 (#298)
+- chore(deps): update dependency eslint-config-prettier to v3 (#295)
+- fix(deps): update dependency google-gax to ^0.18.0 (#278)
+- chore(deps): update dependency eslint-plugin-node to v7 (#266)
+- refactor: update auth library, common-grpc (#256)
+- fix(deps): update dependency yargs to v12 (#254)
+- chore(deps): update dependency yargs to v12 (#252)
+- chore(deps): update dependency sinon to v6.0.1 (#250)
+- chore(package): update eslint to version 5.0.0 (#240)
+- chore: update sample lockfiles (#246)
+- Update to support google-gax v0.17 (#244)
+- fix(package): update @google-cloud/common-grpc to version 0.7.1 (#235)
+- refactor: drop dependency on safe-buffer (#232)
+- refactor: remove dependency generic-pool (#231)
+- refactor: drop dependency on lodash.flatten (#233)
+- refactor: remove array-uniq as dependency (#227)
+- refactor: remove string-obj-format (#229)
+- refactor: remove methmeth as a dependency (#228)
+- chore: upgrade several dependencies (#221)
+
+### Internal / Testing Changes
+- chore: move mocha options to mocha.opts (#272)
+- refactor: drop repo-tool as an exec wrapper (#248)
+- fix: update linking for samples (#242)
+- Adding Spanner STRUCT param samples (#219)
diff --git a/handwritten/spanner/CODE_OF_CONDUCT.md b/handwritten/spanner/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000..2add2547a81
--- /dev/null
+++ b/handwritten/spanner/CODE_OF_CONDUCT.md
@@ -0,0 +1,94 @@
+
+# Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/handwritten/spanner/CONTRIBUTING.md b/handwritten/spanner/CONTRIBUTING.md
new file mode 100644
index 00000000000..7b36f0328a8
--- /dev/null
+++ b/handwritten/spanner/CONTRIBUTING.md
@@ -0,0 +1,76 @@
+# How to become a contributor and submit your own code
+
+**Table of contents**
+
+* [Contributor License Agreements](#contributor-license-agreements)
+* [Contributing a patch](#contributing-a-patch)
+* [Running the tests](#running-the-tests)
+* [Releasing the library](#releasing-the-library)
+
+## Contributor License Agreements
+
+We'd love to accept your sample apps and patches! Before we can take them, we
+have to jump a couple of legal hurdles.
+
+Please fill out either the individual or corporate Contributor License Agreement
+(CLA).
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual CLA](https://developers.google.com/open-source/cla/individual).
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA](https://developers.google.com/open-source/cla/corporate).
+
+Follow either of the two links above to access the appropriate CLA and
+instructions for how to sign and return it. Once we receive it, we'll be able to
+accept your pull requests.
+
+## Contributing A Patch
+
+1. Submit an issue describing your proposed change to the repo in question.
+1. The repo owner will respond to your issue promptly.
+1. If your proposed change is accepted, and you haven't already done so, sign a
+ Contributor License Agreement (see details above).
+1. Fork the desired repo, develop and test your code changes.
+1. Ensure that your code adheres to the existing style in the code to which
+ you are contributing.
+1. Ensure that your code has an appropriate set of tests which all pass.
+1. Title your pull request following [Conventional Commits](https://www.conventionalcommits.org/) styling.
+1. Submit a pull request.
+
+### Before you begin
+
+1. [Select or create a Cloud Platform project][projects].
+1. [Enable billing for your project][billing].
+1. [Enable the Cloud Spanner API][enable_api].
+1. [Set up authentication with a service account][auth] so you can access the
+ API from your local workstation.
+
+
+## Running the tests
+
+1. [Prepare your environment for Node.js setup][setup].
+
+1. Install dependencies:
+
+ npm install
+
+1. Run the tests:
+
+ # Run unit tests.
+ npm test
+
+ # Run sample integration tests.
+ npm run samples-test
+
+ # Run all system tests.
+ npm run system-test
+
+1. Lint (and maybe fix) any changes:
+
+ npm run fix
+
+[setup]: https://cloud.google.com/nodejs/docs/setup
+[projects]: https://console.cloud.google.com/project
+[billing]: https://support.google.com/cloud/answer/6293499#enable-billing
+[enable_api]: https://console.cloud.google.com/flows/enableapi?apiid=spanner.googleapis.com
+[auth]: https://cloud.google.com/docs/authentication/getting-started
\ No newline at end of file
diff --git a/handwritten/spanner/LICENSE b/handwritten/spanner/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/handwritten/spanner/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/handwritten/spanner/OBSERVABILITY.md b/handwritten/spanner/OBSERVABILITY.md
new file mode 100644
index 00000000000..ce82cab99bb
--- /dev/null
+++ b/handwritten/spanner/OBSERVABILITY.md
@@ -0,0 +1,132 @@
+## Observability with OpenTelemetry
+
+This Cloud Spanner client supports [OpenTelemetry Traces](https://opentelemetry.io/), which gives insight into the client internals and aids in debugging/troubleshooting production issues.
+
+By default, the functionality is disabled. You will need to add OpenTelemetry dependencies,
+and configure and enable OpenTelemetry with appropriate exporters at the startup of your application:
+
+**Table of contents:**
+
+* [Observability](#observability)
+ * [Tracing](#tracing)
+ * [OpenTelemetry Dependencies](#opentelemetry-dependencies)
+ * [OpenTelemetry Configuration](#opentelemetry-configuration)
+ * [SQL Statement span annotation](#sql-statement-span-annotation)
+  * [OpenTelemetry gRPC instrumentation](#opentelemetry-grpc-instrumentation)
+ * [Tracing Sample](#tracing-sample)
+
+### Tracing
+
+#### OpenTelemetry Dependencies
+
+Add the following dependencies in your `package.json` or install them directly.
+```javascript
+// Required packages for OpenTelemetry SDKs
+"@opentelemetry/sdk-trace-base": "^1.26.0",
+"@opentelemetry/sdk-trace-node": "^1.26.0",
+
+// Package to use Google Cloud Trace exporter
+"@google-cloud/opentelemetry-cloud-trace-exporter": "^2.4.1",
+
+// Packages to enable gRPC instrumentation
+"@opentelemetry/instrumentation": "^0.53.0",
+"@opentelemetry/instrumentation-grpc": "^0.53.0",
+```
+
+#### OpenTelemetry Configuration
+
+```javascript
+const {
+ NodeTracerProvider,
+ TraceIdRatioBasedSampler,
+} = require('@opentelemetry/sdk-trace-node');
+const {
+ BatchSpanProcessor,
+} = require('@opentelemetry/sdk-trace-base');
+const {
+ TraceExporter,
+} = require('@google-cloud/opentelemetry-cloud-trace-exporter');
+const exporter = new TraceExporter();
+
+// Create the tracerProvider that the exporter shall be attached to.
+const provider = new NodeTracerProvider({
+ resource: resource,
+ spanProcessors: [new BatchSpanProcessor(exporter)]
+});
+
+// Create the Cloud Spanner Client.
+const {Spanner} = require('@google-cloud/spanner');
+const spanner = new Spanner({
+ projectId: projectId,
+ observabilityOptions: {
+ // Inject the TracerProvider via SpannerOptions or
+ // register it as a global by invoking `provider.register()`
+ tracerProvider: provider,
+ },
+});
+```
+
+#### SQL Statement span annotation
+
+To allow your SQL statements to be annotated in the appropriate spans, you need to opt-in, because
+SQL statements can contain sensitive personally-identifiable-information (PII).
+
+You can opt-in by either:
+
+* Setting the environment variable `SPANNER_ENABLE_EXTENDED_TRACING=true` before your application is started
+* In code, setting `enableExtendedTracing: true` in your SpannerOptions before creating the Cloud Spanner client
+
+```javascript
+const spanner = new Spanner({
+ projectId: projectId,
+ observabilityOptions: {
+ tracerProvider: provider,
+ enableExtendedTracing: true,
+ }
+});
+```
+
+#### End to end tracing
+
+In addition to client-side tracing, you can opt in for end-to-end tracing. End-to-end tracing helps you understand and debug latency issues that are specific to Spanner. Refer [here](https://cloud.google.com/spanner/docs/tracing-overview) for more information.
+
+To configure end-to-end tracing:
+
+1. Opt in for end-to-end tracing. You can opt-in by either:
+* Setting the environment variable `SPANNER_ENABLE_END_TO_END_TRACING=true` before your application is started
+* In code, setting `enableEndToEndTracing: true` in your SpannerOptions before creating the Cloud Spanner client
+
+```javascript
+const spanner = new Spanner({
+ projectId: projectId,
+ observabilityOptions: {
+ tracerProvider: provider,
+ enableEndToEndTracing: true,
+ }
+});
+```
+
+2. Set the trace context propagation in OpenTelemetry.
+```javascript
+const {propagation} = require('@opentelemetry/api');
+const {W3CTraceContextPropagator} = require('@opentelemetry/core');
+propagation.setGlobalPropagator(new W3CTraceContextPropagator());
+```
+
+#### OpenTelemetry gRPC instrumentation
+
+Optionally, you can enable OpenTelemetry gRPC instrumentation which produces traces of executed remote procedure calls (RPCs)
+in your programs by these imports and instantiation. You could pass in the tracerProvider or register it globally
+by invoking `tracerProvider.register()`
+
+```javascript
+ const {registerInstrumentations} = require('@opentelemetry/instrumentation');
+ const {GrpcInstrumentation} = require('@opentelemetry/instrumentation-grpc');
+ registerInstrumentations({
+ tracerProvider: tracerProvider,
+ instrumentations: [new GrpcInstrumentation()],
+ });
+```
+
+#### Tracing Sample
+For more information please see this [sample code](./samples/observability-traces.js)
diff --git a/handwritten/spanner/README.md b/handwritten/spanner/README.md
new file mode 100644
index 00000000000..2f55e5c9aff
--- /dev/null
+++ b/handwritten/spanner/README.md
@@ -0,0 +1,329 @@
+[//]: # "This README.md file is auto-generated, all changes to this file will be lost."
+[//]: # "To regenerate it, use `python -m synthtool`."
+
+
+# [Cloud Spanner: Node.js Client](https://github.com/googleapis/nodejs-spanner)
+
+[](https://cloud.google.com/terms/launch-stages)
+[](https://www.npmjs.com/package/@google-cloud/spanner)
+
+
+
+
+[Cloud Spanner](https://cloud.google.com/spanner/docs/) is a fully managed, mission-critical, relational database service that
+offers transactional consistency at global scale, schemas, SQL (ANSI 2011 with extensions),
+and automatic, synchronous replication for high availability.
+
+
+A comprehensive list of changes in each version may be found in
+[the CHANGELOG](https://github.com/googleapis/nodejs-spanner/blob/main/CHANGELOG.md).
+
+* [Cloud Spanner Node.js Client API Reference][client-docs]
+* [Cloud Spanner Documentation][product-docs]
+* [github.com/googleapis/nodejs-spanner](https://github.com/googleapis/nodejs-spanner)
+
+Read more about the client libraries for Cloud APIs, including the older
+Google APIs Client Libraries, in [Client Libraries Explained][explained].
+
+[explained]: https://cloud.google.com/apis/docs/client-libraries-explained
+
+**Table of contents:**
+
+
+* [Quickstart](#quickstart)
+ * [Before you begin](#before-you-begin)
+ * [Installing the client library](#installing-the-client-library)
+ * [Using the client library](#using-the-client-library)
+* [Samples](#samples)
+* [Versioning](#versioning)
+* [Contributing](#contributing)
+* [License](#license)
+
+## Quickstart
+
+### Before you begin
+
+1. [Select or create a Cloud Platform project][projects].
+1. [Enable billing for your project][billing].
+1. [Enable the Cloud Spanner API][enable_api].
+1. [Set up authentication][auth] so you can access the
+ API from your local workstation.
+
+### Installing the client library
+
+```bash
+npm install @google-cloud/spanner
+```
+
+
+### Using the client library
+
+```javascript
+// Imports the Google Cloud client library
+const {Spanner} = require('@google-cloud/spanner');
+
+// Creates a client
+const spanner = new Spanner({projectId});
+
+// Gets a reference to a Cloud Spanner instance and database
+const instance = spanner.instance(instanceId);
+const database = instance.database(databaseId);
+
+// The query to execute
+const query = {
+ sql: 'SELECT 1',
+};
+
+// Execute a simple SQL statement
+const [rows] = await database.run(query);
+console.log(`Query: ${rows.length} found.`);
+rows.forEach(row => console.log(row));
+
+```
+## Metrics
+
+Cloud Spanner client supports [client-side metrics](https://cloud.google.com/spanner/docs/view-manage-client-side-metrics) that you can use along with server-side metrics to optimize performance and troubleshoot performance issues if they occur.
+
+Client-side metrics are measured from the time a request leaves your application to the time your application receives the response.
+In contrast, server-side metrics are measured from the time Spanner receives a request until the last byte of data is sent to the client.
+
+These metrics are enabled by default. You can opt out of using client-side metrics with the following code:
+
+```javascript
+const spanner = new Spanner({
+ disableBuiltInMetrics: true
+});
+```
+
+You can also disable these metrics by setting `SPANNER_DISABLE_BUILTIN_METRICS` to `true`.
+
+> Note: Client-side metrics needs `monitoring.timeSeries.create` IAM permission to export metrics data. Ask your administrator to grant your service account the [Monitoring Metric Writer](https://cloud.google.com/iam/docs/roles-permissions/monitoring#monitoring.metricWriter) (roles/monitoring.metricWriter) IAM role on the project.
+
+## Traces
+Refer to the Observability README to learn more about tracing support in the Cloud Spanner client.
+
+## Multiplexed Sessions
+
+Spanner's multiplexed sessions mode is now the default session mode in the Node.js client. This feature helps reduce
+session management overhead and minimize session-related errors.
+
+For a detailed explanation on multiplexed sessions, please refer to the [official documentation](https://cloud.google.com/spanner/docs/sessions#multiplexed_sessions).
+
+## Regular Sessions
+
+To use regular sessions, disable multiplexed sessions by setting the following environment variables to `false`:
+
+* **For Read-Only Transactions:**
+- `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS`
+* **For Partitioned Operations:**
+- `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS`
+- `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS`
+* **For Read-Write Transactions:**
+- `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS`
+- `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW`
+
+For a detailed explanation on session modes and env configurations, please refer to the [official documentation](https://cloud.google.com/spanner/docs/sessions).
+
+
+## Samples
+
+Samples are in the [`samples/`](https://github.com/googleapis/nodejs-spanner/tree/main/samples) directory. Each sample's `README.md` has instructions for running its sample.
+
+| Sample | Source Code | Try it |
+| --------------------------- | --------------------------------- | ------ |
+| Add and drop new database role | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/add-and-drop-new-database-role.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/add-and-drop-new-database-role.js,samples/README.md) |
+| Backups-cancel | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-cancel.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-cancel.js,samples/README.md) |
+| Copies a source backup | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-copy-with-multiple-kms-keys.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-copy-with-multiple-kms-keys.js,samples/README.md) |
+| Copies a source backup | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-copy.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-copy.js,samples/README.md) |
+| Backups-create-with-encryption-key | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-create-with-encryption-key.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-create-with-encryption-key.js,samples/README.md) |
+| Backups-create-with-multiple-kms-keys | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-create-with-multiple-kms-keys.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-create-with-multiple-kms-keys.js,samples/README.md) |
+| Backups-create | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-create.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-create.js,samples/README.md) |
+| Backups-delete | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-delete.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-delete.js,samples/README.md) |
+| Backups-get-database-operations | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-get-database-operations.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-get-database-operations.js,samples/README.md) |
+| Backups-get-operations | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-get-operations.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-get-operations.js,samples/README.md) |
+| Backups-get | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-get.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-get.js,samples/README.md) |
+| Backups-restore-with-encryption-key | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-restore-with-encryption-key.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-restore-with-encryption-key.js,samples/README.md) |
+| Backups-restore-with-multiple-kms-keys | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-restore-with-multiple-kms-keys.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-restore-with-multiple-kms-keys.js,samples/README.md) |
+| Backups-restore | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-restore.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-restore.js,samples/README.md) |
+| Backups-update | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups-update.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups-update.js,samples/README.md) |
+| Backups | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/backups.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/backups.js,samples/README.md) |
+| Batch Write | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/batch-write.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/batch-write.js,samples/README.md) |
+| Batch | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/batch.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/batch.js,samples/README.md) |
+| Creates a full backup schedule | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/create-full-backup-schedule.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/create-full-backup-schedule.js,samples/README.md) |
+| Creates an incremental backup schedule | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/create-incremental-backup-schedule.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/create-incremental-backup-schedule.js,samples/README.md) |
+| Create-instance-without-default-backup-schedules | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/create-instance-without-default-backup-schedules.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/create-instance-without-default-backup-schedules.js,samples/README.md) |
+| CRUD | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/crud.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/crud.js,samples/README.md) |
+| Adds split points to a database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-add-split-points.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-add-split-points.js,samples/README.md) |
+| Creates a new database with a specific default leader | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-create-with-default-leader.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-create-with-default-leader.js,samples/README.md) |
+| Database-create-with-encryption-key | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-create-with-encryption-key.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-create-with-encryption-key.js,samples/README.md) |
+| Database-create-with-multiple-kms-keys | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-create-with-multiple-kms-keys.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-create-with-multiple-kms-keys.js,samples/README.md) |
+| Database-create-with-version-retention-period | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-create-with-version-retention-period.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-create-with-version-retention-period.js,samples/README.md) |
+| Gets the schema definition of an existing database | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-get-ddl.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-get-ddl.js,samples/README.md) |
+| Gets the default leader option of an existing database | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-get-default-leader.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-get-default-leader.js,samples/README.md) |
+| Updates the default leader of an existing database | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-update-default-leader.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-update-default-leader.js,samples/README.md) |
+| Updates a Cloud Spanner Database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/database-update.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/database-update.js,samples/README.md) |
+| Datatypes | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/datatypes.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/datatypes.js,samples/README.md) |
+| Deletes a backup schedule | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/delete-backup-schedule.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/delete-backup-schedule.js,samples/README.md) |
+| Runs an execute sql request with directed read options | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/directed-reads.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/directed-reads.js,samples/README.md) |
+| Delete using DML returning. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/dml-returning-delete.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/dml-returning-delete.js,samples/README.md) |
+| Insert using DML returning. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/dml-returning-insert.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/dml-returning-insert.js,samples/README.md) |
+| Update using DML returning. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/dml-returning-update.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/dml-returning-update.js,samples/README.md) |
+| DML | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/dml.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/dml.js,samples/README.md) |
+| Enable fine grained access control | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/enable-fine-grained-access.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/enable-fine-grained-access.js,samples/README.md) |
+| Gets a backup schedule | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/get-backup-schedule.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/get-backup-schedule.js,samples/README.md) |
+| Get-commit-stats | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/get-commit-stats.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/get-commit-stats.js,samples/README.md) |
+| List database roles | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/get-database-roles.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/get-database-roles.js,samples/README.md) |
+| Gets the instance config metadata for the configuration nam6 | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/get-instance-config.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/get-instance-config.js,samples/README.md) |
+| Creates a new value-storing index | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/index-create-storing.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/index-create-storing.js,samples/README.md) |
+| Creates a new index | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/index-create.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/index-create.js,samples/README.md) |
+| Executes a read-only SQL query using an existing index. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/index-query-data.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/index-query-data.js,samples/README.md) |
+| Reads data using an existing storing index. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/index-read-data-with-storing.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/index-read-data-with-storing.js,samples/README.md) |
+| Read data using an existing index. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/index-read-data.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/index-read-data.js,samples/README.md) |
+| Indexing | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/indexing.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/indexing.js,samples/README.md) |
+| Creates a user-managed instance configuration. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-config-create.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-config-create.js,samples/README.md) |
+| Deletes a user-managed instance configuration. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-config-delete.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-config-delete.js,samples/README.md) |
+| Lists the instance configuration operations. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-config-get-operations.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-config-get-operations.js,samples/README.md) |
+| Updates a user-managed instance configuration. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-config-update.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-config-update.js,samples/README.md) |
+| Creates a new instance partition | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-partition-create.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-partition-create.js,samples/README.md) |
+| Updates an instance. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-update.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-update.js,samples/README.md) |
+| Creates a instance with asymmetric autoscaling config. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-with-asymmetric-autoscaling-config.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-with-asymmetric-autoscaling-config.js,samples/README.md) |
+| Creates a instance with autoscaling config. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-with-autoscaling-config.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-with-autoscaling-config.js,samples/README.md) |
+| Instance-with-processing-units | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance-with-processing-units.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance-with-processing-units.js,samples/README.md) |
+| Instance | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/instance.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/instance.js,samples/README.md) |
+| Json-add-column | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/json-add-column.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/json-add-column.js,samples/README.md) |
+| Json-query-parameter | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/json-query-parameter.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/json-query-parameter.js,samples/README.md) |
+| Json-update-data | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/json-update-data.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/json-update-data.js,samples/README.md) |
+| Lists backup schedules of a database | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/list-backup-schedules.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/list-backup-schedules.js,samples/README.md) |
+| Lists all databases on the selected instance | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/list-databases.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/list-databases.js,samples/README.md) |
+| Lists all the available instance configs for the selected project. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/list-instance-configs.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/list-instance-configs.js,samples/README.md) |
+| Executes request with max commit delay | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/max-commit-delay.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/max-commit-delay.js,samples/README.md) |
+| Numeric-add-column | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/numeric-add-column.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/numeric-add-column.js,samples/README.md) |
+| Numeric-query-parameter | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/numeric-query-parameter.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/numeric-query-parameter.js,samples/README.md) |
+| Numeric-update-data | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/numeric-update-data.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/numeric-update-data.js,samples/README.md) |
+| Observability (Tracing) with OpenTelemetry using OTLP | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/observability-traces-otlp.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/observability-traces-otlp.js,samples/README.md) |
+| Observability (Tracing) with OpenTelemetry | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/observability-traces.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/observability-traces.js,samples/README.md) |
+| Adds a column to an existing table in a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-add-column.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-add-column.js,samples/README.md) |
+| Showcase the rules for case-sensitivity and case folding for a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-case-sensitivity.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-case-sensitivity.js,samples/README.md) |
+| Creates a PostgreSQL Database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-database-create.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-database-create.js,samples/README.md) |
+| Use cast operator to cast from one data type to another in a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-datatypes-casting.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-datatypes-casting.js,samples/README.md) |
+| Execute a batch of DML statements on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-dml-batch.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-dml-batch.js,samples/README.md) |
+| Updates data in a table in a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-dml-getting-started-update.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-dml-getting-started-update.js,samples/README.md) |
+| Execute a Partitioned DML on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-dml-partitioned.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-dml-partitioned.js,samples/README.md) |
+| Delete using DML returning on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-dml-returning-delete.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-dml-returning-delete.js,samples/README.md) |
+| Insert using DML returning on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-dml-returning-insert.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-dml-returning-insert.js,samples/README.md) |
+| Update using DML returning on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-dml-returning-update.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-dml-returning-update.js,samples/README.md) |
+| Execute a DML statement with parameters on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-dml-with-parameter.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-dml-with-parameter.js,samples/README.md) |
+| Calls a server side function on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-functions.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-functions.js,samples/README.md) |
+| Creates a new storing index in a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-index-create-storing.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-index-create-storing.js,samples/README.md) |
+| Creates an interleaved table hierarchy using PostgreSQL dialect. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-interleaving.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-interleaving.js,samples/README.md) |
+| Showcase how to add a jsonb column in a PostgreSQL table. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-jsonb-add-column.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-jsonb-add-column.js,samples/README.md) |
+| Showcase how to query data in a jsonb column in a PostgreSQL table. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-jsonb-query-parameter.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-jsonb-query-parameter.js,samples/README.md) |
+| Showcase how to update data in a jsonb column in a PostgreSQL table. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-jsonb-update-data.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-jsonb-update-data.js,samples/README.md) |
+| Showcase how to work with the PostgreSQL NUMERIC/DECIMAL data type on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-numeric-data-type.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-numeric-data-type.js,samples/README.md) |
+| Showcases how a Spanner PostgreSQL database orders null values in a query. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-ordering-nulls.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-ordering-nulls.js,samples/README.md) |
+| Execute a query with parameters on a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-query-parameter.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-query-parameter.js,samples/README.md) |
+| Query the information schema metadata in a Spanner PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-schema-information.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-schema-information.js,samples/README.md) |
+| Alters a sequence in a PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-sequence-alter.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-sequence-alter.js,samples/README.md) |
+| Creates sequence in PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-sequence-create.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-sequence-create.js,samples/README.md) |
+| Drops a sequence in PostgreSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/pg-sequence-drop.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/pg-sequence-drop.js,samples/README.md) |
+| Proto-query-data | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/proto-query-data.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/proto-query-data.js,samples/README.md) |
+| Creates a new database with a proto column and enum | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/proto-type-add-column.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/proto-type-add-column.js,samples/README.md) |
+| Proto-update-data-dml | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/proto-update-data-dml.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/proto-update-data-dml.js,samples/README.md) |
+| Proto-update-data | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/proto-update-data.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/proto-update-data.js,samples/README.md) |
+| Queryoptions | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/queryoptions.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/queryoptions.js,samples/README.md) |
+| Quickstart | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/quickstart.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/quickstart.js,samples/README.md) |
+| Read data with database role | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/read-data-with-database-role.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/read-data-with-database-role.js,samples/README.md) |
+| Performs a read-write transaction with read lock mode option | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/read-lock-mode.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/read-lock-mode.js,samples/README.md) |
+| Performs a read-write transaction with isolation level option | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/repeatable-reads.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/repeatable-reads.js,samples/README.md) |
+| Sets a request tag for a single query | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/request-tag.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/request-tag.js,samples/README.md) |
+| Run Batch update with RPC priority | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/rpc-priority-batch-dml.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/rpc-priority-batch-dml.js,samples/README.md) |
+| Run partitioned update with RPC priority | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/rpc-priority-partitioned-dml.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/rpc-priority-partitioned-dml.js,samples/README.md) |
+| Create partitions with RPC priority | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/rpc-priority-query-partitions.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/rpc-priority-query-partitions.js,samples/README.md) |
+| Read data with RPC Priority | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/rpc-priority-read.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/rpc-priority-read.js,samples/README.md) |
+| Query data with RPC Priority | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/rpc-priority-run.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/rpc-priority-run.js,samples/README.md) |
+| Run transaction with RPC priority | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/rpc-priority-transaction.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/rpc-priority-transaction.js,samples/README.md) |
+| Schema | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/schema.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/schema.js,samples/README.md) |
+| Alters a sequence in a GoogleSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/sequence-alter.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/sequence-alter.js,samples/README.md) |
+| Creates sequence in GoogleSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/sequence-create.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/sequence-create.js,samples/README.md) |
+| Drops a sequence in GoogleSQL database. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/sequence-drop.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/sequence-drop.js,samples/README.md) |
+| Executes a read/write transaction with statement timeout | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/statement-timeout.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/statement-timeout.js,samples/README.md) |
+| Struct | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/struct.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/struct.js,samples/README.md) |
+| Alters a table with foreign key delete cascade action | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/table-alter-with-foreign-key-delete-cascade.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/table-alter-with-foreign-key-delete-cascade.js,samples/README.md) |
+| Creates a table with foreign key delete cascade action | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/table-create-with-foreign-key-delete-cascade.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/table-create-with-foreign-key-delete-cascade.js,samples/README.md) |
+| Drops a foreign key constraint with delete cascade action | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/table-drop-foreign-key-constraint-delete-cascade.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/table-drop-foreign-key-constraint-delete-cascade.js,samples/README.md) |
+| Timestamp | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/timestamp.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/timestamp.js,samples/README.md) |
+| Executes a read/write transaction with transaction and request tags | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/transaction-tag.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/transaction-tag.js,samples/README.md) |
+| Executes a read/write transaction with transaction timeout | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/transaction-timeout.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/transaction-timeout.js,samples/README.md) |
+| Transaction | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/transaction.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/transaction.js,samples/README.md) |
+| Updates a backup schedule | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/update-backup-schedule.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/update-backup-schedule.js,samples/README.md) |
+| Updates an instance. | [source code](https://github.com/googleapis/nodejs-spanner/blob/main/samples/update-instance-default-backup-schedule-type.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-spanner&page=editor&open_in_editor=samples/update-instance-default-backup-schedule-type.js,samples/README.md) |
+
+
+
+The [Cloud Spanner Node.js Client API Reference][client-docs] documentation
+also contains samples.
+
+## Supported Node.js Versions
+
+Our client libraries follow the [Node.js release schedule](https://github.com/nodejs/release#release-schedule).
+Libraries are compatible with all current _active_ and _maintenance_ versions of
+Node.js.
+If you are using an end-of-life version of Node.js, we recommend that you update
+as soon as possible to an actively supported LTS version.
+
+Google's client libraries support legacy versions of Node.js runtimes on a
+best-efforts basis with the following warnings:
+
+* Legacy versions are not tested in continuous integration.
+* Some security patches and features cannot be backported.
+* Dependencies cannot be kept up-to-date.
+
+Client libraries targeting some end-of-life versions of Node.js are available, and
+can be installed through npm [dist-tags](https://docs.npmjs.com/cli/dist-tag).
+The dist-tags follow the naming convention `legacy-(version)`.
+For example, `npm install @google-cloud/spanner@legacy-8` installs client libraries
+for versions compatible with Node.js 8.
+
+## Versioning
+
+This library follows [Semantic Versioning](http://semver.org/).
+
+
+
+This library is considered to be **stable**. The code surface will not change in backwards-incompatible ways
+unless absolutely necessary (e.g. because of critical security issues) or with
+an extensive deprecation period. Issues and requests against **stable** libraries
+are addressed with the highest priority.
+
+
+
+
+
+
+More Information: [Google Cloud Platform Launch Stages][launch_stages]
+
+[launch_stages]: https://cloud.google.com/terms/launch-stages
+
+## Contributing
+
+Contributions welcome! See the [Contributing Guide](https://github.com/googleapis/nodejs-spanner/blob/main/CONTRIBUTING.md).
+
+Please note that this `README.md`, the `samples/README.md`,
+and a variety of configuration files in this repository (including `.nycrc` and `tsconfig.json`)
+are generated from a central template. To edit one of these files, make an edit
+to its templates in the
+[synthtool repository](https://github.com/googleapis/synthtool).
+
+## License
+
+Apache Version 2.0
+
+See [LICENSE](https://github.com/googleapis/nodejs-spanner/blob/main/LICENSE)
+
+[client-docs]: https://cloud.google.com/nodejs/docs/reference/spanner/latest
+[product-docs]: https://cloud.google.com/spanner/docs/
+[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png
+[projects]: https://console.cloud.google.com/project
+[billing]: https://support.google.com/cloud/answer/6293499#enable-billing
+[enable_api]: https://console.cloud.google.com/flows/enableapi?apiid=spanner.googleapis.com
+[auth]: https://cloud.google.com/docs/authentication/external/set-up-adc-local
diff --git a/handwritten/spanner/benchmark/.eslintrc.yml b/handwritten/spanner/benchmark/.eslintrc.yml
new file mode 100644
index 00000000000..282535f55f6
--- /dev/null
+++ b/handwritten/spanner/benchmark/.eslintrc.yml
@@ -0,0 +1,3 @@
+---
+rules:
+ no-console: off
diff --git a/handwritten/spanner/benchmark/README.md b/handwritten/spanner/benchmark/README.md
new file mode 100644
index 00000000000..8524b54d512
--- /dev/null
+++ b/handwritten/spanner/benchmark/README.md
@@ -0,0 +1,70 @@
+# YCSB Benchmarks
+
+Implements [YCSB](https://github.com/brianfrankcooper/YCSB) clients for cloud
+Spanner in Node.js.
+
+Visit [YCSBClientCloudSpanner](https://github.com/haih-g/YCSBClientCloudSpanner)
+for more details.
+
+## Setup
+
+First you need to set up a Cloud Spanner instance and database. Then you can use
+[YCSB](https://github.com/brianfrankcooper/YCSB) to load the database. Then you
+can run the client benchmarks.
+
+### Set up the database
+
+```sh
+$ gcloud spanner instances create ycsb-instance --nodes 1 \
+ --config regional-us-central1 --description YCSB
+$ gcloud spanner databases create ycsb --instance ycsb-instance
+$ gcloud spanner databases ddl update ycsb --instance ycsb-instance \
+ --ddl="CREATE TABLE usertable (
+ id STRING(MAX),
+ field0 STRING(MAX),
+ field1 STRING(MAX),
+ field2 STRING(MAX),
+ field3 STRING(MAX),
+ field4 STRING(MAX),
+ field5 STRING(MAX),
+ field6 STRING(MAX),
+ field7 STRING(MAX),
+ field8 STRING(MAX),
+ field9 STRING(MAX),
+ ) PRIMARY KEY(id)"
+```
+
+### Use YCSB to load data
+
+You need to set up some environment variables first. You should use your own
+gcloud credentials and project.
+
+```sh
+$ export GOOGLE_APPLICATION_CREDENTIALS=/usr/local/google/home/haih/cloud-spanner-client-benchmark.json
+$ export GCLOUD_PROJECT=cloud-spanner-client-benchmark
+```
+
+Then download YCSB and load the database.
+
+```sh
+$ curl https://storage.googleapis.com/cloud-spanner-ycsb-custom-release/ycsb-cloudspanner-binding-0.13.0.tar.gz | tar -xzv
+$ ycsb-cloudspanner-binding-0.13.0/bin/ycsb load cloudspanner \
+ -P ycsb-cloudspanner-binding-0.13.0/workloads/workloada \
+ -p table=usertable -p cloudspanner.instance=ycsb-instance \
+ -p recordcount=5000 -p operationcount=100 -p cloudspanner.database=ycsb \
+ -threads 32
+```
+
+## Run benchmarks
+
+```sh
+$ npm run ycsb
+```
+
+Or if you want complete control over the parameters.
+
+```sh
+node benchmark/ycsb.js run -P benchmark/workloada -p table=usertable \
+ -p cloudspanner.instance=ycsb-542756a4 -p recordcount=5000 \
+ -p operationcount=100 -p cloudspanner.database=ycsb -p num_worker=1
+```
diff --git a/handwritten/spanner/benchmark/benchmarking-multiplexed-session.js b/handwritten/spanner/benchmark/benchmarking-multiplexed-session.js
new file mode 100644
index 00000000000..44b90161c0e
--- /dev/null
+++ b/handwritten/spanner/benchmark/benchmarking-multiplexed-session.js
@@ -0,0 +1,261 @@
+/*!
+ * Copyright 2025 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+const {randomUUID} = require('crypto');
+
+const thread_execution_times = [];
+const transaction_times = [];
+async function main(
+ instanceId,
+ databaseId,
+ projectId,
+ method,
+ numThreads,
+ numQueries,
+) {
+ const {Spanner} = require('../build/src');
+ const {performance} = require('perf_hooks');
+ const spanner = new Spanner({
+ projectId: projectId,
+ });
+
+ // Gets a reference to a Cloud Spanner instance and database
+ const instance = spanner.instance(instanceId);
+ const database = instance.database(databaseId);
+
+ // generate random read queries
+ function generateReadQuery() {
+ const id = Math.floor(Math.random() * 10000) + 1;
+ const query = {
+ sql: 'SELECT SingerId from Singers WHERE SingerId = @id',
+ params: {id: id},
+ };
+ return query;
+ }
+
+ // generate random update queries
+ function generateUpdateQuery() {
+ const id = Math.floor(Math.random() * 10000) + 1;
+ const name = randomUUID();
+ const query = {
+ sql: 'UPDATE Singers SET FirstName = @name WHERE SingerId = @id',
+ params: {
+ id: id,
+ name: name,
+ },
+ };
+ return query;
+ }
+ // warm up queries(~5-6 min)
+ for (let i = 0; i < 60000; i++) {
+ await database.run(generateReadQuery());
+ }
+
+ // case: read and DML
+ async function readAndDML() {
+ const startThreadTime = performance.now();
+
+ for (let i = 0; i < numQueries; i++) {
+ const startTime = performance.now();
+ await database.runTransactionAsync(async tx => {
+ const [rows] = await tx.run(generateReadQuery());
+ rows.forEach(row => {
+ const json = row.toJSON();
+ console.log(`SingerId: ${json.SingerId}`);
+ });
+ await tx.runUpdate(generateUpdateQuery());
+ await tx.commit();
+ console.log('transaction done.');
+ });
+ const operationTime = performance.now() - startTime;
+ // push the time taken by transaction to the array
+ transaction_times.push(operationTime);
+ }
+
+ // push the time taken by thread to the array
+ thread_execution_times.push(
+ (performance.now() - startThreadTime).toFixed(2),
+ );
+ }
+
+ // case: mutations only
+ async function mutationsOnly() {
+ const startThreadTime = performance.now();
+
+ for (let i = 0; i < numQueries; i++) {
+ const startTime = performance.now();
+ await database.runTransactionAsync(async tx => {
+ const id = Math.floor(Math.random() * 10000) + 1;
+ const name = randomUUID();
+ tx.upsert('Singers', [{SingerId: id, FirstName: name}]);
+ await tx.commit();
+ console.log('transaction done.');
+ });
+ const operationTime = performance.now() - startTime;
+ // push the time taken by transaction to the array
+ transaction_times.push(operationTime);
+ }
+
+ // push the time taken by thread to the array
+ thread_execution_times.push(
+ (performance.now() - startThreadTime).toFixed(2),
+ );
+ }
+
+ // case: read and mutations
+ async function readAndMutations() {
+ const startThreadTime = performance.now();
+
+ for (let i = 0; i < numQueries; i++) {
+ const startTime = performance.now();
+ await database.runTransactionAsync(async tx => {
+ const [rows] = await tx.run(generateReadQuery());
+ rows.forEach(row => {
+ const json = row.toJSON();
+ console.log(`SingerId: ${json.SingerId}`);
+ });
+ const id = Math.floor(Math.random() * 10000) + 1;
+ const name = randomUUID();
+ tx.upsert('Singers', [{SingerId: id, FirstName: name}]);
+ await tx.commit();
+ console.log('transaction done.');
+ });
+ const operationTime = performance.now() - startTime;
+ // push the time taken by transaction to the array
+ transaction_times.push(operationTime);
+ }
+
+ // push the time taken by thread to the array
+ thread_execution_times.push(
+ (performance.now() - startThreadTime).toFixed(2),
+ );
+ }
+
+ // case: single use transaction
+ async function singleUseTxn() {
+ const startThreadTime = performance.now();
+
+ for (let i = 0; i < numQueries; i++) {
+ const startTime = performance.now();
+ await database.run(generateReadQuery());
+ const operationTime = performance.now() - startTime;
+ // push the time taken by transaction to the array
+ transaction_times.push(operationTime);
+ }
+
+ // push the time taken by thread to the array
+ thread_execution_times.push(
+ (performance.now() - startThreadTime).toFixed(2),
+ );
+ }
+
+ // case: multi use transaction
+ async function multiUseTxn() {
+ const startThreadTime = performance.now();
+
+ for (let i = 0; i < numQueries; i++) {
+ const startTime = performance.now();
+ const [txn] = await database.getSnapshot();
+ // run 4 queries to make 4 RPC calls
+ await txn.run(generateReadQuery());
+ await txn.run(generateReadQuery());
+ await txn.run(generateReadQuery());
+ await txn.run(generateReadQuery());
+ txn.end();
+ const operationTime = (performance.now() - startTime).toFixed(2);
+ // push the time taken by transaction to the array
+ transaction_times.push(operationTime);
+ }
+
+ // push the time taken by thread to the array
+ thread_execution_times.push(
+ (performance.now() - startThreadTime).toFixed(2),
+ );
+ }
+
+ function calculatePercentiles(latencies) {
+ // Step 1: Sort the array
+ const sortedLatencies = latencies.slice().sort((a, b) => a - b);
+
+ // Step 2: Calculate average
+ const sum = sortedLatencies.reduce((acc, num) => acc + parseFloat(num), 0);
+ const average = (sum / sortedLatencies.length).toFixed(2);
+
+ // Step 3: Calculate p50 (50th percentile)
+ const p50Index = Math.floor(0.5 * sortedLatencies.length);
+ const p50Latency = parseFloat(sortedLatencies[p50Index]).toFixed(2);
+
+ // Step 4: Calculate p90 (90th percentile)
+ const p90Index = Math.floor(0.9 * sortedLatencies.length);
+ const p90Latency = parseFloat(sortedLatencies[p90Index]).toFixed(2);
+
+ // Step 5: Calculate p99 (99th percentile)
+ const p99Index = Math.floor(0.99 * sortedLatencies.length);
+ const p99Latency = parseFloat(sortedLatencies[p99Index]).toFixed(2);
+
+ return {
+ avg: average,
+ p50: p50Latency,
+ p90: p90Latency,
+ p99: p99Latency,
+ };
+ }
+
+ // run the threads concurrently
+ async function runConcurrently() {
+ const methodMap = {
+ readAndDML: readAndDML,
+ readAndMutations: readAndMutations,
+ mutationsOnly: mutationsOnly,
+ singleUseTxn: singleUseTxn,
+ multiUseTxn: multiUseTxn,
+ };
+ const funcToRun = methodMap[method];
+ const promises = [];
+ for (let i = 0; i < numThreads; i++) {
+ promises.push(funcToRun());
+ }
+ await Promise.all(promises);
+ // print the time taken by each thread
+ console.log('excution time taken by threads are: ');
+ thread_execution_times.forEach(executionTime => {
+ console.log(executionTime);
+ });
+ }
+
+ try {
+ // wait for all the threads to complete the execution
+ await runConcurrently();
+ // calculate percentiles
+ const percentiles = calculatePercentiles(transaction_times);
+ // print percentiles results
+ console.log(`average Latency: ${percentiles.avg}`);
+ console.log(`p50 Latency: ${percentiles.p50}`);
+ console.log(`p90 Latency: ${percentiles.p90}`);
+ console.log(`p99 Latency: ${percentiles.p99}`);
+ } catch (error) {
+ // log error if any
+ console.log('error: ', error);
+ }
+}
+
+process.on('unhandledRejection', err => {
+ console.error(err.message);
+ process.exitCode = 1;
+});
+main(...process.argv.slice(2));
diff --git a/handwritten/spanner/benchmark/bin/ycsb b/handwritten/spanner/benchmark/bin/ycsb
new file mode 100755
index 00000000000..def7796f6f1
--- /dev/null
+++ b/handwritten/spanner/benchmark/bin/ycsb
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# A YCSB-like executable that can be integrated into PerfKitBenchmarker.
+#
+# It is intended to be run in a VM that is brought up by PerfKitBenchmarker
+# with node.js, npm, and google-cloud for nodejs installed. The environment can
+# be configured as follows:
+#
+# curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.8/install.sh \
+# | bash
+# export NVM_DIR="$HOME/.nvm"
+# [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
+# nvm install stable
+# nvm alias default stable
+# npm install --save express @google-cloud/storage @google-cloud/common-grpc \
+# binary-search-bounds dedent fs path stats-lite yargs checkpoint-stream
+# lodash.chunk merge-stream google-gax lodash.flatten delay p-queue \
+# stack-trace lodash.snakecase lodash.random time-span
+# sudo apt-get -y install git
+# git clone https://github.com/googleapis/nodejs-spanner.git
+
+export NVM_DIR="$HOME/.nvm"
+[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+ln -s ${HOME}/nodejs-spanner/src ${DIR}/../../src
+ln -s ${HOME}/node_modules ${DIR}/../node_modules
+
+node ${DIR}/../ycsb.js run "${@:4}"
diff --git a/handwritten/spanner/benchmark/spanner.ts b/handwritten/spanner/benchmark/spanner.ts
new file mode 100644
index 00000000000..a62b474a6c1
--- /dev/null
+++ b/handwritten/spanner/benchmark/spanner.ts
@@ -0,0 +1,726 @@
+/*!
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {grpc} from 'google-gax';
+import {Database, Instance, SessionPool, Snapshot, Spanner} from '../src';
+import * as mock from '../test/mockserver/mockspanner';
+import {SimulatedExecutionTime} from '../test/mockserver/mockspanner';
+import * as mockInstanceAdmin from '../test/mockserver/mockinstanceadmin';
+import * as mockDatabaseAdmin from '../test/mockserver/mockdatabaseadmin';
+import {google} from '../protos/protos';
+import {SessionPoolOptions} from '../src/session-pool';
+import protobuf = google.spanner.v1;
+import {performance} from 'perf_hooks';
+
+let spannerMock;
+const server = new grpc.Server();
+const selectSql = 'SELECT 1';
+const updateSql = 'UPDATE FOO SET BAR=1 WHERE BAZ=2';
+let port: number;
+let spanner: Spanner;
+let instance: Instance;
+let dbCounter = 1;
+
+/**
+ * This file contains four standardized benchmark tests for Spanner using an
+ * in-memory mock Spanner server. The simulated network latency and execution
+ * times have been selected to be realistic, but at the same time also equal
+ * and repeatable to different platforms for easy comparison.
+ *
+ * The four benchmarks are:
+ * 1. burstRead: Execute a burst of read-only operations using single-use
+ * transactions. This is intended to benchmark the ability of the session
+ * pool to quickly create and hand out new sessions on demand.
+ * 2. burstWrite: Execute a burst of read/write transactions containing one
+ * update statement. This is intended to benchmark the ability of the session
+ * pool to quickly create and hand out new write-prepared sessions on demand.
+ * 3. burstReadAndWrite: Execute a burst of read and write operations in
+ * parallel. This is intended to benchmark the ability of the session pool
+ * to quickly create and hand out both read and write-prepared sessions in
+ * parallel on demand.
+ * 4. steadyIncrease: Execute a stream of read operations that all hold on to
+ * the session for a while. This will force the session pool to grow
+ * step-by-step up to the max number of sessions in the pool. This is
+ * intended to benchmark the ability of the session pool to efficiently
+ * increase the number of sessions in the pool, but not necessarily in
+ * parallel.
+ */
+require('yargs')
+ .demand(1)
+ .command('burstRead', 'Benchmarks a burst of read operations', {}, () =>
+ runBurstRead().then(() => console.log('Benchmark finished')),
+ )
+ .example('node $0 burstRead')
+ .command('burstWrite', 'Benchmarks a burst of write operations', {}, () =>
+ runBurstWrite().then(() => console.log('Benchmark finished')),
+ )
+ .example('node $0 burstWrite')
+ .command(
+ 'burstReadAndWrite',
+ 'Benchmarks a burst of read and write operations',
+ {},
+ () => runBurstReadAndWrite().then(() => console.log('Benchmark finished')),
+ )
+ .example('node $0 burstReadAndWrite')
+ .command(
+ 'multipleWriteBursts',
+ 'Benchmarks a burst of read and then write operations',
+ {},
+ () =>
+ runMultipleWriteBursts().then(() => console.log('Benchmark finished')),
+ )
+ .example('node $0 multipleWriteBursts')
+ .command(
+ 'oneReadTransactionPerSecond',
+ 'Benchmarks on avg one read tx per second',
+ {},
+ () =>
+ runOneReadTransactionPerSecond().then(() =>
+ console.log('Benchmark finished'),
+ ),
+ )
+ .example('node $0 oneReadTransactionPerSecond')
+ .command(
+ 'oneWriteTransactionPerSecond',
+ 'Benchmarks on avg one write tx per second',
+ {},
+ () =>
+ runOneWriteTransactionPerSecond().then(() =>
+ console.log('Benchmark finished'),
+ ),
+ )
+ .example('node $0 oneWriteTransactionPerSecond')
+ .command(
+ 'oneReadAndOneWriteTransactionPerSecond',
+ 'Benchmarks on avg one read and one write tx per second',
+ {},
+ () =>
+ runOneReadAndOneWriteTransactionPerSecond().then(() =>
+ console.log('Benchmark finished'),
+ ),
+ )
+ .example('node $0 oneReadAndOneWriteTransactionPerSecond')
+ .command(
+ 'steadyIncrease',
+ 'Benchmarks getting max sessions sequentially',
+ {},
+ () => runSteadyIncrease().then(() => console.log('Benchmark finished')),
+ )
+ .example('node $0 steadyIncrease')
+ .wrap(120)
+ .recommendCommands()
+ .strict()
+ .help().argv;
+
+async function runBurstRead() {
+ await setup();
+ await burstRead();
+ shutdown();
+}
+
+async function runBurstWrite() {
+ await setup();
+ await burstWrite();
+ shutdown();
+}
+
+async function runBurstReadAndWrite() {
+ await setup();
+ await burstReadAndWrite();
+ shutdown();
+}
+
+async function runMultipleWriteBursts() {
+ await setup();
+ await multipleWriteBursts();
+ shutdown();
+}
+
+async function runOneReadTransactionPerSecond() {
+ await setup();
+ await oneReadTransactionPerSecond();
+ shutdown();
+}
+
+async function runOneWriteTransactionPerSecond() {
+ await setup();
+ await oneWriteTransactionPerSecond();
+ shutdown();
+}
+
+async function runOneReadAndOneWriteTransactionPerSecond() {
+ await setup();
+ await oneReadAndOneWriteTransactionPerSecond();
+ shutdown();
+}
+
+async function runSteadyIncrease() {
+ await setup();
+ await steadyIncrease();
+ shutdown();
+}
+
+function newTestDatabase(options?: SessionPoolOptions): Database {
+ return instance.database(`database-${dbCounter++}`, options);
+}
+
+/**
+ * Sets up the mocked benchmark server with standardized execution times and
+ * network latency. Results for both a simple SELECT and a simple UPDATE
+ * statement are mocked on the server.
+ */
+async function setup() {
+ const NETWORK_LATENCY_TIME = 10;
+ const BATCH_CREATE_SESSIONS_MIN_TIME = 10;
+ const BATCH_CREATE_SESSIONS_RND_TIME = 10;
+ const BEGIN_TRANSACTION_MIN_TIME = 1;
+ const BEGIN_TRANSACTION_RND_TIME = 1;
+ const COMMIT_TRANSACTION_MIN_TIME = 5;
+ const COMMIT_TRANSACTION_RND_TIME = 5;
+ const ROLLBACK_TRANSACTION_MIN_TIME = 1;
+ const ROLLBACK_TRANSACTION_RND_TIME = 1;
+ const EXECUTE_STREAMING_SQL_MIN_TIME = 10;
+ const EXECUTE_STREAMING_SQL_RND_TIME = 10;
+ const EXECUTE_SQL_MIN_TIME = 10;
+ const EXECUTE_SQL_RND_TIME = 10;
+
+ spannerMock = mock.createMockSpanner(server);
+ mockInstanceAdmin.createMockInstanceAdmin(server);
+ mockDatabaseAdmin.createMockDatabaseAdmin(server);
+
+ port = await new Promise((resolve, reject) => {
+ server.bindAsync(
+ '0.0.0.0:0',
+ grpc.ServerCredentials.createInsecure(),
+ (err, assignedPort) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(assignedPort);
+ }
+ },
+ );
+ });
+ server.start();
+
+ spannerMock.setExecutionTime(
+ spannerMock.batchCreateSessions,
+ SimulatedExecutionTime.ofMinAndRandomExecTime(
+ NETWORK_LATENCY_TIME + BATCH_CREATE_SESSIONS_MIN_TIME,
+ BATCH_CREATE_SESSIONS_RND_TIME,
+ ),
+ );
+ spannerMock.setExecutionTime(
+ spannerMock.beginTransaction,
+ SimulatedExecutionTime.ofMinAndRandomExecTime(
+ NETWORK_LATENCY_TIME + BEGIN_TRANSACTION_MIN_TIME,
+ BEGIN_TRANSACTION_RND_TIME,
+ ),
+ );
+ spannerMock.setExecutionTime(
+ spannerMock.commit,
+ SimulatedExecutionTime.ofMinAndRandomExecTime(
+ NETWORK_LATENCY_TIME + COMMIT_TRANSACTION_MIN_TIME,
+ COMMIT_TRANSACTION_RND_TIME,
+ ),
+ );
+ spannerMock.setExecutionTime(
+ spannerMock.rollback,
+ SimulatedExecutionTime.ofMinAndRandomExecTime(
+ NETWORK_LATENCY_TIME + ROLLBACK_TRANSACTION_MIN_TIME,
+ ROLLBACK_TRANSACTION_RND_TIME,
+ ),
+ );
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ SimulatedExecutionTime.ofMinAndRandomExecTime(
+ NETWORK_LATENCY_TIME + EXECUTE_STREAMING_SQL_MIN_TIME,
+ EXECUTE_STREAMING_SQL_RND_TIME,
+ ),
+ );
+ spannerMock.setExecutionTime(
+ spannerMock.executeSql,
+ SimulatedExecutionTime.ofMinAndRandomExecTime(
+ NETWORK_LATENCY_TIME + EXECUTE_SQL_MIN_TIME,
+ EXECUTE_SQL_RND_TIME,
+ ),
+ );
+ spannerMock.putStatementResult(
+ selectSql,
+ mock.StatementResult.resultSet(createSelect1ResultSet()),
+ );
+ spannerMock.putStatementResult(
+ updateSql,
+ mock.StatementResult.updateCount(1),
+ );
+
+ spanner = new Spanner({
+ projectId: 'fake-project-id',
+ servicePath: 'localhost',
+ port,
+ sslCreds: grpc.credentials.createInsecure(),
+ });
+ // Gets a reference to a Cloud Spanner instance and database
+ instance = spanner.instance('instance');
+}
+
+/**
+ * Shutdown the benchmark server.
+ */
+function shutdown() {
+ server.tryShutdown(() => {});
+ console.log('Server closed');
+}
+
+/**
+ * Executes the burstRead benchmark.
+ */
+async function burstRead() {
+ console.log('Starting burstRead');
+ const HOLD_SESSION_TIME = 100;
+ const RND_WAIT_TIME_BETWEEN_REQUESTS = 10;
+ const NUM_BURST_READ = 3200;
+ // Value 'undefined' is used to warm up the compiler.
+ for (const incStep of [undefined, 1, 10, 20, 25, 30, 40, 50, 100]) {
+ spannerMock.resetRequests();
+ const database = newTestDatabase({
+ min: 100,
+ max: 400,
+ incStep: incStep,
+ });
+ const pool = database.pool_ as SessionPool;
+ try {
+ if (incStep) {
+ console.time(`burstRead incStep ${incStep}`);
+ }
+ const promises = queueReadOperations(
+ database,
+ NUM_BURST_READ,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ HOLD_SESSION_TIME,
+ );
+ await Promise.all(promises);
+ if (incStep) {
+ console.timeEnd(`burstRead incStep ${incStep}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ }
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+/**
+ * Executes the burstWrite benchmark.
+ */
+async function burstWrite() {
+ console.log('Starting burstWrite');
+ const RND_WAIT_TIME_BETWEEN_REQUESTS = 10;
+ const NUM_BURST_WRITE = 3200;
+ // Value 'undefined' is used to warm up the compiler.
+ for (const incStep of [undefined, 1, 10, 20, 25, 30, 40, 50, 100]) {
+ const database = newTestDatabase({
+ min: 100,
+ max: 400,
+ incStep: incStep,
+ });
+ const pool = database.pool_ as SessionPool;
+ try {
+ if (incStep) {
+ console.time(`burstWrite incStep ${incStep}`);
+ }
+ const promises = queueWriteOperations(
+ database,
+ NUM_BURST_WRITE,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ );
+ await Promise.all(promises);
+ if (incStep) {
+ console.timeEnd(`burstWrite incStep ${incStep}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ }
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+/**
+ * Executes the burstReadAndWrite benchmark.
+ */
+async function burstReadAndWrite() {
+ console.log('Starting burstReadAndWrite');
+ const HOLD_SESSION_TIME = 100;
+ const RND_WAIT_TIME_BETWEEN_REQUESTS = 10;
+ const NUM_BURST_READ = 1600;
+ const NUM_BURST_WRITE = 1600;
+ // Value 'undefined' is used to warm up the compiler.
+ for (const incStep of [undefined, 1, 10, 20, 25, 30, 40, 50, 100]) {
+ const database = newTestDatabase({
+ min: 100,
+ max: 400,
+ incStep: incStep,
+ });
+ const pool = database.pool_ as SessionPool;
+ try {
+ if (incStep) {
+ console.time(`burstReadAndWrite incStep ${incStep}`);
+ }
+ const readPromises = queueReadOperations(
+ database,
+ NUM_BURST_READ,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ HOLD_SESSION_TIME,
+ );
+ const writePromises = queueWriteOperations(
+ database,
+ NUM_BURST_WRITE,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ );
+ await Promise.all(readPromises.concat(writePromises));
+ if (incStep) {
+ console.timeEnd(`burstReadAndWrite incStep ${incStep}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ }
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+async function multipleWriteBursts() {
+ console.log('Starting multipleWriteBursts');
+ const RND_WAIT_TIME_BETWEEN_REQUESTS = 10;
+ const NUM_BURSTS = 4;
+ const NUM_BURST_WRITE = 3200;
+ const WAIT_BETWEEN_BURSTS = 500;
+ // Value 'undefined' is used to warm up the compiler.
+ for (const incStep of [undefined, 1, 10, 20, 25, 30, 40, 50, 100]) {
+ const database = newTestDatabase({
+ min: 100,
+ max: 400,
+ incStep: incStep,
+ });
+ const pool = database.pool_ as SessionPool;
+ try {
+ if (incStep) {
+ console.time(`multipleWriteBursts incStep ${incStep}`);
+ }
+ for (let i = 0; i < NUM_BURSTS; i++) {
+ const writePromises = queueWriteOperations(
+ database,
+ NUM_BURST_WRITE,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ );
+ await Promise.all(writePromises);
+ await new Promise(resolve => setTimeout(resolve, WAIT_BETWEEN_BURSTS));
+ }
+ if (incStep) {
+ console.timeEnd(`multipleWriteBursts incStep ${incStep}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ }
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+async function oneReadTransactionPerSecond() {
+ console.log('Starting oneReadTransactionPerSecond');
+ const RND_WAIT_TIME_BETWEEN_REQUESTS = 100000;
+ const NUM_TRANSACTIONS = RND_WAIT_TIME_BETWEEN_REQUESTS / 1000;
+ for (const minSessions of [0, 25]) {
+ const database = newTestDatabase({
+ min: minSessions,
+ });
+ const pool = database.pool_ as SessionPool;
+ try {
+ // Execute a batch of write requests to initialize the session pool with only
+ // write sessions. The dynamic scaling of the session pool should automatically
+ // change this into an appropriate number of read sessions as the test runs.
+ await queueWriteOperations(database, pool.options.incStep!, 0);
+ const readPromises = queueReadOperations(
+ database,
+ NUM_TRANSACTIONS,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ 0,
+ );
+ readPromises.forEach(p =>
+ p.then(t => {
+ console.log(`Time taken: ${t}ms`);
+ }),
+ );
+ const t = await Promise.all(readPromises);
+ const max = Math.max(...t);
+ const min = Math.min(...t);
+ const sum = t.reduce((a, b) => a + b, 0);
+ const avg = sum / t.length || 0;
+ const p90 = percentile(t, 0.9);
+ console.log(`Max: ${max}`);
+ console.log(`Min: ${min}`);
+ console.log(`Avg: ${avg}`);
+ console.log(`P90: ${p90}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+async function oneWriteTransactionPerSecond() {
+ console.log('Starting oneWriteTransactionPerSecond');
+ const RND_WAIT_TIME_BETWEEN_REQUESTS = 100000;
+ const NUM_TRANSACTIONS = RND_WAIT_TIME_BETWEEN_REQUESTS / 1000;
+ for (const minSessions of [0, 25]) {
+ const database = newTestDatabase({
+ min: minSessions,
+ });
+ const pool = database.pool_ as SessionPool;
+ try {
+ // Execute one read request to initialize the session pool.
+ await queueReadOperations(database, 1, 0, 0);
+ const writePromises = queueWriteOperations(
+ database,
+ NUM_TRANSACTIONS,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ );
+ writePromises.forEach(p =>
+ p.then(t => {
+ console.log(`Time taken: ${t}ms`);
+ }),
+ );
+ const t = await Promise.all(writePromises);
+ const max = Math.max(...t);
+ const min = Math.min(...t);
+ const sum = t.reduce((a, b) => a + b, 0);
+ const avg = sum / t.length || 0;
+ const p90 = percentile(t, 0.9);
+ console.log(`Max: ${max}`);
+ console.log(`Min: ${min}`);
+ console.log(`Avg: ${avg}`);
+ console.log(`P90: ${p90}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+async function oneReadAndOneWriteTransactionPerSecond() {
+ console.log('Starting oneReadAndOneWriteTransactionPerSecond');
+ const RND_WAIT_TIME_BETWEEN_REQUESTS = 100000;
+ const NUM_READ_TRANSACTIONS = RND_WAIT_TIME_BETWEEN_REQUESTS / 1000;
+ const NUM_WRITE_TRANSACTIONS = RND_WAIT_TIME_BETWEEN_REQUESTS / 1000;
+ for (const minSessions of [0, 25]) {
+ const database = newTestDatabase({
+ min: minSessions,
+ });
+ const pool = database.pool_ as SessionPool;
+ try {
+ const readPromises = queueReadOperations(
+ database,
+ NUM_READ_TRANSACTIONS,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ 0,
+ );
+ const writePromises = queueWriteOperations(
+ database,
+ NUM_WRITE_TRANSACTIONS,
+ RND_WAIT_TIME_BETWEEN_REQUESTS,
+ );
+ readPromises.forEach(p =>
+ p.then(t => {
+ console.log(`Read tx: ${t}ms`);
+ }),
+ );
+ writePromises.forEach(p =>
+ p.then(t => {
+ console.log(`Write tx: ${t}ms`);
+ }),
+ );
+ const t = await Promise.all(readPromises.concat(writePromises));
+ const max = Math.max(...t);
+ const min = Math.min(...t);
+ const sum = t.reduce((a, b) => a + b, 0);
+ const avg = sum / t.length || 0;
+ const p90 = percentile(t, 0.9);
+ console.log(`Max: ${max}`);
+ console.log(`Min: ${min}`);
+ console.log(`Avg: ${avg}`);
+ console.log(`P90: ${p90}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+/**
+ * Executes the steadyIncrease benchmark.
+ */
+async function steadyIncrease() {
+ console.log('Starting steadyIncrease');
+ // Value 'undefined' is used to warm up the compiler.
+ for (const incStep of [undefined, 1, 10, 20, 25, 30, 40, 50, 100]) {
+ const database = newTestDatabase({
+ min: 100,
+ max: 400,
+ incStep: incStep,
+ });
+ const pool = database.pool_ as SessionPool;
+ const snapshots: Snapshot[] = [];
+ try {
+ if (incStep) {
+ console.time(`steadyIncrease incStep ${incStep}`);
+ }
+ for (let i = 0; i < pool.options.max!; i++) {
+ const [snapshot] = await database.getSnapshot();
+ snapshots.unshift(snapshot);
+ }
+ for (const snapshot of snapshots) {
+ snapshot.end();
+ }
+ if (incStep) {
+ console.timeEnd(`steadyIncrease incStep ${incStep}`);
+ console.log(`Current session pool size: ${pool.size}`);
+ }
+ } finally {
+ await database.close();
+ }
+ }
+}
+
+/**
+ * Generates and submits read operations in parallel to the mock benchmark
+ * server.
+ * @param database The database to submit the queries to
+ * @param numRequests The number of read requests to submit.
+ * @param waitBetweenRequests The time to wait between each read request. This
+ * time will be used as the upper bound to get a
+ * randomized value for each request to simulate
+ * requests that come in at random intervals.
+ * @param holdSessionTime The time that the transaction should hold on to the
+ * session. This simulates the application performing
+ * calculations or other operations on the data that have
+ * been returned by Spanner. The time is used as an upper
+ * bound to get a randomized value for each request.
+ */
+function queueReadOperations(
+ database: Database,
+ numRequests: number,
+ waitBetweenRequests: number,
+ holdSessionTime: number,
+): Promise[] {
+ const promises: Promise[] = [];
+ for (let run = 0; run < numRequests; run++) {
+ promises.unshift(
+ new Promise(resolve => {
+ setTimeout(async () => {
+ const t1 = performance.now();
+ let p: Promise;
+ database
+ .runStream(selectSql)
+ .on('data', async () => {
+ p = new Promise(r => {
+ setTimeout(() => {
+ r();
+ }, Math.random() * holdSessionTime);
+ });
+ })
+ .on('end', async () => {
+ await p;
+ resolve(performance.now() - t1);
+ });
+ }, Math.random() * waitBetweenRequests);
+ }),
+ );
+ }
+ return promises;
+}
+
+/**
+ * Generates and submits write operations in parallel to the mock benchmark
+ * server.
+ * @param database The database to submit the updates to
+ * @param numRequests The number of write requests to submit.
+ * @param waitBetweenRequests The time to wait between each write request. This
+ * time will be used as the upper bound to get a
+ * randomized value for each request to simulate
+ * requests that come in at random intervals.
+ */
+function queueWriteOperations(
+ database: Database,
+ numRequests: number,
+ waitBetweenRequests: number,
+): Promise[] {
+ const promises: Promise[] = [];
+ for (let run = 0; run < numRequests; run++) {
+ promises.unshift(
+ new Promise((resolve, rejects) => {
+ setTimeout(() => {
+ const t1 = performance.now();
+ database.runTransaction((err, tx) => {
+ tx!
+ .runUpdate(updateSql)
+ .then(() =>
+ tx!
+ .commit()
+ .then(() => resolve(performance.now() - t1))
+ .catch(err => {
+ rejects(err);
+ }),
+ )
+ .catch(err => {
+ rejects(err);
+ });
+ });
+ }, Math.random() * waitBetweenRequests);
+ }),
+ );
+ }
+ return promises;
+}
+
+/** Creates a simple result set for SELECT 1. */
+function createSelect1ResultSet(): protobuf.ResultSet {
+ const fields = [
+ protobuf.StructType.Field.create({
+ name: 'NUM',
+ type: protobuf.Type.create({code: protobuf.TypeCode.INT64}),
+ }),
+ ];
+ const metadata = new protobuf.ResultSetMetadata({
+ rowType: new protobuf.StructType({
+ fields,
+ }),
+ });
+ return protobuf.ResultSet.create({
+ metadata,
+ rows: [{values: [{stringValue: '1'}]}],
+ });
+}
+
+function percentile(arr, p) {
+ const sorted = arr.sort((a, b) => a - b);
+ const pos = (sorted.length - 1) * p;
+ const base = Math.floor(pos);
+ const rest = pos - base;
+ if (sorted[base + 1] !== undefined) {
+ return sorted[base] + rest * (sorted[base + 1] - sorted[base]);
+ } else {
+ return sorted[base];
+ }
+}
diff --git a/handwritten/spanner/benchmark/workload.js b/handwritten/spanner/benchmark/workload.js
new file mode 100644
index 00000000000..ddce8cf6ea2
--- /dev/null
+++ b/handwritten/spanner/benchmark/workload.js
@@ -0,0 +1,123 @@
+/*!
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+const crypto = require('crypto');
+const PQueue = require('p-queue');
+const random = require('lodash.random');
+const timeSpan = require('time-span');
+
+const OPERATIONS = [
+ 'readproportion',
+ 'updateproportion',
+ 'scanproportion',
+ 'insertproportion',
+];
+
+class Workload {
+ constructor(database, options) {
+ this.database = database;
+ this.options = options;
+
+ this.queue = new PQueue();
+ this.weights = [];
+ this.totalWeight = 0;
+ this.operations = [];
+ this.latencies = {};
+ this.opCounts = {};
+ this.totalOpCount = 0;
+
+ for (const operation of OPERATIONS) {
+ const weight = parseFloat(this.options.get(operation));
+
+ if (weight <= 0) {
+ continue;
+ }
+
+ const shortOpName = operation.replace('proportion', '');
+
+ this.operations.push(shortOpName);
+ this.latencies[shortOpName] = [];
+ this.totalWeight += weight;
+ this.weights.push(this.totalWeight);
+ }
+ }
+
+ getRandomKey() {
+ return this.keys[random(this.keys.length - 1)];
+ }
+
+ loadKeys() {
+ return this.database
+ .run(`SELECT u.id FROM ${this.options.get('table')} u`)
+ .then(data => data[0].map(row => row[0].value))
+ .then(keys => (this.keys = keys));
+ }
+
+ run() {
+ const operationCount = parseInt(this.options.get('operationcount'));
+ const end = timeSpan();
+
+ for (let i = 0; i < operationCount; i++) {
+ const randomWeight = Math.random() * this.totalWeight;
+
+ for (let j = 0; j < this.weights.length; j++) {
+ const weight = this.weights[j];
+ const operation = this.operations[j];
+
+ if (randomWeight <= weight) {
+ this.queue.add(() => this.runOperation(operation));
+ break;
+ }
+ }
+ }
+
+ return this.queue.onIdle().then(() => (this.duration = end()));
+ }
+
+ runOperation(operation) {
+ if (typeof this[operation] !== 'function') {
+      throw new Error(`unsupported operation: ${operation}`);
+ }
+
+ const end = timeSpan();
+
+ return this[operation]().then(() => this.latencies[operation].push(end()));
+ }
+
+ read() {
+ const tableName = this.options.get('table');
+ const id = this.getRandomKey();
+ const query = `SELECT u.* FROM ${tableName} u WHERE u.id="${id}"`;
+
+ return this.database.run(query, {readOnly: true});
+ }
+
+ update() {
+ const tableName = this.options.get('table');
+ const id = this.getRandomKey();
+ const field = `field${random(9)}`;
+ const value = crypto.randomBytes(100).toString('hex');
+
+ return this.database.runTransactionAsync(transaction => {
+ transaction.update(tableName, {id, [field]: value});
+ return transaction.commit();
+ });
+ }
+}
+
+module.exports = Workload;
diff --git a/handwritten/spanner/benchmark/workloada b/handwritten/spanner/benchmark/workloada
new file mode 100644
index 00000000000..dfe51c1be46
--- /dev/null
+++ b/handwritten/spanner/benchmark/workloada
@@ -0,0 +1,4 @@
+readproportion=0.95
+updateproportion=0.05
+scanproportion=0
+insertproportion=0
diff --git a/handwritten/spanner/benchmark/workloadb b/handwritten/spanner/benchmark/workloadb
new file mode 100644
index 00000000000..3cdf324b3be
--- /dev/null
+++ b/handwritten/spanner/benchmark/workloadb
@@ -0,0 +1,4 @@
+readproportion=0.5
+updateproportion=0.5
+scanproportion=0
+insertproportion=0
diff --git a/handwritten/spanner/benchmark/ycsb.js b/handwritten/spanner/benchmark/ycsb.js
new file mode 100644
index 00000000000..055d565282b
--- /dev/null
+++ b/handwritten/spanner/benchmark/ycsb.js
@@ -0,0 +1,140 @@
+/*!
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+const bounds = require('binary-search-bounds');
+const dedent = require('dedent');
+const fs = require('fs');
+const path = require('path');
+const stats = require('stats-lite');
+
+const {Spanner} = require('../');
+const Workload = require('./workload');
+
+require('yargs')
+ .version(false)
+ .strict()
+ .command(
+ 'run [args]',
+ 'Run the workload',
+ {
+ P: {
+ alias: 'workload',
+ type: 'string',
+ description: 'The path to a YCSB workload file',
+ coerce: parseWorkloadFile,
+ },
+ p: {
+ alias: 'parameter',
+ description: 'The key=value pair of parameter',
+ coerce: parseKeyValuePairs,
+ },
+ b: {
+ alias: 'num_bucket',
+ default: 1000,
+ type: 'number',
+ describe: 'The number of buckets in output',
+ },
+ },
+ runWorkloads,
+ ).argv;
+
+function formatOptions(argv) {
+ const options = argv.workload.concat(argv.parameter, [
+ ['numBucket', argv.num_bucket],
+ ]);
+
+ return new Map(options);
+}
+
+function parseKeyValuePairs(pairs) {
+ return pairs.map(pair => pair.split('='));
+}
+
+function parseWorkloadFile(filePath) {
+ const contents = fs.readFileSync(path.resolve(filePath));
+ return parseKeyValuePairs(contents.toString().split('\n'));
+}
+
+function printMetrics(workload) {
+ const numBucket = workload.options.get('numBucket');
+ let totalOps = 0;
+
+ workload.operations.forEach(operation => {
+ totalOps += workload.latencies[operation].length;
+ });
+
+ console.log(
+ dedent`[OVERALL], RunTime(ms), ${workload.duration}
+ [OVERALL], Throughput(ops/sec), ${totalOps / (workload.duration / 1000)}`,
+ );
+
+ workload.operations.forEach(operation => {
+ const lats = workload.latencies[operation].sort((a, b) => a - b);
+ const ops = lats.length;
+ const opName = `[${operation.toUpperCase()}]`;
+
+ console.log(
+ dedent`${opName}, Operations, ${ops}
+ ${opName}, AverageLatency(us), ${stats.mean(lats)}
+ ${opName}, LatencyVariance(us), ${stats.stdev(lats)}
+ ${opName}, MinLatency(us), ${lats[0]}
+ ${opName}, MaxLatency(us), ${lats[lats.length - 1]}
+ ${opName}, 95thPercentileLatency(us), ${stats.percentile(lats, 0.95)}
+ ${opName}, 99thPercentileLatency(us), ${stats.percentile(lats, 0.99)}
+ ${opName}, 99.9thPercentileLatency(us), ${stats.percentile(lats, 0.999)}
+ ${opName}, Return=OK, ${ops}`,
+ );
+
+ for (let i = 0; i < numBucket; i++) {
+ const hi = bounds.lt(lats, i + 1);
+ const lo = bounds.le(lats, i);
+ console.log(`${opName}, ${i}, ${hi - lo}`);
+ }
+
+ const lo = bounds.le(lats, numBucket);
+ console.log(`${opName}, ${numBucket}, ${ops - lo}`);
+ });
+}
+
+function runWorkload(database, options) {
+ const workload = new Workload(database, options);
+
+ return workload
+ .loadKeys()
+ .then(() => workload.run())
+ .then(() => printMetrics(workload))
+ .catch(err => console.error(err));
+}
+
+function runWorkloads(argv) {
+ const options = formatOptions(argv);
+
+ const spanner = new Spanner({
+ projectId: options.get('cloudspanner.project'),
+ });
+
+ const database = spanner
+ .instance(options.get('cloudspanner.instance'))
+ .database(options.get('cloudspanner.database'));
+
+ return Promise.all(
+ Array(options.get('num_worker') || 1)
+ .fill(0)
+ .map(() => runWorkload(database, options)),
+ );
+}
diff --git a/handwritten/spanner/bin/README.md b/handwritten/spanner/bin/README.md
new file mode 100644
index 00000000000..45104f29521
--- /dev/null
+++ b/handwritten/spanner/bin/README.md
@@ -0,0 +1,13 @@
+# benchwrapper
+
+benchwrapper is a lightweight gRPC server that wraps the Spanner library for
+benchmarking purposes.
+
+## Running
+
+```
+cd nodejs-spanner
+npm install
+export SPANNER_EMULATOR_HOST=localhost:8080
+npm run benchwrapper -- --port 8081
+```
\ No newline at end of file
diff --git a/handwritten/spanner/bin/benchwrapper.js b/handwritten/spanner/bin/benchwrapper.js
new file mode 100644
index 00000000000..231c7634f9a
--- /dev/null
+++ b/handwritten/spanner/bin/benchwrapper.js
@@ -0,0 +1,149 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const {grpc} = require('google-gax');
+const protoLoader = require('@grpc/proto-loader');
+const {Spanner} = require('../build/src');
+
+const argv = require('yargs')
+ .option('port', {
+ description: 'The port that the Node.js benchwrapper should run on.',
+ type: 'number',
+ demand: true,
+ })
+ .parse();
+
+const PROTO_PATH = __dirname + '/spanner.proto';
+const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
+ keepCase: true,
+ longs: String,
+ enums: String,
+ defaults: true,
+ oneofs: true,
+});
+const protoDescriptor = grpc.loadPackageDefinition(packageDefinition);
+const spannerBenchWrapper = protoDescriptor.spanner_bench;
+
+// The benchwrapper should only be executed against an emulator.
+if (!process.env.SPANNER_EMULATOR_HOST) {
+ throw new Error(
+ 'This benchmarking server only works when connected to an emulator. Please set SPANNER_EMULATOR_HOST.',
+ );
+}
+// This will connect the Spanner client to an emulator, as SPANNER_EMULATOR_HOST has been set.
+const spannerClient = new Spanner();
+
+// Implementation of SpannerBenchWrapper.Read method.
+function Read(call, callback) {
+ const instance = spannerClient.instance('someinstance');
+ const database = instance.database('somedatabase');
+ let tx;
+ database
+ .getSnapshot()
+ .then(data => {
+ tx = data[0];
+ return tx.run(call.request.query);
+ })
+ .then(data => {
+ const [rows] = data;
+ // Just iterate over all rows.
+ rows.forEach(() => {});
+ })
+ .finally(() => {
+ if (tx) {
+ tx.end();
+ }
+ callback(null, {});
+ });
+}
+
+// Implementation of SpannerBenchWrapper.Insert method.
+function Insert(call, callback) {
+ const instance = spannerClient.instance('someinstance');
+ const database = instance.database('somedatabase');
+ database.runTransaction((err, transaction) => {
+ if (err) {
+ callback(err);
+ return;
+ }
+ call.request.singers.forEach(singer => {
+ transaction.insert('Singers', {
+ SingerId: singer.id,
+ FirstName: singer.first_name,
+ LastName: singer.last_name,
+ });
+ });
+ transaction.commit(err => {
+ if (err) {
+ callback(err);
+ } else {
+ callback(null, {});
+ }
+ });
+ });
+}
+
+// Implementation of SpannerBenchWrapper.Update method.
+function Update(call, callback) {
+ const instance = spannerClient.instance('someinstance');
+ const database = instance.database('somedatabase');
+ database.runTransaction((err, transaction) => {
+ if (err) {
+ callback(err);
+ return;
+ }
+ transaction.batchUpdate(call.request.queries, (err, rowCounts) => {
+ if (err) {
+ callback(
+ new grpc.StatusBuilder()
+ .withCode(err.code)
+ .withDetails(err.details || err.message)
+ .withMetadata(err.metadata)
+ .build(),
+ );
+ transaction.rollback().then(() => {});
+ return;
+ }
+ // Iterate over all rowCounts.
+ rowCounts.forEach(() => {});
+ transaction.commit(err => {
+ if (err) {
+ callback(err);
+ } else {
+ callback(null, {});
+ }
+ });
+ });
+ });
+}
+
+// Create and start a benchwrapper server.
+const server = new grpc.Server();
+server.addService(spannerBenchWrapper['SpannerBenchWrapper']['service'], {
+ Read: Read,
+ Insert: Insert,
+ Update: Update,
+});
+console.log('starting benchwrapper for Spanner on localhost:' + argv.port);
+server.bindAsync(
+ '0.0.0.0:' + argv.port,
+ grpc.ServerCredentials.createInsecure(),
+ err => {
+ if (err) {
+ console.error(err);
+ return;
+ }
+ server.start();
+ },
+);
diff --git a/handwritten/spanner/bin/benchwrapper_test_client.js b/handwritten/spanner/bin/benchwrapper_test_client.js
new file mode 100644
index 00000000000..a7fc5d46808
--- /dev/null
+++ b/handwritten/spanner/bin/benchwrapper_test_client.js
@@ -0,0 +1,75 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is a simple test client for the Spanner benchwrapper.
+
+const {grpc} = require('google-gax');
+const protoLoader = require('@grpc/proto-loader');
+
+const argv = require('yargs')
+ .option('port', {
+ description: 'The port that the benchwrapper client should connect to.',
+ type: 'number',
+ demand: true,
+ })
+ .parse();
+
+const PROTO_PATH = __dirname + '/spanner.proto';
+const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
+ keepCase: true,
+ longs: String,
+ enums: String,
+ defaults: true,
+ oneofs: true,
+});
+const protoDescriptor = grpc.loadPackageDefinition(packageDefinition);
+const spannerBenchWrapper = protoDescriptor.spanner_bench;
+
+console.log(`connecting to localhost:${argv.port}`);
+const client = new spannerBenchWrapper.SpannerBenchWrapper(
+ `localhost:${argv.port}`,
+ grpc.credentials.createInsecure(),
+);
+const readReq = {
+  query: 'SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1',
+};
+const insertReq = {
+ singers: [
+    {id: '1', first_name: 'Lana', last_name: 'Del Rey'},
+    {id: '2', first_name: 'Taylor', last_name: 'Swift'},
+ ],
+};
+const updateReq = {
+  queries: [
+ 'UPDATE sometable SET foo=1 WHERE bar=2',
+ 'UPDATE sometable SET foo=2 WHERE bar=1',
+ ],
+};
+client.read(readReq, (err, result) => {
+ callback('read', err, result);
+});
+client.insert(insertReq, (err, result) => {
+ callback('insert', err, result);
+});
+client.update(updateReq, (err, result) => {
+ callback('update', err, result);
+});
+
+function callback(method, err, result) {
+ if (err) {
+ console.log(`${method} failed with error ${err}`);
+ return;
+ }
+ console.log(`${method} executed with result ${result}`);
+}
diff --git a/handwritten/spanner/bin/spanner.proto b/handwritten/spanner/bin/spanner.proto
new file mode 100644
index 00000000000..10153a4ccad
--- /dev/null
+++ b/handwritten/spanner/bin/spanner.proto
@@ -0,0 +1,73 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package spanner_bench;
+option java_multiple_files = true;
+option java_package = "com.google.cloud.benchwrapper";
+
+message Singer {
+ int64 id = 1;
+ string first_name = 2;
+ string last_name = 3;
+ string singer_info = 4;
+}
+
+message Album {
+ int64 id = 1;
+ int64 singer_id = 2;
+ string album_title = 3;
+}
+
+message ReadQuery {
+ // The query to use in the read call.
+ string query = 1;
+}
+
+message InsertQuery {
+ // The query to use in the insert call.
+ repeated Singer singers = 1;
+ repeated Album albums = 2;
+}
+
+message UpdateQuery {
+ // The queries to use in the update call.
+ repeated string queries = 1;
+}
+
+message EmptyResponse {}
+
+service SpannerBenchWrapper {
+ // Read represents operations like Go's ReadOnlyTransaction.Query, Java's
+ // ReadOnlyTransaction.executeQuery, Python's snapshot.read, and Node's
+ // Transaction.Read.
+ //
+ // It will typically be used to read many items.
+ rpc Read(ReadQuery) returns (EmptyResponse) {}
+
+ // Insert represents operations like Go's Client.Apply, Java's
+ // DatabaseClient.writeAtLeastOnce, Python's transaction.commit, and Node's
+ // Transaction.Commit.
+ //
+ // It will typically be used to insert many items.
+ rpc Insert(InsertQuery) returns (EmptyResponse) {}
+
+ // Update represents operations like Go's ReadWriteTransaction.BatchUpdate,
+ // Java's TransactionRunner.run, Python's Batch.update, and Node's
+ // Transaction.BatchUpdate.
+ //
+ // It will typically be used to update many items.
+ rpc Update(UpdateQuery) returns (EmptyResponse) {}
+}
\ No newline at end of file
diff --git a/handwritten/spanner/linkinator.config.json b/handwritten/spanner/linkinator.config.json
new file mode 100644
index 00000000000..cbd5b15dcff
--- /dev/null
+++ b/handwritten/spanner/linkinator.config.json
@@ -0,0 +1,17 @@
+{
+ "recurse": true,
+ "skip": [
+ "https://codecov.io/gh/googleapis/",
+ "www.googleapis.com",
+ "img.shields.io",
+ "https://console.cloud.google.com/cloudshell",
+ "https://support.google.com",
+ "^https://github\\.com/googleapis/nodejs-spanner/blob/[^/]+/samples/.*"
+ ],
+ "silent": true,
+ "concurrency": 5,
+ "retry": true,
+ "retryErrors": true,
+ "retryErrorsCount": 5,
+ "retryErrorsJitter": 3000
+}
diff --git a/handwritten/spanner/observability-test/batch-transaction.ts b/handwritten/spanner/observability-test/batch-transaction.ts
new file mode 100644
index 00000000000..47c124d3922
--- /dev/null
+++ b/handwritten/spanner/observability-test/batch-transaction.ts
@@ -0,0 +1,247 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* eslint-disable prefer-rest-params */
+
+import {util} from '@google-cloud/common';
+import * as pfy from '@google-cloud/promisify';
+import * as assert from 'assert';
+import {before, beforeEach, afterEach, describe, it} from 'mocha';
+import * as extend from 'extend';
+import * as proxyquire from 'proxyquire';
+import * as sinon from 'sinon';
+const {
+ AlwaysOnSampler,
+ NodeTracerProvider,
+ InMemorySpanExporter,
+} = require('@opentelemetry/sdk-trace-node');
+// eslint-disable-next-line n/no-extraneous-require
+const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base');
+import {Session, Spanner} from '../src';
+import * as bt from '../src/batch-transaction';
+
+const fakePfy = extend({}, pfy, {
+ promisifyAll(klass, options) {
+ if (klass.name !== 'BatchTransaction') {
+ return;
+ }
+ assert.deepStrictEqual(options.exclude, ['identifier']);
+ },
+});
+
+class FakeTimestamp {
+ calledWith_: IArguments;
+ constructor() {
+ this.calledWith_ = arguments;
+ }
+}
+
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+const fakeCodec: any = {
+ encode: util.noop,
+ Timestamp: FakeTimestamp,
+ Int() {},
+ Float() {},
+ SpannerDate() {},
+ convertProtoTimestampToDate() {},
+};
+
+const SPANNER = {
+ routeToLeaderEnabled: true,
+};
+
+const INSTANCE = {
+ parent: SPANNER,
+};
+
+const DATABASE = {
+ formattedName_: 'database',
+ parent: INSTANCE,
+};
+
+class FakeTransaction {
+ calledWith_: IArguments;
+ session;
+ constructor(session) {
+ this.calledWith_ = arguments;
+ this.session = session;
+ }
+ static encodeKeySet(): object {
+ return {};
+ }
+ static encodeParams(): object {
+ return {};
+ }
+
+ _getSpanner(): Spanner {
+ return SPANNER as Spanner;
+ }
+
+ run() {}
+ read() {}
+}
+
+describe('BatchTransaction', () => {
+ const sandbox = sinon.createSandbox();
+
+ // tslint:disable-next-line variable-name
+ let BatchTransaction: typeof bt.BatchTransaction;
+ let batchTransaction: bt.BatchTransaction;
+
+ before(() => {
+ BatchTransaction = proxyquire('../src/batch-transaction.js', {
+ '@google-cloud/precise-date': {PreciseDate: FakeTimestamp},
+ '@google-cloud/promisify': fakePfy,
+ './codec.js': {codec: fakeCodec},
+ './transaction.js': {Snapshot: FakeTransaction},
+ }).BatchTransaction;
+ });
+
+ const traceExporter = new InMemorySpanExporter();
+ const sampler = new AlwaysOnSampler();
+
+ const provider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ afterEach(() => {
+ traceExporter.reset();
+ sandbox.restore();
+ });
+
+ const REQUEST = sandbox.stub();
+ const SESSION = {
+ parent: DATABASE,
+ formattedName_: 'abcdef',
+ request: REQUEST,
+ };
+ const ID = '0xdeadbeef';
+
+ const PARTITIONS = [{partitionToken: 'a'}, {partitionToken: 'b'}];
+ const RESPONSE = {partitions: PARTITIONS};
+
+ beforeEach(() => {
+ batchTransaction = new BatchTransaction(SESSION as {} as Session);
+ batchTransaction.session = SESSION as {} as Session;
+ batchTransaction.id = ID;
+ batchTransaction._observabilityOptions = {tracerProvider: provider};
+ REQUEST.callsFake((_, callback) => callback(null, RESPONSE));
+ });
+
+ const GAX_OPTS = {};
+
+ const QUERY = {
+ sql: 'SELECT * FROM Singers',
+ gaxOptions: GAX_OPTS,
+ params: {},
+ types: {},
+ };
+
+ it('createQueryPartitions', done => {
+ batchTransaction.createQueryPartitions(QUERY, err => {
+ assert.ifError(err);
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 2, 'Exactly 2 spans expected');
+
+ // Sort the spans by duration.
+      spans.sort((spanA, spanB) => {
+        return spanA.duration < spanB.duration ? -1 : 1;
+      });
+
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.BatchTransaction.createPartitions_',
+ 'CloudSpanner.BatchTransaction.createQueryPartitions',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that createPartitions_ is a child span of createQueryPartitions.
+ const spanCreatePartitions_ = spans[0];
+ const spanCreateQueryPartitions = spans[1];
+ assert.ok(
+ spanCreateQueryPartitions.spanContext().traceId,
+ 'Expected that createQueryPartitions has a defined traceId',
+ );
+ assert.ok(
+ spanCreatePartitions_.spanContext().traceId,
+ 'Expected that createPartitions_ has a defined traceId',
+ );
+ assert.deepStrictEqual(
+ spanCreatePartitions_.spanContext().traceId,
+ spanCreateQueryPartitions.spanContext().traceId,
+ 'Expected that both spans share a traceId',
+ );
+ assert.ok(
+ spanCreateQueryPartitions.spanContext().spanId,
+ 'Expected that createQueryPartitions has a defined spanId',
+ );
+ assert.ok(
+ spanCreatePartitions_.spanContext().spanId,
+ 'Expected that createPartitions_ has a defined spanId',
+ );
+ assert.deepStrictEqual(
+ spanCreatePartitions_.parentSpanContext.spanId,
+ spanCreateQueryPartitions.spanContext().spanId,
+ 'Expected that createQueryPartitions is the parent to createPartitions_',
+ );
+ done();
+ });
+ });
+
+ it('createReadPartitions', done => {
+ const REQUEST = sandbox.stub();
+ const response = {};
+ REQUEST.callsFake((_, callback) => callback(null, response));
+
+ batchTransaction.createReadPartitions(QUERY, err => {
+ assert.ifError(err);
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 2, 'Exactly 2 spans expected');
+
+ // Sort the spans by duration.
+      spans.sort((spanA, spanB) => {
+        return spanA.duration < spanB.duration ? -1 : 1;
+      });
+
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ });
+ const expectedSpanNames = [
+ 'CloudSpanner.BatchTransaction.createPartitions_',
+ 'CloudSpanner.BatchTransaction.createReadPartitions',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ done();
+ });
+ });
+});
diff --git a/handwritten/spanner/observability-test/database.ts b/handwritten/spanner/observability-test/database.ts
new file mode 100644
index 00000000000..d905ea086d5
--- /dev/null
+++ b/handwritten/spanner/observability-test/database.ts
@@ -0,0 +1,2212 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* eslint-disable prefer-rest-params */
+
+import * as through from 'through2';
+import {EventEmitter} from 'events';
+import * as assert from 'assert';
+import * as extend from 'extend';
+import {google} from '../protos/protos';
+import {
+ BatchWriteOptions,
+ CommitCallback,
+ CommitOptions,
+ MutationSet,
+} from '../src/transaction';
+import {util} from '@google-cloud/common';
+import {Transform} from 'stream';
+import * as proxyquire from 'proxyquire';
+import * as sinon from 'sinon';
+const {SpanStatusCode} = require('@opentelemetry/api');
+const {
+ AlwaysOnSampler,
+ NodeTracerProvider,
+ InMemorySpanExporter,
+} = require('@opentelemetry/sdk-trace-node');
+// eslint-disable-next-line n/no-extraneous-require
+const {
+ ReadableSpan,
+ SimpleSpanProcessor,
+} = require('@opentelemetry/sdk-trace-base');
+import * as db from '../src/database';
+import {Instance, MutationGroup, Spanner} from '../src';
+import * as pfy from '@google-cloud/promisify';
+import {grpc} from 'google-gax';
+import {MockError} from '../test/mockserver/mockspanner';
+import {FakeSessionFactory} from '../test/database';
+import {RunTransactionOptions} from '../src/transaction-runner';
+const {generateWithAllSpansHaveDBName} = require('./helper');
+
+const fakePfy = extend({}, pfy, {
+ promisifyAll(klass, options) {
+ if (klass.name !== 'Database') {
+ return;
+ }
+ assert.deepStrictEqual(options.exclude, [
+ 'batchTransaction',
+ 'batchWriteAtLeastOnce',
+ 'getRestoreInfo',
+ 'getState',
+ 'getDatabaseDialect',
+ 'getOperations',
+ 'runTransaction',
+ 'runTransactionAsync',
+ 'table',
+ 'session',
+ ]);
+ },
+});
+
+class FakeBatchTransaction {
+ calledWith_: IArguments;
+ id?: string;
+ readTimestamp?: {seconds: number; nanos: number};
+ constructor() {
+ this.calledWith_ = arguments;
+ }
+}
+
+class FakeGrpcServiceObject extends EventEmitter {
+ calledWith_: IArguments;
+ constructor() {
+ super();
+ this.calledWith_ = arguments;
+ }
+}
+
+function fakePartialResultStream(this: Function & {calledWith_: IArguments}) {
+ this.calledWith_ = arguments;
+ return this;
+}
+
+class FakeSession {
+ calledWith_: IArguments;
+ formattedName_: any;
+ constructor() {
+ this.calledWith_ = arguments;
+ }
+ partitionedDml(): FakeTransaction {
+ return new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.PartitionedDml,
+ );
+ }
+ snapshot(): FakeTransaction {
+ return new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadOnly,
+ );
+ }
+}
+
+class FakeSessionPool extends EventEmitter {
+ calledWith_: IArguments;
+ constructor() {
+ super();
+ this.calledWith_ = arguments;
+ }
+ open() {}
+ getSession() {}
+ release() {}
+}
+
+class FakeTable {
+ calledWith_: IArguments;
+ constructor() {
+ this.calledWith_ = arguments;
+ }
+}
+
+class FakeTransaction extends EventEmitter {
+ calledWith_: IArguments;
+ _options!: google.spanner.v1.ITransactionOptions;
+ private _queuedMutations: google.spanner.v1.Mutation[];
+ constructor(options) {
+ super();
+ this._options = options;
+ this.calledWith_ = arguments;
+ this._queuedMutations = [];
+ }
+ begin() {}
+ end() {}
+ runStream(): Transform {
+ return through.obj();
+ }
+ runUpdate() {}
+ setQueuedMutations(mutation) {
+ this._queuedMutations = mutation;
+ }
+ setReadWriteTransactionOptions(options: RunTransactionOptions) {}
+ commit(
+ options?: CommitOptions,
+ callback?: CommitCallback,
+  ): void | Promise<google.spanner.v1.ICommitResponse> {
+ if (callback) {
+ callback(null, {commitTimestamp: {seconds: 1, nanos: 0}});
+ }
+ return Promise.resolve({commitTimestamp: {seconds: 1, nanos: 0}});
+ }
+}
+
+class FakeTransactionRunner {
+ calledWith_: IArguments;
+ constructor() {
+ this.calledWith_ = arguments;
+ // eslint-disable-next-line @typescript-eslint/no-this-alias
+ }
+  async run(): Promise<void> {}
+}
+
+class FakeAsyncTransactionRunner<T> {
+  calledWith_: IArguments;
+  constructor() {
+    this.calledWith_ = arguments;
+  }
+  async run(): Promise<T> {
+    return {} as T;
+  }
+}
+
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+const fakeCodec: any = {
+ encode: util.noop,
+ Int() {},
+ Float() {},
+ SpannerDate() {},
+};
+
+class FakeAbortError {
+ error;
+ constructor(err) {
+ this.error = err;
+ }
+}
+
+const fakeRetry = fn => {
+ return fn();
+};
+
+fakeRetry.AbortError = FakeAbortError;
+
+describe('Database', () => {
+ const sandbox = sinon.createSandbox();
+
+ // tslint:disable-next-line variable-name
+ let Database: typeof db.Database;
+ // tslint:disable-next-line variable-name
+ let DatabaseCached: typeof db.Database;
+
+ const SPANNER = {
+ routeToLeaderEnabled: true,
+ } as {} as Spanner;
+
+ const INSTANCE = {
+ request: util.noop,
+ requestStream: util.noop,
+ formattedName_: 'instance-name',
+ databases_: new Map(),
+ parent: SPANNER,
+ } as {} as Instance;
+
+ const NAME = 'table-name';
+
+ const POOL_OPTIONS = {};
+
+ let database;
+
+ before(() => {
+ Database = proxyquire('../src/database.js', {
+ './common-grpc/service-object': {
+ GrpcServiceObject: FakeGrpcServiceObject,
+ },
+ '@google-cloud/promisify': fakePfy,
+ 'p-retry': fakeRetry,
+ './batch-transaction': {BatchTransaction: FakeBatchTransaction},
+ './codec': {codec: fakeCodec},
+ './partial-result-stream': {partialResultStream: fakePartialResultStream},
+ './session-pool': {SessionPool: FakeSessionPool},
+ './session-factory': {SessionFactory: FakeSessionFactory},
+ './session': {Session: FakeSession},
+ './table': {Table: FakeTable},
+ './transaction-runner': {
+ TransactionRunner: FakeTransactionRunner,
+ AsyncTransactionRunner: FakeAsyncTransactionRunner,
+ },
+ }).Database;
+ DatabaseCached = Object.assign({}, Database);
+ });
+
+ const withAllSpansHaveDBName = generateWithAllSpansHaveDBName(
+ INSTANCE.formattedName_ + '/databases/' + NAME,
+ );
+
+ beforeEach(() => {
+ fakeCodec.encode = util.noop;
+ extend(Database, DatabaseCached);
+ INSTANCE._observabilityOptions = {
+ tracerProvider: provider,
+ enableExtendedTracing: false,
+ };
+ database = new Database(INSTANCE, NAME, POOL_OPTIONS);
+ database.parent = INSTANCE;
+ database.databaseRole = 'parent_role';
+ const gaxOpts = {};
+ const options: {
+ a: string;
+ gaxOptions?: {};
+ } = {a: 'a', gaxOptions: gaxOpts};
+
+ const expectedReqOpts = extend({}, options, {
+ database: database.formattedName_,
+ });
+ delete expectedReqOpts.gaxOptions;
+ });
+
+ const traceExporter = new InMemorySpanExporter();
+ const sampler = new AlwaysOnSampler();
+
+ const provider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ afterEach(() => {
+ sandbox.restore();
+ traceExporter.forceFlush();
+ traceExporter.reset();
+ });
+
+ it('getSessions without error', done => {
+ const ARGS = [null, [], {}];
+ database.request = (config, callback) => {
+ callback(...ARGS);
+ };
+
+ database.getSessions((err, sessions) => {
+ assert.ifError(err);
+ assert.ok(sessions);
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.getSessions'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span's status code is UNSET.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Expected an OK span status',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ it('getSessions with error', done => {
+ const ARGS = [new Error('our error'), null, {}];
+ database.request = (config, callback) => {
+ callback(...ARGS);
+ };
+
+ database.getSessions((err, sessions) => {
+ assert.ok(err);
+ assert.ok(!sessions);
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.getSessions'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'our error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ describe('batchCreateSessions', () => {
+ it('without error', done => {
+ const ARGS = [null, [{}]];
+ database.request = (config, callback) => {
+ callback(...ARGS);
+ };
+
+ database.batchCreateSessions(10, (err, sessions) => {
+ assert.ifError(err);
+ assert.ok(sessions);
+
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.batchCreateSessions'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span didn't encounter an error.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ it('with error', done => {
+ const ARGS = [new Error('batchCreateSessions.error'), null];
+ database.request = (config, callback) => {
+ callback(...ARGS);
+ };
+
+ database.batchCreateSessions(10, (err, sessions) => {
+ assert.ok(err);
+ assert.ok(!sessions);
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.batchCreateSessions'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'batchCreateSessions.error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+ });
+
+ describe('getSnapshot', () => {
+ let fakeSessionFactory: FakeSessionFactory;
+ let fakeSession: FakeSession;
+ let fakeSnapshot: FakeTransaction;
+
+ let beginSnapshotStub: sinon.SinonStub;
+ let getSessionStub: sinon.SinonStub;
+
+ beforeEach(() => {
+ fakeSessionFactory = database.sessionFactory_;
+ fakeSession = new FakeSession();
+ fakeSnapshot = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadOnly,
+ );
+
+ beginSnapshotStub = (
+ sandbox.stub(fakeSnapshot, 'begin') as sinon.SinonStub
+ ).callsFake(callback => callback(null));
+
+ getSessionStub = (
+ sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub
+ ).callsFake(callback => callback(null, fakeSession));
+
+ sandbox.stub(fakeSession, 'snapshot').returns(fakeSnapshot);
+
+ sandbox.stub(fakeSessionFactory, 'isMultiplexedEnabled').returns(false);
+ });
+
+ it('with error', done => {
+ const fakeError = new Error('our snapshot error');
+
+ getSessionStub.callsFake(callback => callback(fakeError, null));
+
+ database.getSnapshot(err => {
+ assert.strictEqual(err, fakeError);
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.getSnapshot'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'our snapshot error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ it('with retries on `begin` errors with `Session not found`', done => {
+ const fakeError = {
+ code: grpc.status.NOT_FOUND,
+ message: 'Session not found',
+ } as MockError;
+
+ const fakeSession2 = new FakeSession();
+ const fakeSnapshot2 = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadOnly,
+ );
+ (sandbox.stub(fakeSnapshot2, 'begin') as sinon.SinonStub).callsFake(
+ callback => callback(null),
+ );
+ sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2);
+
+ getSessionStub
+ .onFirstCall()
+ .callsFake(callback => callback(null, fakeSession))
+ .onSecondCall()
+ .callsFake(callback => callback(null, fakeSession2));
+ beginSnapshotStub.callsFake(callback => callback(fakeError));
+
+ // The first session that was not found should be released back into the
+ // pool, so that the pool can remove it from its inventory.
+ const releaseStub = sandbox.stub(fakeSessionFactory, 'release');
+
+ database.getSnapshot(async (err, snapshot) => {
+ assert.ifError(err);
+ assert.strictEqual(snapshot, fakeSnapshot2);
+ // The first session that error should already have been released back
+ // to the pool.
+ assert.strictEqual(releaseStub.callCount, 1);
+ // Ending the valid snapshot will release its session back into the
+ // pool.
+ snapshot.emit('end');
+ assert.strictEqual(releaseStub.callCount, 2);
+
+ await provider.forceFlush();
+ await traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.getSnapshot',
+ 'CloudSpanner.Database.getSnapshot',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the first span actually produced an error that was recorded.
+ const parentSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ parentSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'Session not found',
+ parentSpan.status.message.toString(),
+ 'Mismatched span status message',
+ );
+
+ // Ensure that the second span is a child of the first span.
+ const secondRetrySpan = spans[1];
+ assert.ok(
+ parentSpan.spanContext().traceId,
+ 'Expected that the initial parent span has a defined traceId',
+ );
+ assert.ok(
+ secondRetrySpan.spanContext().traceId,
+ 'Expected that the second retry span has a defined traceId',
+ );
+ assert.deepStrictEqual(
+ parentSpan.spanContext().traceId,
+ secondRetrySpan.spanContext().traceId,
+ 'Expected that both spans share a traceId',
+ );
+ assert.ok(
+ parentSpan.spanContext().spanId,
+ 'Expected that the initial parent span has a defined spanId',
+ );
+ assert.ok(
+ secondRetrySpan.spanContext().spanId,
+ 'Expected that the second retry span has a defined spanId',
+ );
+ assert.deepStrictEqual(
+ secondRetrySpan.parentSpanContext.spanId,
+ parentSpan.spanContext().spanId,
+ 'Expected that secondRetrySpan is the child to parentSpan',
+ );
+
+ const expectedEventNames = ['No session available'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+ });
+
+ describe('createBatchTransaction', () => {
+ const SESSION = {};
+ const RESPONSE = {a: 'b'};
+
+ beforeEach(() => {
+ database.sessionFactory_ = {
+ getSession(callback) {
+ callback(null, SESSION);
+ },
+ };
+ });
+
+ it('with session error', done => {
+ const error = new Error('with session error');
+
+ database.sessionFactory_ = {
+ getSession(callback) {
+ callback(error);
+ },
+ };
+
+ database.createBatchTransaction((err, transaction, resp) => {
+ assert.strictEqual(err, error);
+ assert.strictEqual(transaction, null);
+ assert.strictEqual(resp, undefined);
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createBatchTransaction',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'with session error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ it('with no error', done => {
+ const opts = {a: 'b'};
+
+ const fakeTransaction = {
+ begin(callback) {
+ callback(null, RESPONSE);
+ },
+ once() {},
+ end() {},
+ };
+
+ database.batchTransaction = (identifier, options) => {
+ assert.deepStrictEqual(identifier, {session: SESSION});
+ assert.strictEqual(options, opts);
+ return fakeTransaction;
+ };
+
+ database.createBatchTransaction(opts, async (err, transaction, resp) => {
+ assert.strictEqual(err, null);
+ assert.strictEqual(transaction, fakeTransaction);
+ assert.strictEqual(resp, RESPONSE);
+ transaction!.end();
+
+ await provider.forceFlush();
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createBatchTransaction',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+        // Success path: ensure the span did not record an error status.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ `No span status message expected\n\tGot: undefined\n\tWant: ${firstSpan.status.message}`,
+ );
+
+ const expectedEventNames = ['Using Session'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ it('with begin transaction error', done => {
+ const error = new Error('our createBatchTransaction error');
+
+ const fakeTransaction = {
+ begin(callback) {
+ callback(error, RESPONSE);
+ },
+ once() {},
+ end() {},
+ };
+
+ database.batchTransaction = () => {
+ return fakeTransaction;
+ };
+
+ database.createBatchTransaction((err, transaction, resp) => {
+ assert.strictEqual(err, error);
+ assert.strictEqual(transaction, null);
+ assert.strictEqual(resp, RESPONSE);
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createBatchTransaction',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'our createBatchTransaction error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+ });
+
+ describe('getTransaction', () => {
+ let fakeSessionFactory: FakeSessionFactory;
+ let fakeSession: FakeSession;
+ let fakeTransaction: FakeTransaction;
+
+ let getSessionStub: sinon.SinonStub;
+
+ beforeEach(() => {
+ fakeSessionFactory = database.sessionFactory_;
+ fakeSession = new FakeSession();
+ fakeTransaction = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadWrite,
+ );
+
+ getSessionStub = (
+ sandbox.stub(
+ fakeSessionFactory,
+ 'getSessionForReadWrite',
+ ) as sinon.SinonStub
+ ).callsFake(callback => {
+ callback(null, fakeSession, fakeTransaction);
+ });
+ });
+
+ it('with pool errors', done => {
+ const fakeError = new Error('pool error');
+
+ getSessionStub.callsFake(callback => callback(fakeError));
+
+ database.getTransaction(
+ {requestOptions: {transactionTag: 'transaction-tag'}},
+ async err => {
+ assert.strictEqual(err, fakeError);
+
+ await provider.forceFlush();
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+ const expectedSpanNames = ['CloudSpanner.Database.getTransaction'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // In the event of a sessionPool error, we should not have events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `event names mismatch:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'pool error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+ assert.strictEqual(
+ spans[0].attributes['transaction.tag'],
+ 'transaction-tag',
+ );
+ done();
+ },
+ );
+ });
+
+ it('with no errors', done => {
+ database.getTransaction((err, transaction) => {
+ assert.ifError(err);
+ assert.strictEqual(transaction, fakeTransaction);
+ transaction!.end();
+
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.getTransaction'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that we have specific events.
+ const expectedEventNames = ['Using Session'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `event names mismatch:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+        // Success path: ensure the span did not record an error status.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ `Unexpected span status code: ${firstSpan.status.code}`,
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ `Unexpected status message: ${firstSpan.status.message}`,
+ );
+
+ done();
+ });
+ });
+ });
+
+ describe('writeAtLeastOnce', () => {
+ const mutations = new MutationSet();
+ mutations.insert('MyTable', {
+ Key: 'k3',
+ Thing: 'xyz',
+ });
+
+ const SESSION = new FakeSession();
+ const RESPONSE = {commitTimestamp: {seconds: 1, nanos: 0}};
+ const TRANSACTION = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadWrite,
+ );
+
+ let sessionFactory: FakeSessionFactory;
+
+ beforeEach(() => {
+ sessionFactory = database.sessionFactory_;
+ (sandbox.stub(sessionFactory, 'getSession') as sinon.SinonStub).callsFake(
+ callback => {
+ callback(null, SESSION, TRANSACTION);
+ },
+ );
+ sandbox.stub(sessionFactory, 'isMultiplexedEnabled').returns(false);
+ });
+
+ it('should return any errors getting a session', done => {
+ const fakeErr = new Error('getting session error');
+
+ (sessionFactory.getSession as sinon.SinonStub).callsFake(callback =>
+ callback(fakeErr, null, null),
+ );
+
+ database.writeAtLeastOnce(mutations, err => {
+ assert.deepStrictEqual(err, fakeErr);
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.writeAtLeastOnce'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'getting session error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ it('with empty mutation should return successful CommitResponse', done => {
+ const fakeMutations = new MutationSet();
+ try {
+ database.writeAtLeastOnce(fakeMutations, (err, response) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(
+ response.commitTimestamp,
+ RESPONSE.commitTimestamp,
+ );
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.writeAtLeastOnce'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+          // Success path: ensure the span did not record an error status.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ const expectedEventNames = ['Using Session'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ } catch (error) {
+ assert(error instanceof Error);
+ }
+ });
+
+ it('with error on null mutation should catch thrown error', done => {
+ try {
+ database.writeAtLeastOnce(null, () => {});
+ } catch (err) {
+ // Performing a substring search on the error because
+ // depending on the version of Node.js, the error might be either of:
+ // * Cannot read properties of null (reading 'proto')
+ // * Cannot read property 'proto' of null
+ (err as grpc.ServiceError).message.includes('Cannot read propert');
+ (err as grpc.ServiceError).message.includes('of null');
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.writeAtLeastOnce'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+
+ const errorMessage = firstSpan.status.message;
+ assert.ok(
+ errorMessage.includes(
+ "Cannot read properties of null (reading 'proto')",
+ ) || errorMessage.includes("Cannot read property 'proto' of null"),
+ );
+
+ // We expect an exception to have been caught as well as a Session event.
+ const expectedEventNames = ['Using Session', 'exception'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ }
+ });
+ });
+
+ describe('batchWriteAtLeastOnce', () => {
+ const mutationGroup1 = new MutationGroup();
+ mutationGroup1.insert('MyTable', {
+ Key: 'ks1',
+ Thing: 'abc',
+ });
+ const mutationGroup2 = new MutationGroup();
+ mutationGroup2.insert('MyTable', {
+ Key: 'ks2',
+ Thing: 'xyz',
+ });
+
+ const mutationGroups = [mutationGroup1, mutationGroup2];
+
+ let fakeSessionFactory: FakeSessionFactory;
+ let fakeSession: FakeSession;
+ let fakeDataStream: Transform;
+ let getSessionStub: sinon.SinonStub;
+
+ const options = {
+ requestOptions: {
+ transactionTag: 'batch-write-tag',
+ },
+ excludeTxnFromChangeStream: true,
+ gaxOptions: {autoPaginate: false},
+ } as BatchWriteOptions;
+
+ beforeEach(() => {
+ fakeSessionFactory = database.sessionFactory_;
+ fakeSession = new FakeSession();
+ fakeDataStream = through.obj();
+
+ getSessionStub = (
+ sandbox.stub(
+ fakeSessionFactory,
+ 'getSessionForReadWrite',
+ ) as sinon.SinonStub
+ ).callsFake(callback => callback(null, fakeSession));
+
+ sandbox.stub(database, 'requestStream').returns(fakeDataStream);
+ });
+
+ it('on retry with "Session not found" error', done => {
+ const sessionNotFoundError = {
+ code: grpc.status.NOT_FOUND,
+ message: 'Session not found',
+ } as grpc.ServiceError;
+ let retryCount = 0;
+
+ database
+ .batchWriteAtLeastOnce(mutationGroups, options)
+ .on('data', () => {})
+ .on('error', err => {
+ assert.fail(err);
+ })
+ .on('end', () => {
+ assert.strictEqual(retryCount, 1);
+
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.batchWriteAtLeastOnce',
+ 'CloudSpanner.Database.batchWriteAtLeastOnce',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+
+ assert.deepStrictEqual(
+ firstSpan.status.message,
+ sessionNotFoundError.message,
+ );
+
+ // The last span should not have an error status.
+ const lastSpan = spans[spans.length - 1];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ lastSpan.status.code,
+ 'Unexpected span status',
+ );
+
+ assert.deepStrictEqual(lastSpan.status.message, undefined);
+
+ const expectedEventNames = [
+ 'Using Session',
+ 'No session available',
+ 'Using Session',
+ ];
+ assert.deepStrictEqual(actualEventNames, expectedEventNames);
+ assert.strictEqual(
+ spans[0].attributes['transaction.tag'],
+ 'batch-write-tag',
+ );
+ done();
+ });
+
+ fakeDataStream.emit('error', sessionNotFoundError);
+ retryCount++;
+ });
+
+ it('on getSession errors', done => {
+ const fakeError = new Error('err');
+
+ getSessionStub.callsFake(callback => callback(fakeError));
+ database
+ .batchWriteAtLeastOnce(mutationGroups, options)
+ .on('error', err => {
+ assert.strictEqual(err, fakeError);
+
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.batchWriteAtLeastOnce',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+
+ assert.deepStrictEqual(firstSpan.status.message, fakeError.message);
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(expectedEventNames, actualEventNames);
+
+ done();
+ });
+ });
+
+ it('with no errors', done => {
+ getSessionStub.callsFake(callback => callback(null, {}));
+ database
+ .batchWriteAtLeastOnce(mutationGroups, options)
+ .on('data', () => {})
+ .on('error', assert.ifError)
+ .on('end', () => {
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.batchWriteAtLeastOnce',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+          // Success path: ensure the span did not record an error status.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ const expectedEventNames = ['Using Session'];
+ assert.deepStrictEqual(actualEventNames, expectedEventNames);
+
+ done();
+ });
+
+ fakeDataStream.emit('data', 'response');
+ fakeDataStream.end('end');
+ });
+ });
+
+ describe('runTransaction', () => {
+ const SESSION = new FakeSession();
+ const TRANSACTION = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadWrite,
+ );
+
+ let fakeSessionFactory: FakeSessionFactory;
+
+ beforeEach(() => {
+ fakeSessionFactory = database.sessionFactory_;
+
+ (
+ sandbox.stub(
+ fakeSessionFactory,
+ 'getSessionForReadWrite',
+ ) as sinon.SinonStub
+ ).callsFake(callback => {
+ callback(null, SESSION, TRANSACTION);
+ });
+ });
+
+ it('with error getting session', done => {
+ const fakeErr = new Error('getting a session');
+
+ (fakeSessionFactory.getSessionForReadWrite as sinon.SinonStub).callsFake(
+ callback => callback(fakeErr),
+ );
+
+ database.runTransaction(
+ {requestOptions: {transactionTag: 'transaction-tag'}},
+ err => {
+ assert.strictEqual(err, fakeErr);
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.runTransaction'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'getting a session',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ assert.strictEqual(
+ spans[0].attributes['transaction.tag'],
+ 'transaction-tag',
+ );
+ done();
+ },
+ );
+ });
+
+ it('with other errors when running the transaction', done => {
+ const fakeError = new Error('internal rejects err');
+
+ sandbox.stub(FakeTransactionRunner.prototype, 'run').rejects(fakeError);
+
+ database.runTransaction(err => {
+ assert.strictEqual(err, fakeError);
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.runTransaction'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'internal rejects err',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+ });
+
+ describe('runTransactionAsync', () => {
+ const SESSION = new FakeSession();
+ const TRANSACTION = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadWrite,
+ );
+
+ let fakeSessionFactory: FakeSessionFactory;
+
+ beforeEach(() => {
+ fakeSessionFactory = database.sessionFactory_;
+ (
+ sandbox.stub(
+ fakeSessionFactory,
+ 'getSessionForReadWrite',
+ ) as sinon.SinonStub
+ ).callsFake(callback => {
+ callback(null, SESSION, TRANSACTION);
+ });
+ });
+
+ it('with no error', async () => {
+ const fakeValue = {};
+
+ sandbox
+ .stub(FakeAsyncTransactionRunner.prototype, 'run')
+ .resolves(fakeValue);
+
+ const value = await database.runTransactionAsync(
+ {requestOptions: {transactionTag: 'transaction-tag'}},
+ async txn => {
+ const result = await txn.run('SELECT 1');
+ await txn.commit();
+ return result;
+ },
+ );
+
+ assert.strictEqual(value, fakeValue);
+
+ await provider.forceFlush();
+ await traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.runTransactionAsync'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+      // Success path: ensure the span did not record an error status.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ const expectedEventNames = ['Using Session'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ assert.strictEqual(
+ spans[0].attributes['transaction.tag'],
+ 'transaction-tag',
+ );
+ });
+
+ it('with error', async () => {
+ const ourException = new Error('our thrown error');
+ sandbox
+ .stub(FakeAsyncTransactionRunner.prototype, 'run')
+ .throws(ourException);
+
+ await assert.rejects(async () => {
+ await database.runTransactionAsync(async txn => {
+ const result = await txn.run('SELECT 1');
+ await txn.commit();
+ return result;
+ });
+ }, ourException);
+
+ await provider.forceFlush();
+ await traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.runTransactionAsync'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ firstSpan.status.code,
+ SpanStatusCode.ERROR,
+ 'Unexpected span status',
+ );
+ assert.strictEqual(
+ firstSpan.status.message,
+ ourException.message,
+ 'Unexpected span status message',
+ );
+
+ const expectedEventNames = ['Using Session', 'exception'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ });
+ });
+
+ describe('runStream', () => {
+ const QUERY = {
+ sql: 'SELECT * FROM table',
+ a: 'b',
+ c: 'd',
+ requestOptions: {requestTag: 'request-tag'},
+ };
+ let fakeSessionFactory: FakeSessionFactory;
+ let fakeSession: FakeSession;
+ let fakeSession2: FakeSession;
+ let fakeSnapshot: FakeTransaction;
+ let fakeSnapshot2: FakeTransaction;
+ let fakeStream: Transform;
+ let fakeStream2: Transform;
+
+ let getSessionStub: sinon.SinonStub;
+
+ beforeEach(() => {
+ fakeSessionFactory = database.sessionFactory_;
+ fakeSession = new FakeSession();
+ fakeSession2 = new FakeSession();
+ fakeSnapshot = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadOnly,
+ );
+ fakeSnapshot2 = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.ReadOnly,
+ );
+ fakeStream = through.obj();
+ fakeStream2 = through.obj();
+
+ getSessionStub = (
+ sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub
+ )
+ .onFirstCall()
+ .callsFake(callback => callback(null, fakeSession))
+ .onSecondCall()
+ .callsFake(callback => callback(null, fakeSession2));
+
+ sandbox.stub(fakeSession, 'snapshot').returns(fakeSnapshot);
+
+ sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2);
+
+ sandbox.stub(fakeSnapshot, 'runStream').returns(fakeStream);
+
+ sandbox.stub(fakeSnapshot2, 'runStream').returns(fakeStream2);
+
+ sandbox.stub(fakeSessionFactory, 'isMultiplexedEnabled').returns(false);
+ });
+
+ it('with error on `getSession`', done => {
+ const fakeError = new Error('getSession error');
+
+ getSessionStub.onFirstCall().callsFake(callback => callback(fakeError));
+
+ database.runStream(QUERY).on('error', err => {
+ assert.strictEqual(err, fakeError);
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.runStream'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'getSession error',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // We don't expect events.
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ assert.strictEqual(spans[0].attributes['request.tag'], 'request-tag');
+ done();
+ });
+ });
+
+ it('propagation on stream/transaction errors', done => {
+ const fakeError = new Error('propagation err');
+ const endStub = sandbox.stub(fakeSnapshot, 'end');
+
+ database.runStream(QUERY).on('error', err => {
+ assert.strictEqual(err, fakeError);
+ assert.strictEqual(endStub.callCount, 1);
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'Exactly 1 span expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.Database.runStream'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.strictEqual(
+ 'propagation err',
+ firstSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ const expectedEventNames = ['Using Session'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+
+ fakeStream.destroy(fakeError);
+ });
+
+ it('retries with "Session not found" error', done => {
+ const sessionNotFoundError = {
+ code: grpc.status.NOT_FOUND,
+ message: 'Session not found',
+ } as grpc.ServiceError;
+ const endStub = sandbox.stub(fakeSnapshot, 'end');
+ const endStub2 = sandbox.stub(fakeSnapshot2, 'end');
+ let rows = 0;
+
+ database
+ .runStream(QUERY)
+ .on('data', () => rows++)
+ .on('error', err => {
+ assert.fail(err);
+ })
+ .on('end', async () => {
+ assert.strictEqual(endStub.callCount, 1);
+ assert.strictEqual(endStub2.callCount, 1);
+ assert.strictEqual(rows, 1);
+
+ await provider.forceFlush();
+ await traceExporter.forceFlush();
+
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 2, 'Exactly 2 spans expected');
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.runStream',
+ 'CloudSpanner.Database.runStream',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the span actually produced an error that was recorded.
+ const lastSpan = spans[0];
+ assert.deepStrictEqual(
+ SpanStatusCode.ERROR,
+ lastSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.deepStrictEqual(
+ 'Session not found',
+ lastSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const firstSpan = spans[1];
+ assert.deepStrictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.deepStrictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ const expectedEventNames = [
+ 'Using Session',
+ 'No session available',
+ 'Using Session',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+
+ fakeStream.emit('error', sessionNotFoundError);
+ fakeStream2.push('row1');
+ fakeStream2.push(null);
+ });
+ });
+
+ describe('runPartitionedUpdate', () => {
+ const QUERY = {
+ sql: 'INSERT INTO `MyTable` (Key, Thing) VALUES(@key, @thing)',
+ params: {
+ key: 'k999',
+ thing: 'abc',
+ },
+ requestOptions: {requestTag: 'request-tag'},
+ };
+
+ let fakeSessionFactory: FakeSessionFactory;
+ let fakeSession: FakeSession;
+ let fakePartitionedDml = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.PartitionedDml,
+ );
+
+ let getSessionStub;
+ let beginStub;
+
+ beforeEach(() => {
+ fakeSessionFactory = database.sessionFactory_;
+ fakeSession = new FakeSession();
+ fakePartitionedDml = new FakeTransaction(
+ {} as google.spanner.v1.TransactionOptions.PartitionedDml,
+ );
+
+ getSessionStub = (
+ sandbox.stub(
+ fakeSessionFactory,
+ 'getSessionForPartitionedOps',
+ ) as sinon.SinonStub
+ ).callsFake(callback => {
+ callback(null, fakeSession);
+ });
+
+ sandbox.stub(fakeSession, 'partitionedDml').returns(fakePartitionedDml);
+
+ beginStub = (
+ sandbox.stub(fakePartitionedDml, 'begin') as sinon.SinonStub
+ ).callsFake(callback => callback(null));
+
+ (
+ sandbox.stub(fakePartitionedDml, 'runUpdate') as sinon.SinonStub
+ ).callsFake((_, callback) => callback(null));
+ });
+
+ interface traceExportResults {
+ spanNames: string[];
+ spans: (typeof ReadableSpan)[];
+ eventNames: string[];
+ }
+
+      async function getTraceExportResults(): Promise<traceExportResults> {
+ await provider.forceFlush();
+ await traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ return Promise.resolve({
+ spanNames: actualSpanNames,
+ spans: spans,
+ eventNames: actualEventNames,
+ });
+ }
+
+ it('with pool errors', done => {
+ const fakeError = new Error('err');
+
+ getSessionStub.callsFake(callback => callback(fakeError));
+ database.runPartitionedUpdate(QUERY, async (err, rowCount) => {
+ assert.strictEqual(err, fakeError);
+ assert.strictEqual(rowCount, 0);
+
+ const exportResults = await getTraceExportResults();
+ const actualSpanNames = exportResults.spanNames;
+ const spans = exportResults.spans;
+ const actualEventNames = exportResults.eventNames;
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.runPartitionedUpdate',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the first span actually produced an error that was recorded.
+ const parentSpan = spans[0];
+ assert.deepStrictEqual(
+ SpanStatusCode.ERROR,
+ parentSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.deepStrictEqual(
+ fakeError.message,
+ parentSpan.status.message.toString(),
+ 'Mismatched span status message',
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ assert.strictEqual(spans[0].attributes['request.tag'], 'request-tag');
+ done();
+ });
+ });
+
+ it('with begin errors', done => {
+ const fakeError = new Error('err');
+
+ beginStub.callsFake(callback => callback(fakeError));
+
+ const releaseStub = (
+ sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub
+ ).withArgs(fakeSession);
+
+ database.runPartitionedUpdate(QUERY, async (err, rowCount) => {
+ assert.strictEqual(err, fakeError);
+ assert.strictEqual(rowCount, 0);
+ assert.strictEqual(releaseStub.callCount, 1);
+
+ const exportResults = await getTraceExportResults();
+ const actualSpanNames = exportResults.spanNames;
+ const spans = exportResults.spans;
+ const actualEventNames = exportResults.eventNames;
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.runPartitionedUpdate',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the first span actually produced an error that was recorded.
+ const parentSpan = spans[0];
+ assert.deepStrictEqual(
+ SpanStatusCode.ERROR,
+ parentSpan.status.code,
+ 'Expected an ERROR span status',
+ );
+ assert.deepStrictEqual(
+ fakeError.message,
+ parentSpan.status.message.toString(),
+ 'Mismatched span status message',
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ done();
+ });
+ });
+
+ it('session released on transaction end', done => {
+ const releaseStub = (
+ sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub
+ ).withArgs(fakeSession);
+
+ database.runPartitionedUpdate(QUERY, async () => {
+ const exportResults = await getTraceExportResults();
+ const actualSpanNames = exportResults.spanNames;
+ const spans = exportResults.spans;
+ const actualEventNames = exportResults.eventNames;
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.runPartitionedUpdate',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Ensure that the first span actually produced an error that was recorded.
+ const parentSpan = spans[0];
+ assert.deepStrictEqual(
+ SpanStatusCode.UNSET,
+ parentSpan.status.code,
+ 'Unexpected span status',
+ );
+ assert.deepStrictEqual(
+ undefined,
+ parentSpan.status.message,
+ 'Mismatched span status message',
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ done();
+ });
+
+ fakePartitionedDml.emit('end');
+ assert.strictEqual(releaseStub.callCount, 1);
+ });
+ });
+});
diff --git a/handwritten/spanner/observability-test/helper.ts b/handwritten/spanner/observability-test/helper.ts
new file mode 100644
index 00000000000..6ce1ba8b6a4
--- /dev/null
+++ b/handwritten/spanner/observability-test/helper.ts
@@ -0,0 +1,95 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {ContextManager, context} from '@opentelemetry/api';
+import * as assert from 'assert';
+const {ReadableSpan} = require('@opentelemetry/sdk-trace-base');
+import {SEMATTRS_DB_NAME} from '@opentelemetry/semantic-conventions';
+
+export const createSessionEvents = [
+ 'Requesting a multiplexed session',
+ 'Created a multiplexed session',
+];
+
+export const batchCreateSessionsEvents = [
+ 'Requesting 25 sessions',
+ 'Creating 25 sessions',
+ 'Requested for 25 sessions returned 25',
+];
+
+export const waitingSessionsEvents = [
+ 'Waiting for a multiplexed session to become available',
+ 'Acquired multiplexed session',
+ 'Using Session',
+];
+
+export const cacheSessionEvents = ['Cache hit: has usable multiplexed session'];
+
+/**
+ * This utility exists as a test helper because mocha has builtin "context"
+ * and referring to context causes type/value collision errors.
+ */
+export function setGlobalContextManager(manager: ContextManager) {
+ context.setGlobalContextManager(manager);
+}
+
+/**
+ * This utility exists as a test helper because mocha has builtin "context"
+ * and referring to context causes type/value collision errors.
+ */
+export function disableContextAndManager(manager: ContextManager) {
+ manager.disable();
+ context.disable();
+}
+
+export function generateWithAllSpansHaveDBName(dbName: String): Function {
+ return function (spans: (typeof ReadableSpan)[]) {
+ spans.forEach(span => {
+ assert.deepStrictEqual(
+ span.attributes[SEMATTRS_DB_NAME],
+ dbName,
+ `Span ${span.name} has mismatched DB_NAME`,
+ );
+ });
+ };
+}
+
+export async function verifySpansAndEvents(
+ traceExporter,
+ expectedSpans,
+ expectedEvents,
+) {
+ await traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ const actualEventNames: string[] = [];
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpans,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpans}`,
+ );
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEvents,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEvents}`,
+ );
+}
diff --git a/handwritten/spanner/observability-test/observability.ts b/handwritten/spanner/observability-test/observability.ts
new file mode 100644
index 00000000000..99e90bb0662
--- /dev/null
+++ b/handwritten/spanner/observability-test/observability.ts
@@ -0,0 +1,476 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const assert = require('assert');
+const {
+ AlwaysOffSampler,
+ AlwaysOnSampler,
+ NodeTracerProvider,
+ InMemorySpanExporter,
+} = require('@opentelemetry/sdk-trace-node');
+const {SpanStatusCode, TracerProvider} = require('@opentelemetry/api');
+// eslint-disable-next-line n/no-extraneous-require
+const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base');
+const {
+ TRACER_NAME,
+ TRACER_VERSION,
+ SPAN_NAMESPACE_PREFIX,
+ getActiveOrNoopSpan,
+ setSpanError,
+ setSpanErrorAndException,
+ startTrace,
+} = require('../src/instrument');
+const {
+ ATTR_OTEL_SCOPE_NAME,
+ ATTR_OTEL_SCOPE_VERSION,
+ SEMATTRS_DB_NAME,
+ SEMATTRS_DB_SQL_TABLE,
+ SEMATTRS_DB_STATEMENT,
+ SEMATTRS_DB_SYSTEM,
+ SEMATTRS_EXCEPTION_MESSAGE,
+} = require('@opentelemetry/semantic-conventions');
+
+const {disableContextAndManager, setGlobalContextManager} = require('./helper');
+
+const {
+ AsyncHooksContextManager,
+} = require('@opentelemetry/context-async-hooks');
+
+describe('startTrace', () => {
+ const globalExporter = new InMemorySpanExporter();
+ const sampler = new AlwaysOnSampler();
+
+ const globalProvider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: globalExporter,
+ spanProcessors: [new SimpleSpanProcessor(globalExporter)],
+ });
+ globalProvider.register();
+
+ const contextManager = new AsyncHooksContextManager();
+ setGlobalContextManager(contextManager);
+
+ afterEach(() => {
+ globalExporter.forceFlush();
+ });
+
+ after(async () => {
+ globalExporter.forceFlush();
+ await globalProvider.shutdown();
+ disableContextAndManager(contextManager);
+ });
+
+ it('with TracerProvider in global configuration', () => {
+ startTrace('mySpan', {}, span => {
+ span.end();
+
+ assert.equal(
+ span.name,
+ SPAN_NAMESPACE_PREFIX + '.mySpan',
+ 'name mismatch',
+ );
+ });
+ });
+
+ it('with TracerProvider in options, skips using global TracerProvider', () => {
+ const overridingExporter = new InMemorySpanExporter();
+ const overridingProvider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: overridingExporter,
+ spanProcessors: [new SimpleSpanProcessor(overridingExporter)],
+ });
+
+ startTrace(
+ 'aSpan',
+ {opts: {tracerProvider: overridingProvider}},
+ async span => {
+ await new Promise(resolve => setTimeout(resolve, 400));
+ span.end();
+
+ const gotSpansFromGlobal = globalExporter.getFinishedSpans();
+ assert.strictEqual(
+ gotSpansFromGlobal.length,
+ 0,
+            `Expected no spans from the global tracerProvider and exporter but got ${gotSpansFromGlobal.length}`,
+ );
+
+ const gotSpansFromCurrent = overridingExporter.getFinishedSpans();
+ assert.strictEqual(
+ gotSpansFromCurrent.length,
+ 1,
+            `Expected exactly 1 span but got ${gotSpansFromCurrent.length}`,
+ );
+
+ overridingExporter.forceFlush();
+ await overridingProvider.shutdown();
+ },
+ );
+ });
+
+ it('sanity check: TRACER_NAME, TRACER_VERSION', () => {
+ assert.equal(!TRACER_NAME, false, 'TRACER_NAME must be set');
+ assert.equal(!TRACER_VERSION, false, 'TRACER_VERSION must be set');
+ });
+
+ it('with semantic attributes', () => {
+ const opts = {
+ tableName: 'table',
+ dbName: 'projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID',
+ };
+ startTrace('aSpan', opts, span => {
+ assert.equal(
+ span.attributes[ATTR_OTEL_SCOPE_NAME],
+ TRACER_NAME,
+ 'Missing OTEL_SCOPE_NAME attribute',
+ );
+
+ assert.equal(
+ span.attributes[ATTR_OTEL_SCOPE_VERSION],
+ TRACER_VERSION,
+ 'Missing OTEL_SCOPE_VERSION attribute',
+ );
+
+ assert.equal(
+ span.attributes['gcp.client.service'],
+ 'spanner',
+ 'Missing gcp.client.service attribute',
+ );
+
+ assert.equal(
+ span.attributes['gcp.client.version'],
+ TRACER_VERSION,
+ 'Missing gcp.client.version attribute',
+ );
+
+ assert.equal(
+ span.attributes['gcp.client.repo'],
+ 'googleapis/nodejs-spanner',
+ 'Missing gcp.client.repo attribute',
+ );
+
+ assert.equal(
+ span.attributes['gcp.resource.name'],
+ '//spanner.googleapis.com/projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID',
+ 'Missing gcp.resource.name attribute',
+ );
+
+ assert.equal(
+ span.attributes[SEMATTRS_DB_SQL_TABLE],
+ 'table',
+ 'Missing DB_SQL_TABLE attribute',
+ );
+
+ assert.equal(
+ span.attributes[SEMATTRS_DB_NAME],
+ 'projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID',
+ 'Missing DB_NAME attribute',
+ );
+ });
+ });
+
+ it('with enableExtendedTracing=true, no sql value set', () => {
+ const opts = {opts: {enableExtendedTracing: true}};
+ startTrace('aSpan', opts, span => {
+ assert.equal(
+ span.attributes[SEMATTRS_DB_STATEMENT],
+ undefined,
+ 'Unexpected DB_STATEMENT attribute',
+ );
+ });
+ });
+
+ it('with enableExtendedTracing=true, sql string value set', () => {
+ const opts = {
+ opts: {enableExtendedTracing: true},
+ sql: 'SELECT CURRENT_TIMESTAMP()',
+ };
+
+ startTrace('aSpan', opts, span => {
+ assert.equal(
+ span.attributes[SEMATTRS_DB_STATEMENT],
+ 'SELECT CURRENT_TIMESTAMP()',
+ 'Mismatched DB_STATEMENT attribute',
+ );
+ });
+ });
+
+ it('with enableExtendedTracing=false, sql string value set', () => {
+ const opts = {
+ opts: {enableExtendedTracing: false},
+ sql: 'SELECt CURRENT_TIMESTAMP()',
+ };
+
+ startTrace('aSpan', opts, span => {
+ assert.equal(
+ span.attributes[SEMATTRS_DB_STATEMENT],
+ undefined,
+ 'Mismatched DB_STATEMENT attribute',
+ );
+ });
+ });
+
+ it('with enableExtendedTracing=true, sql ExecuteSqlRequest value set', () => {
+ const req = {sql: 'SELECT 1=1'};
+ const opts = {
+ opts: {enableExtendedTracing: true},
+ sql: req,
+ };
+
+ startTrace('aSpan', opts, span => {
+ assert.equal(
+ span.attributes[SEMATTRS_DB_STATEMENT],
+ 'SELECT 1=1',
+ 'Mismatched DB_STATEMENT attribute',
+ );
+ });
+ });
+
+ it('with enableExtendedTracing=false, sql ExecuteSqlRequest value set', () => {
+ const req = {sql: 'SELECT 1=1'};
+ const opts = {
+ opts: {enableExtendedTracing: true},
+ sql: req,
+ };
+
+ startTrace('aSpan', opts, span => {
+ assert.equal(
+ span.attributes[SEMATTRS_DB_STATEMENT],
+ req.sql,
+ 'Mismatched DB_STATEMENT attribute',
+ );
+ });
+ });
+
+ it('alwaysOffSampler used, no spans exported', () => {
+ const overridingExporter = new InMemorySpanExporter();
+ const overridingProvider = new NodeTracerProvider({
+ sampler: new AlwaysOffSampler(),
+ exporter: overridingExporter,
+ spanProcessors: [new SimpleSpanProcessor(overridingExporter)],
+ });
+ overridingProvider.register();
+
+ startTrace(
+ 'aSpan',
+ {opts: {tracerProvider: overridingProvider}},
+ async span => {
+ await new Promise(resolve => setTimeout(resolve, 400));
+ span.end();
+
+ const gotSpansFromGlobal = globalExporter.getFinishedSpans();
+ assert.strictEqual(
+ gotSpansFromGlobal.length,
+ 0,
+            `Expected no spans but got ${gotSpansFromGlobal.length}`,
+ );
+
+ const gotSpansFromCurrent = overridingExporter.getFinishedSpans();
+ assert.strictEqual(
+ gotSpansFromCurrent.length,
+ 0,
+            `Expected no spans but got ${gotSpansFromCurrent.length}`,
+ );
+
+ overridingExporter.forceFlush();
+ await overridingProvider.shutdown();
+ },
+ );
+ });
+});
+
+describe('getActiveOrNoopSpan', () => {
+ let globalProvider: typeof TracerProvider;
+ let exporter: typeof InMemorySpanExporter;
+
+ before(() => {
+ exporter = new InMemorySpanExporter();
+ globalProvider = new NodeTracerProvider({
+ sampler: new AlwaysOffSampler(),
+ exporter: exporter,
+ spanProcessors: [new SimpleSpanProcessor(exporter)],
+ });
+ globalProvider.register();
+ });
+
+ beforeEach(() => {
+ exporter.forceFlush();
+ });
+
+ after(async () => {
+ await globalProvider.shutdown();
+ });
+
+ it('with no value should return a noopSpan and nothing exported', () => {
+ const span = getActiveOrNoopSpan();
+ assert.strictEqual(!span, false, 'the span MUST not be null regardless');
+ span.updateName('aSpan should not crash');
+ span.setStatus({message: 'done here'});
+ });
+
+ it('with a started span should return the currently active one', () => {
+ startTrace('aSpan', {}, span => {
+ const activeSpan = getActiveOrNoopSpan();
+ assert.strictEqual(
+ span.name,
+ SPAN_NAMESPACE_PREFIX + '.aSpan',
+ 'names must match',
+ );
+ assert.strictEqual(
+ span.name,
+ activeSpan.name,
+ `names must match between activeSpan or current one\n\tGot: ${span.name}\n\tWant: ${activeSpan.name}`,
+ );
+ assert.strictEqual(
+ span.startTime,
+ activeSpan.startTime,
+ 'startTimes must match',
+ );
+ assert.ok(
+ span.duration,
+ undefined,
+ 'the unended span must have an undefined duration',
+ );
+ assert.ok(
+ activeSpan.duration,
+ undefined,
+            `the unended span must have an undefined duration, got ${activeSpan.duration}`,
+ );
+ assert.strictEqual(
+ span.duration,
+ activeSpan.duration,
+ 'durations must match',
+ );
+ span.end();
+ });
+ });
+});
+
+describe('setError', () => {
+ const exporter = new InMemorySpanExporter();
+ const provider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: exporter,
+ spanProcessors: [new SimpleSpanProcessor(exporter)],
+ });
+ provider.register();
+
+ const contextManager = new AsyncHooksContextManager();
+ setGlobalContextManager(contextManager);
+
+ afterEach(() => {
+ exporter.forceFlush();
+ });
+
+ after(async () => {
+ exporter.forceFlush();
+ await provider.shutdown();
+ disableContextAndManager(contextManager);
+ });
+
+ it('passing in null error or null span should have no effect', () => {
+ startTrace('aSpan', {opts: {tracerProvider: provider}}, span => {
+ const status1 = span.status;
+ let res = setSpanError(span, null);
+ assert.strictEqual(res, false, 'nothing was set');
+ const status2 = span.status;
+ assert.strictEqual(
+ status1,
+ status2,
+ 'setting null error should have no effect',
+ );
+
+ res = setSpanError(null, null);
+ assert.strictEqual(res, false, 'nothing was set');
+ });
+ });
+
+ it('a non-empty string should set the message', () => {
+ startTrace('aSpan', {opts: {tracerProvider: provider}}, span => {
+ const res = setSpanError(span, 'this one');
+ assert.strictEqual(res, true, 'value was set');
+ span.end();
+
+ const spans = exporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'exactly 1 span must be exported');
+
+ const expSpan = spans[0];
+ const status2 = expSpan.status;
+ assert.strictEqual(status2.message, 'this one');
+ assert.strictEqual(status2.code, SpanStatusCode.ERROR);
+ });
+ });
+});
+
+describe('setErrorAndException', () => {
+ const exporter = new InMemorySpanExporter();
+ const provider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: exporter,
+ spanProcessors: [new SimpleSpanProcessor(exporter)],
+ });
+ provider.register();
+
+ const contextManager = new AsyncHooksContextManager();
+ setGlobalContextManager(contextManager);
+
+ afterEach(() => {
+ exporter.forceFlush();
+ });
+
+ after(async () => {
+ await provider.shutdown();
+ disableContextAndManager(contextManager);
+ });
+
+ it('passing in null error or null span should have no effect', () => {
+ startTrace('aSpan', {opts: {tracerProvider: provider}}, span => {
+ const status1 = span.status;
+ let res = setSpanErrorAndException(span, null);
+ assert.strictEqual(res, false, 'nothing was set');
+ const status2 = span.status;
+ assert.strictEqual(
+ status1,
+ status2,
+ 'setting null error should have no effect',
+ );
+
+ res = setSpanErrorAndException(null, null);
+ assert.strictEqual(res, false, 'nothing was set');
+ });
+ });
+
+ it('a non-empty string should set the message', () => {
+ startTrace('aSpan', {opts: {tracerProvider: provider}}, span => {
+ const res = setSpanErrorAndException(span, 'this one');
+ assert.strictEqual(res, true, 'value was set');
+ span.end();
+
+ const spans = exporter.getFinishedSpans();
+ assert.strictEqual(spans.length, 1, 'exactly 1 span must be exported');
+
+ const expSpan = spans[0];
+ const status2 = expSpan.status;
+ assert.strictEqual(status2.message, 'this one');
+ assert.strictEqual(status2.code, SpanStatusCode.ERROR);
+
+ assert.strictEqual(
+ expSpan.events[0].attributes[SEMATTRS_EXCEPTION_MESSAGE],
+ 'this one',
+ 'the exception must have been recorded',
+ );
+ });
+ });
+});
diff --git a/handwritten/spanner/observability-test/session-pool.ts b/handwritten/spanner/observability-test/session-pool.ts
new file mode 100644
index 00000000000..8864e3993fd
--- /dev/null
+++ b/handwritten/spanner/observability-test/session-pool.ts
@@ -0,0 +1,218 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import {before, beforeEach, afterEach, describe, it} from 'mocha';
+import * as extend from 'extend';
+import PQueue from 'p-queue';
+import * as proxyquire from 'proxyquire';
+import * as sinon from 'sinon';
+import stackTrace = require('stack-trace');
+const {
+ AlwaysOnSampler,
+ NodeTracerProvider,
+ InMemorySpanExporter,
+} = require('@opentelemetry/sdk-trace-node');
+// eslint-disable-next-line n/no-extraneous-require
+const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base');
+// eslint-disable-next-line n/no-extraneous-require
+const {SpanStatusCode} = require('@opentelemetry/api');
+
+import {Database} from '../src/database';
+import {Session} from '../src/session';
+import * as sp from '../src/session-pool';
+
+let pQueueOverride: typeof PQueue | null = null;
+
+function FakePQueue(options) {
+ return new (pQueueOverride || PQueue)(options);
+}
+
+FakePQueue.default = FakePQueue;
+
+class FakeTransaction {
+ options;
+ constructor(options?) {
+ this.options = options;
+ }
+  async begin(): Promise<void> {}
+}
+
+const fakeStackTrace = extend({}, stackTrace);
+
+describe('SessionPool', () => {
+ let sessionPool: sp.SessionPool;
+ // tslint:disable-next-line variable-name
+ let SessionPool: typeof sp.SessionPool;
+
+ function noop() {}
+ const DATABASE = {
+ batchCreateSessions: noop,
+ databaseRole: 'parent_role',
+ } as unknown as Database;
+
+ const sandbox = sinon.createSandbox();
+ sandbox.stub().throws('Should not be called.');
+
+ const createSession = (name = 'id', props?): Session => {
+ props = props || {};
+
+ return Object.assign(new Session(DATABASE, name), props, {
+ create: sandbox.stub().resolves(),
+ delete: sandbox.stub().resolves(),
+ keepAlive: sandbox.stub().resolves(),
+ transaction: sandbox.stub().returns(new FakeTransaction()),
+ });
+ };
+
+ before(() => {
+ SessionPool = proxyquire('../src/session-pool.js', {
+ 'p-queue': FakePQueue,
+ 'stack-trace': fakeStackTrace,
+ }).SessionPool;
+ });
+
+ afterEach(() => {
+ pQueueOverride = null;
+ sandbox.restore();
+ });
+
+ const traceExporter = new InMemorySpanExporter();
+ const sampler = new AlwaysOnSampler();
+ const provider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ beforeEach(() => {
+ DATABASE.session = createSession;
+ DATABASE._observabilityOptions = {
+ tracerProvider: provider,
+ };
+ sessionPool = new SessionPool(DATABASE);
+ sessionPool._observabilityOptions = DATABASE._observabilityOptions;
+ traceExporter.reset();
+ });
+
+ describe('_createSessions', () => {
+ const OPTIONS = 3;
+ it('on exception from Database.batchCreateSessions', async () => {
+ const ourException = new Error('this fails intentionally');
+ sandbox.stub(DATABASE, 'batchCreateSessions').throws(ourException);
+ sandbox.stub(sessionPool, 'release');
+
+ await assert.rejects(async () => {
+ await sessionPool._createSessions(OPTIONS);
+ }, ourException);
+
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.SessionPool.createSessions'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ 'Requesting 3 sessions',
+ 'Creating 3 sessions',
+ 'Requested for 3 sessions returned 0',
+ 'exception',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+        'Unexpected span status code',
+ );
+ assert.strictEqual(
+ ourException.message,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+ });
+
+ it('without error', async () => {
+ const RESPONSE = [[{}, {}, {}]];
+
+ sandbox.stub(DATABASE, 'batchCreateSessions').resolves(RESPONSE);
+ sandbox.stub(sessionPool, 'release');
+
+ await sessionPool._createSessions(OPTIONS);
+ assert.strictEqual(sessionPool.size, 3);
+
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = ['CloudSpanner.SessionPool.createSessions'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ 'Requesting 3 sessions',
+ 'Creating 3 sessions',
+ 'Requested for 3 sessions returned 3',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+        'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+ });
+ });
+});
diff --git a/handwritten/spanner/observability-test/spanner.ts b/handwritten/spanner/observability-test/spanner.ts
new file mode 100644
index 00000000000..d298c7d7dd3
--- /dev/null
+++ b/handwritten/spanner/observability-test/spanner.ts
@@ -0,0 +1,2027 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import {grpc} from 'google-gax';
+import {google} from '../protos/protos';
+import {Database, Instance, Spanner} from '../src';
+import {MutationSet} from '../src/transaction';
+import protobuf = google.spanner.v1;
+import v1 = google.spanner.v1;
+import * as mock from '../test/mockserver/mockspanner';
+import * as mockInstanceAdmin from '../test/mockserver/mockinstanceadmin';
+import * as mockDatabaseAdmin from '../test/mockserver/mockdatabaseadmin';
+import * as sinon from 'sinon';
+import {Row} from '../src/partial-result-stream';
+import {END_TO_END_TRACING_HEADER} from '../src/common';
+import {MetricsTracerFactory} from '../src/metrics/metrics-tracer-factory';
+const {
+ AlwaysOnSampler,
+ NodeTracerProvider,
+ InMemorySpanExporter,
+} = require('@opentelemetry/sdk-trace-node');
+// eslint-disable-next-line n/no-extraneous-require
+const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base');
+const {SpanStatusCode} = require('@opentelemetry/api');
+const {
+ disableContextAndManager,
+ generateWithAllSpansHaveDBName,
+ setGlobalContextManager,
+ verifySpansAndEvents,
+ createSessionEvents,
+ waitingSessionsEvents,
+ cacheSessionEvents,
+} = require('./helper');
+const {
+ AsyncHooksContextManager,
+} = require('@opentelemetry/context-async-hooks');
+
+const {ObservabilityOptions} = require('../src/instrument');
+const selectSql = 'SELECT 1';
+const updateSql = 'UPDATE FOO SET BAR=1 WHERE BAZ=2';
+
+async function disableMetrics(sandbox?: sinon.SinonSandbox) {
+ if (sandbox) {
+ if (
+ Object.prototype.hasOwnProperty.call(
+ process.env,
+ 'SPANNER_DISABLE_BUILTIN_METRICS',
+ )
+ ) {
+ sandbox.replace(process.env, 'SPANNER_DISABLE_BUILTIN_METRICS', 'true');
+ } else {
+ sandbox.define(process.env, 'SPANNER_DISABLE_BUILTIN_METRICS', 'true');
+ }
+ }
+ await MetricsTracerFactory.resetInstance();
+ MetricsTracerFactory.enabled = false;
+}
+
+/** A simple result set for SELECT 1. */
+function createSelect1ResultSet(): protobuf.ResultSet {
+ const fields = [
+ protobuf.StructType.Field.create({
+ name: 'NUM',
+ type: protobuf.Type.create({code: protobuf.TypeCode.INT64}),
+ }),
+ ];
+ const metadata = new protobuf.ResultSetMetadata({
+ rowType: new protobuf.StructType({
+ fields,
+ }),
+ });
+ return protobuf.ResultSet.create({
+ metadata,
+ rows: [{values: [{stringValue: '1'}]}],
+ });
+}
+
+interface setupResults {
+ server: grpc.Server;
+ spanner: Spanner;
+ spannerMock: mock.MockSpanner;
+}
+
+async function setup(
+ observabilityOptions?: typeof ObservabilityOptions,
+ sandbox?: sinon.SinonSandbox,
+): Promise<setupResults> {
+ const server = new grpc.Server();
+ const spannerMock = mock.createMockSpanner(server);
+ mockInstanceAdmin.createMockInstanceAdmin(server);
+ mockDatabaseAdmin.createMockDatabaseAdmin(server);
+
+ const port: number = await new Promise<number>((resolve, reject) => {
+ server.bindAsync(
+ '0.0.0.0:0',
+ grpc.ServerCredentials.createInsecure(),
+ (err, assignedPort) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(assignedPort);
+ }
+ },
+ );
+ });
+
+ spannerMock.putStatementResult(
+ selectSql,
+ mock.StatementResult.resultSet(createSelect1ResultSet()),
+ );
+ spannerMock.putStatementResult(
+ updateSql,
+ mock.StatementResult.updateCount(1),
+ );
+
+ await disableMetrics(sandbox);
+ const spanner = new Spanner({
+ projectId: 'observability-project-id',
+ servicePath: 'localhost',
+ port,
+ sslCreds: grpc.credentials.createInsecure(),
+ observabilityOptions: observabilityOptions,
+ });
+
+ return Promise.resolve({
+ spanner: spanner,
+ server: server,
+ spannerMock: spannerMock,
+ });
+}
+
+describe('EndToEnd', async () => {
+ const sandbox = sinon.createSandbox();
+ const contextManager = new AsyncHooksContextManager();
+ setGlobalContextManager(contextManager);
+ afterEach(() => {
+ disableContextAndManager(contextManager);
+ });
+
+ const traceExporter = new InMemorySpanExporter();
+ const sampler = new AlwaysOnSampler();
+ const tracerProvider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ const setupResult = await setup(
+ {
+ tracerProvider: tracerProvider,
+ enableExtendedTracing: false,
+ },
+ sandbox,
+ );
+
+ const server = setupResult.server;
+ const spannerMock = setupResult.spannerMock;
+ const spanner = setupResult.spanner;
+ const instance = spanner.instance('instance');
+
+ after(async () => {
+ spanner.close();
+ await server.tryShutdown(() => {});
+ sandbox.restore();
+ });
+
+ afterEach(async () => {
+ await tracerProvider.forceFlush();
+ await traceExporter.reset();
+ spannerMock.resetRequests();
+ });
+
+ const database = instance.database('database');
+
+ beforeEach(async () => {
+ // To deflake expectations of session creation, let's
+ // issue out a warm-up request that'll ensure
+ // that the MultiplexedSession is created deterministically.
+ await database.run('SELECT 1');
+ // Clear out any present traces to make a clean slate for testing.
+ traceExporter.forceFlush();
+ traceExporter.reset();
+ });
+
+ describe('Database', () => {
+ it('getSessions', async () => {
+ await database.getSessions();
+ const expectedSpanNames = ['CloudSpanner.Database.getSessions'];
+ const expectedEventNames = [];
+
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ });
+
+ it('getSnapshot', done => {
+ database.getSnapshot((err, transaction) => {
+ assert.ifError(err);
+
+ transaction!.run('SELECT 1', async err => {
+ assert.ifError(err);
+ transaction!.end();
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.begin',
+ 'CloudSpanner.Database.getSnapshot',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ ];
+ const expectedEventNames = [
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ ...cacheSessionEvents,
+ 'Starting stream',
+ ];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ done();
+ });
+ });
+ });
+
+ it('getTransaction', done => {
+ database.getTransaction(async (err, transaction) => {
+ assert.ifError(err);
+ assert.ok(transaction);
+ transaction!.end();
+ void transaction!.commit();
+
+ const expectedSpanNames = ['CloudSpanner.Database.getTransaction'];
+ const expectedEventNames = [...cacheSessionEvents, 'Using Session'];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ done();
+ });
+ });
+
+ it('runStream', done => {
+ database
+ .runStream('SELECT 1')
+ .on('data', () => {})
+ .once('error', assert.ifError)
+ .on('end', async () => {
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Database.runStream',
+ ];
+ const expectedEventNames = [
+ 'Starting stream',
+ ...cacheSessionEvents,
+ 'Using Session',
+ ];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+
+ done();
+ });
+ });
+
+ it('run', async () => {
+ await database.run('SELECT 1');
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Database.runStream',
+ 'CloudSpanner.Database.run',
+ ];
+ const expectedEventNames = [
+ 'Starting stream',
+ ...cacheSessionEvents,
+ 'Using Session',
+ ];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ });
+
+ it('runTransaction', done => {
+ database.runTransaction(async (err, transaction) => {
+ assert.ifError(err);
+ await transaction!.run('SELECT 1');
+ await transaction!.commit();
+ await transaction!.end();
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ 'CloudSpanner.Transaction.commit',
+ 'CloudSpanner.Database.runTransaction',
+ ];
+ const expectedEventNames = [
+ 'Starting stream',
+ 'Transaction Creation Done',
+ 'Starting Commit',
+ 'Commit Done',
+ ...cacheSessionEvents,
+ ];
+
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ done();
+ });
+ });
+
+ it('runTransactionAsync', async () => {
+ await database.runTransactionAsync(async transaction => {
+ await transaction!.run('SELECT 1');
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ 'CloudSpanner.Database.runTransactionAsync',
+ ];
+ const expectedEventNames = [
+ 'Starting stream',
+ 'Transaction Creation Done',
+ ...cacheSessionEvents,
+ 'Using Session',
+ ];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ });
+
+ it('writeAtLeastOnce', done => {
+ const blankMutations = new MutationSet();
+ database.writeAtLeastOnce(blankMutations, async (err, response) => {
+ assert.ifError(err);
+ assert.ok(response);
+ const expectedSpanNames = [
+ 'CloudSpanner.Transaction.commit',
+ 'CloudSpanner.Database.writeAtLeastOnce',
+ ];
+ const expectedEventNames = [
+ 'Starting Commit',
+ 'Commit Done',
+ ...cacheSessionEvents,
+ 'Using Session',
+ ];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ done();
+ });
+ });
+
+ it('batchCreateSessions', done => {
+ database.batchCreateSessions(5, async err => {
+ assert.ifError(err);
+ const expectedSpanNames = ['CloudSpanner.Database.batchCreateSessions'];
+ const expectedEventNames = [];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ done();
+ });
+ });
+
+ it('runPartitionedUpdate', async () => {
+ await database.runPartitionedUpdate({
+ sql: updateSql,
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.begin',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ 'CloudSpanner.Dml.runUpdate',
+ 'CloudSpanner.PartitionedDml.runUpdate',
+ 'CloudSpanner.Database.runPartitionedUpdate',
+ ];
+ const expectedEventNames = [
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ 'Starting stream',
+ 'Acquiring session',
+ 'Cache hit: has usable session',
+ 'Acquired session',
+ ];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ });
+ });
+});
+
+describe('ObservabilityOptions injection and propagation', async () => {
+ let sandbox;
+
+ beforeEach(() => {
+ sandbox = sinon.createSandbox();
+ });
+
+ afterEach(() => {
+ sandbox.restore();
+ });
+
+ it('Passed into Spanner, Instance and Database', async () => {
+ const traceExporter = new InMemorySpanExporter();
+ const tracerProvider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ const observabilityOptions: typeof ObservabilityOptions = {
+ tracerProvider: tracerProvider,
+ enableExtendedTracing: true,
+ };
+
+ const setupResult = await setup(observabilityOptions, sandbox);
+ const spanner = setupResult.spanner;
+ const server = setupResult.server;
+ const spannerMock = setupResult.spannerMock;
+
+ after(async () => {
+ traceExporter.reset();
+ await tracerProvider.shutdown();
+ spannerMock.resetRequests();
+ spanner.close();
+ server.tryShutdown(() => {});
+ sandbox.restore();
+ });
+
+ // Ensure that the same observability configuration is set on the Spanner client.
+ assert.deepStrictEqual(spanner._observabilityOptions, observabilityOptions);
+
+ // Acquire a handle to the Instance through spanner.instance.
+ const instanceByHandle = spanner.instance('instance');
+ assert.deepStrictEqual(
+ instanceByHandle._observabilityOptions,
+ observabilityOptions,
+ );
+
+ // Create the Instance by means of a constructor directly.
+ const instanceByConstructor = new Instance(spanner, 'myInstance');
+ assert.deepStrictEqual(
+ instanceByConstructor._observabilityOptions,
+ observabilityOptions,
+ );
+
+ // Acquire a handle to the Database through instance.database.
+ const databaseByHandle = instanceByHandle.database('database');
+ assert.deepStrictEqual(
+ databaseByHandle._observabilityOptions,
+ observabilityOptions,
+ );
+
+ // Create the Database by means of a constructor directly.
+ const databaseByConstructor = new Database(
+ instanceByConstructor,
+ 'myDatabase',
+ );
+ assert.deepStrictEqual(
+ databaseByConstructor._observabilityOptions,
+ observabilityOptions,
+ );
+ });
+
+ describe('Transaction', async () => {
+ const traceExporter = new InMemorySpanExporter();
+ const tracerProvider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ const observabilityOptions: typeof ObservabilityOptions = {
+ tracerProvider: tracerProvider,
+ enableExtendedTracing: true,
+ };
+ const setupResult = await setup(observabilityOptions, sandbox);
+ const spanner = setupResult.spanner;
+ const server = setupResult.server;
+ const spannerMock = setupResult.spannerMock;
+
+ after(async () => {
+ traceExporter.reset();
+ await tracerProvider.shutdown();
+ spannerMock.resetRequests();
+ spanner.close();
+ server.tryShutdown(() => {});
+ });
+
+ let database: Database;
+ beforeEach(async () => {
+ const instance = spanner.instance('instance');
+ database = instance.database('database');
+
+ // To deflake expectations of session creation, let's
+ // issue out a warm-up request that'll ensure
+ // that the MultiplexedSession is created deterministically.
+ await database.run('SELECT 1');
+ // Clear out any present traces to make a clean slate for testing.
+ traceExporter.forceFlush();
+ traceExporter.reset();
+ });
+
+ afterEach(() => {
+ spannerMock.resetRequests();
+ });
+
+ const db = spanner.instance('instance').database('database');
+ const withAllSpansHaveDBName = generateWithAllSpansHaveDBName(
+ db.formattedName_,
+ );
+
+ it('run', async () => {
+ let txn;
+ try {
+ [txn] = await database.getTransaction();
+ await txn.run('SELECT 1');
+ await tracerProvider.forceFlush();
+ traceExporter.forceFlush();
+
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.getTransaction',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ ...cacheSessionEvents,
+ 'Using Session',
+ 'Starting stream',
+ 'Transaction Creation Done',
+ ];
+ assert.strictEqual(
+ actualEventNames.every(value => expectedEventNames.includes(value)),
+ true,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ } catch (err) {
+ assert.ifError(err);
+ } finally {
+ txn.end();
+ }
+ });
+
+ it('Transaction.begin+Dml.runUpdate', done => {
+ database.getTransaction(async (err, tx) => {
+ assert.ifError(err);
+
+ // Firstly erase the prior spans so that we can have only Transaction spans.
+ traceExporter.reset();
+
+ await tx!.begin();
+ tx!.runUpdate(updateSql, async err => {
+ assert.ifError(err);
+ tx!.end();
+
+ await tracerProvider.forceFlush();
+ await traceExporter.forceFlush();
+
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.begin',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ 'CloudSpanner.Dml.runUpdate',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ 'Starting stream',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames.every(value => expectedEventNames.includes(value)),
+ true,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+ });
+
+ it('runStream', done => {
+ let rowCount = 0;
+ database.getTransaction((err, tx) => {
+ assert.ifError(err);
+ tx!
+ .runStream(selectSql)
+ .on('data', () => rowCount++)
+ .on('error', assert.ifError)
+ .on('stats', () => {})
+ .on('end', async () => {
+ tx!.end();
+
+ await tracerProvider.forceFlush();
+ traceExporter.forceFlush();
+
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.getTransaction',
+ 'CloudSpanner.Snapshot.runStream',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ ...cacheSessionEvents,
+ 'Using Session',
+ 'Starting stream',
+ 'Transaction Creation Done',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+ });
+
+ it('rollback', done => {
+ database.getTransaction(async (err, tx) => {
+ assert.ifError(err);
+
+ // Firstly erase the prior spans so that we can have only Transaction spans.
+ traceExporter.reset();
+
+ await tx!.begin();
+
+ tx!.runUpdate(updateSql, async err => {
+ assert.ifError(err);
+ tx!.rollback(async () => {
+ tx!.end();
+ await tracerProvider.forceFlush();
+ traceExporter.forceFlush();
+
+ const spans = traceExporter.getFinishedSpans();
+ withAllSpansHaveDBName(spans);
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Snapshot.begin',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ 'CloudSpanner.Dml.runUpdate',
+ 'CloudSpanner.Transaction.rollback',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ 'Starting stream',
+ ];
+ assert.strictEqual(
+ actualEventNames.every(value =>
+ expectedEventNames.includes(value),
+ ),
+ true,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+ });
+ });
+ });
+
+ it('Propagates spans to the injected not global TracerProvider', async () => {
+ const globalTraceExporter = new InMemorySpanExporter();
+ const globalTracerProvider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: globalTraceExporter,
+ spanProcessors: [new SimpleSpanProcessor(globalTraceExporter)],
+ });
+ globalTracerProvider.register();
+
+ const injectedTraceExporter = new InMemorySpanExporter();
+ const injectedTracerProvider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: injectedTraceExporter,
+ spanProcessors: [new SimpleSpanProcessor(injectedTraceExporter)],
+ });
+
+ const observabilityOptions: typeof ObservabilityOptions = {
+ tracerProvider: injectedTracerProvider,
+ enableExtendedTracing: true,
+ };
+ const setupResult = await setup(observabilityOptions, sandbox);
+ const spanner = setupResult.spanner;
+ const server = setupResult.server;
+ const spannerMock = setupResult.spannerMock;
+
+ const instance = spanner.instance('instance');
+ const database = instance.database('database');
+
+ const withAllSpansHaveDBName = generateWithAllSpansHaveDBName(
+ database.formattedName_,
+ );
+
+ try {
+ await database.run('SELECT 1');
+ injectedTraceExporter.forceFlush();
+ globalTraceExporter.forceFlush();
+ const spansFromInjected = injectedTraceExporter.getFinishedSpans();
+ const spansFromGlobal = globalTraceExporter.getFinishedSpans();
+ assert.strictEqual(
+ spansFromGlobal.length,
+ 0,
+ 'Expecting no spans from the global exporter',
+ );
+ assert.strictEqual(
+ spansFromInjected.length > 0,
+ true,
+ 'Expecting spans from the injected exporter',
+ );
+
+ spansFromInjected.sort((spanA, spanB) => {
+ return spanA.startTime[0] - spanB.startTime[0] || spanA.startTime[1] - spanB.startTime[1];
+ });
+ withAllSpansHaveDBName(spansFromInjected);
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spansFromInjected.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Database.runStream',
+ 'CloudSpanner.Database.run',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ const expectedEventNames = [
+ ...createSessionEvents,
+ 'Starting stream',
+ ...waitingSessionsEvents,
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ } catch (err) {
+ assert.ifError(err);
+ } finally {
+ injectedTraceExporter.reset();
+ await injectedTracerProvider.shutdown();
+ spannerMock.resetRequests();
+ spanner.close();
+ server.tryShutdown(() => {});
+ }
+ });
+});
+
+describe('E2E traces with async/await', async () => {
+ let server: grpc.Server;
+ let spanner: Spanner;
+ let spannerMock: mock.MockSpanner;
+ let traceExporter: typeof InMemorySpanExporter;
+ let provider: typeof NodeTracerProvider;
+ let observabilityOptions: typeof ObservabilityOptions;
+ let sandbox;
+
+ beforeEach(async () => {
+ sandbox = sinon.createSandbox();
+ traceExporter = new InMemorySpanExporter();
+ provider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ observabilityOptions = {
+ tracerProvider: provider,
+ enableExtendedTracing: true,
+ };
+ const setupResult = await setup(observabilityOptions, sandbox);
+ spanner = setupResult.spanner;
+ server = setupResult.server;
+ spannerMock = setupResult.spannerMock;
+ });
+
+ afterEach(async () => {
+ traceExporter.reset();
+ provider.shutdown();
+ spannerMock.resetRequests();
+ spanner.close();
+ server.tryShutdown(() => {});
+ sandbox.restore();
+ });
+
+ function assertAsyncAwaitExpectations() {
+ // See https://github.com/googleapis/nodejs-spanner/issues/2146.
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Database.runStream',
+ 'CloudSpanner.Database.run',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // We need to ensure a strict relationship between the spans.
+ // runSpan -------------------|
+ // |-runStream ----------|
+ const runStreamSpan = spans[spans.length - 2];
+ const runSpan = spans[spans.length - 1];
+ assert.ok(
+ runSpan.spanContext().traceId,
+ 'Expected that runSpan has a defined traceId',
+ );
+ assert.ok(
+ runStreamSpan.spanContext().traceId,
+ 'Expected that runStreamSpan has a defined traceId',
+ );
+ assert.deepStrictEqual(
+ runStreamSpan.parentSpanContext.spanId,
+ runSpan.spanContext().spanId,
+ `Expected that runSpan(spanId=${runSpan.spanContext().spanId}) is the parent to runStreamSpan(parentSpanId=${runStreamSpan.parentSpanContext.spanId})`,
+ );
+ assert.deepStrictEqual(
+ runSpan.spanContext().traceId,
+ runStreamSpan.spanContext().traceId,
+ 'Expected that both spans share a traceId',
+ );
+ assert.ok(
+ runStreamSpan.spanContext().spanId,
+ 'Expected that runStreamSpan has a defined spanId',
+ );
+ assert.ok(
+ runSpan.spanContext().spanId,
+ 'Expected that runSpan has a defined spanId',
+ );
+
+ const databaseCreateSessionSpan = spans[0];
+ assert.strictEqual(
+ databaseCreateSessionSpan.name,
+ 'CloudSpanner.Database.createSession',
+ );
+ const multiplexedSessionCreateSessionSpan = spans[1];
+ assert.strictEqual(
+ multiplexedSessionCreateSessionSpan.name,
+ 'CloudSpanner.MultiplexedSession.createSession',
+ );
+ assert.ok(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ 'Expecting a defined multiplexedSessionCreateSession traceId',
+ );
+ assert.deepStrictEqual(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ databaseCreateSessionSpan.spanContext().traceId,
+ 'Expected the same traceId',
+ );
+ assert.deepStrictEqual(
+ databaseCreateSessionSpan.parentSpanContext.spanId,
+ multiplexedSessionCreateSessionSpan.spanContext().spanId,
+ 'Expected that multiplexedSession.createSession is the parent to db.createSession',
+ );
+
+ // Assert that despite all being exported, MultiplexedSession.createSession
+ // is not in the same trace as runStream, createSessions is invoked at
+ // Spanner Client instantiation, thus before database.run is invoked.
+ assert.notEqual(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ runSpan.spanContext().traceId,
+ 'Did not expect the same traceId',
+ );
+
+ // Finally check for the collective expected event names.
+ const expectedEventNames = [
+ ...createSessionEvents,
+ 'Starting stream',
+ ...waitingSessionsEvents,
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ }
+
+ it('async/await correctly parents trace spans', async () => {
+ // See https://github.com/googleapis/nodejs-spanner/issues/2146.
+ async function main() {
+ const instance = spanner.instance('testing');
+ const database = instance.database('db-1');
+
+ const query = {
+ sql: selectSql,
+ };
+
+ const [rows] = await database.run(query);
+
+ rows.forEach(row => {
+ row.toJSON();
+ });
+
+ provider.forceFlush();
+ }
+
+ await main();
+ assertAsyncAwaitExpectations();
+ });
+
+ it('callback correctly parents trace spans', done => {
+ function main(onComplete) {
+ const instance = spanner.instance('testing');
+ const database = instance.database('db-1');
+
+ const query = {
+ sql: selectSql,
+ };
+
+ database.run(query, (err, rows) => {
+ rows.forEach(row => {
+ row.toJSON();
+ });
+
+ provider.forceFlush();
+ onComplete();
+ });
+ }
+
+ main(() => {
+ assertAsyncAwaitExpectations();
+ done();
+ });
+ });
+});
+
+describe('Negative cases', async () => {
+ let server: grpc.Server;
+ let spanner: Spanner;
+ let spannerMock: mock.MockSpanner;
+ let traceExporter: typeof InMemorySpanExporter;
+ let provider: typeof NodeTracerProvider;
+ let observabilityOptions: typeof ObservabilityOptions;
+ let sandbox;
+
+ const selectSql1p = 'SELECT 1p';
+ const messageBadSelect1p = `Missing whitespace between literal and alias [at 1:9]
+SELECT 1p
+ ^`;
+ const insertAlreadyExistentDataSql =
+ "INSERT INTO Singers(firstName, SingerId) VALUES('Foo', 1)";
+ const messageBadInsertAlreadyExistent =
+ 'Failed to insert row with primary key ({pk#SingerId:1}) due to previously existing row';
+
+ beforeEach(async () => {
+ sandbox = sinon.createSandbox();
+ traceExporter = new InMemorySpanExporter();
+ provider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ observabilityOptions = {
+ tracerProvider: provider,
+ enableExtendedTracing: true,
+ };
+ const setupResult = await setup(observabilityOptions, sandbox);
+ spanner = setupResult.spanner;
+ server = setupResult.server;
+ spannerMock = setupResult.spannerMock;
+
+ const serverErr = {
+ message: messageBadSelect1p,
+ code: grpc.status.INVALID_ARGUMENT,
+ } as mock.MockError;
+ spannerMock.putStatementResult(
+ selectSql1p,
+ mock.StatementResult.error(serverErr),
+ );
+
+ const insertAlreadyExistentErr = {
+ message: messageBadInsertAlreadyExistent,
+ code: grpc.status.ALREADY_EXISTS,
+ } as mock.MockError;
+ spannerMock.putStatementResult(
+ insertAlreadyExistentDataSql,
+ mock.StatementResult.error(insertAlreadyExistentErr),
+ );
+ });
+
+ afterEach(async () => {
+ traceExporter.reset();
+ provider.shutdown();
+ spannerMock.resetRequests();
+ spanner.close();
+ server.tryShutdown(() => {});
+ sandbox.restore();
+ });
+
+ function assertRunBadSyntaxExpectations() {
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ spans.sort((spanA, spanB) => {
+ return spanA.startTime[0] - spanB.startTime[0] || spanA.startTime[1] - spanB.startTime[1];
+ });
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Database.runStream',
+ 'CloudSpanner.Database.run',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // We need to ensure a strict relationship between the spans.
+ // runSpan -------------------|
+ // |-runStream ----------|
+ const runStreamSpan = spans[spans.length - 2];
+ const runSpan = spans[spans.length - 1];
+ assert.ok(
+ runSpan.spanContext().traceId,
+ 'Expected that runSpan has a defined traceId',
+ );
+ assert.ok(
+ runStreamSpan.spanContext().traceId,
+ 'Expected that runStreamSpan has a defined traceId',
+ );
+ assert.deepStrictEqual(
+ runStreamSpan.parentSpanContext.spanId,
+ runSpan.spanContext().spanId,
+ `Expected that runSpan(spanId=${runSpan.spanContext().spanId}) is the parent to runStreamSpan(parentSpanId=${runStreamSpan.parentSpanContext.spanId})`,
+ );
+ assert.deepStrictEqual(
+ runSpan.spanContext().traceId,
+ runStreamSpan.spanContext().traceId,
+ 'Expected that both spans share a traceId',
+ );
+ assert.ok(
+ runStreamSpan.spanContext().spanId,
+ 'Expected that runStreamSpan has a defined spanId',
+ );
+ assert.ok(
+ runSpan.spanContext().spanId,
+ 'Expected that runSpan has a defined spanId',
+ );
+
+ const databaseCreateSessionSpan = spans[0];
+ assert.strictEqual(
+ databaseCreateSessionSpan.name,
+ 'CloudSpanner.Database.createSession',
+ );
+ const multiplexedSessionCreateSessionSpan = spans[1];
+ assert.strictEqual(
+ multiplexedSessionCreateSessionSpan.name,
+ 'CloudSpanner.MultiplexedSession.createSession',
+ );
+ assert.ok(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ 'Expecting a defined multiplexedSessionCreateSession traceId',
+ );
+ assert.deepStrictEqual(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ databaseCreateSessionSpan.spanContext().traceId,
+ 'Expected the same traceId',
+ );
+ assert.deepStrictEqual(
+ databaseCreateSessionSpan.parentSpanContext.spanId,
+ multiplexedSessionCreateSessionSpan.spanContext().spanId,
+      'Expected that multiplexedSession.createSession is the parent to db.createSession',
+ );
+
+ // Assert that despite all being exported, MultiplexedSession.createSession
+ // is not in the same trace as runStream, createSessions is invoked at
+ // Spanner Client instantiation, thus before database.run is invoked.
+ assert.notEqual(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ runSpan.spanContext().traceId,
+ 'Did not expect the same traceId',
+ );
+
+ // Ensure that the last span has an error.
+ assert.deepStrictEqual(
+ runStreamSpan.status.code,
+ SpanStatusCode.ERROR,
+ 'Expected an error status',
+ );
+
+ const want = '3 INVALID_ARGUMENT: ' + messageBadSelect1p;
+ assert.deepStrictEqual(
+ runStreamSpan.status.message,
+ want,
+ `Mismatched status message:\n\n\tGot: '${runStreamSpan.status.message}'\n\tWant: '${want}'`,
+ );
+
+ // Finally check for the collective expected event names.
+ const expectedEventNames = [
+ ...createSessionEvents,
+ 'Starting stream',
+ ...waitingSessionsEvents,
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ }
+
+ it('database.run with bad syntax: async/await', async () => {
+ const instance = spanner.instance('instance');
+ const database = instance.database('database');
+
+ try {
+ await database.run(selectSql1p);
+ } catch (e) {
+ // This catch is meant to ensure that we
+ // can assert on the generated spans.
+ } finally {
+ provider.forceFlush();
+ }
+
+ assertRunBadSyntaxExpectations();
+ });
+
+ it('database.run with bad syntax: callback', done => {
+ const instance = spanner.instance('instance');
+ const database = instance.database('database');
+
+ database.run(selectSql1p, err => {
+ assert.ok(err);
+ provider.forceFlush();
+ assertRunBadSyntaxExpectations();
+ done();
+ });
+ });
+
+ function assertDatabaseRunPlusAwaitTransactionForAlreadyExistentData() {
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ spans.sort((spanA, spanB) => {
+      return spanA.startTime < spanB.startTime ? -1 : 1;
+ });
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ 'CloudSpanner.Snapshot.begin',
+ 'CloudSpanner.Snapshot.begin',
+ 'CloudSpanner.Transaction.commit',
+ 'CloudSpanner.Transaction.commit',
+ 'CloudSpanner.Database.runTransactionAsync',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ const spanSnapshotRun = spans[3];
+ assert.strictEqual(spanSnapshotRun.name, 'CloudSpanner.Snapshot.run');
+ const wantSpanErr = '6 ALREADY_EXISTS: ' + messageBadInsertAlreadyExistent;
+ assert.deepStrictEqual(
+ spanSnapshotRun.status.code,
+ SpanStatusCode.ERROR,
+ 'Unexpected status code',
+ );
+ assert.deepStrictEqual(
+ spanSnapshotRun.status.message,
+ wantSpanErr,
+      'Unexpected error message',
+ );
+
+ const databaseCreateSessionSpan = spans[0];
+ assert.strictEqual(
+ databaseCreateSessionSpan.name,
+ 'CloudSpanner.Database.createSession',
+ );
+ const multiplexedSessionCreateSessionSpan = spans[1];
+ assert.strictEqual(
+ multiplexedSessionCreateSessionSpan.name,
+ 'CloudSpanner.MultiplexedSession.createSession',
+ );
+ assert.ok(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ 'Expecting a defined multiplexedSessionCreateSession traceId',
+ );
+ assert.deepStrictEqual(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ databaseCreateSessionSpan.spanContext().traceId,
+ 'Expected the same traceId',
+ );
+ assert.deepStrictEqual(
+ databaseCreateSessionSpan.parentSpanContext.spanId,
+ multiplexedSessionCreateSessionSpan.spanContext().spanId,
+      'Expected that multiplexedSession.createSession is the parent to db.createSession',
+ );
+
+ // We need to ensure a strict relationship between the spans.
+ // |-Database.runTransactionAsync |-------------------------------------|
+ // |-Snapshot.run |------------------------|
+ // |-Snapshot.runStream |---------------------|
+ // |-Transaction.commit |--------|
+ // |-Snapshot.begin |------|
+ // |-Snapshot.commit |-----|
+ const spanDatabaseRunTransactionAsync = spans[spans.length - 1];
+ assert.deepStrictEqual(
+ spanDatabaseRunTransactionAsync.name,
+ 'CloudSpanner.Database.runTransactionAsync',
+ `${actualSpanNames}`,
+ );
+ const spanTransactionCommit0 = spans[spans.length - 2];
+ assert.strictEqual(
+ spanTransactionCommit0.name,
+ 'CloudSpanner.Transaction.commit',
+ );
+ assert.deepStrictEqual(
+ spanTransactionCommit0.parentSpanContext.spanId,
+ spanDatabaseRunTransactionAsync.spanContext().spanId,
+ 'Expected that Database.runTransaction is the parent to Transaction.commmit',
+ );
+
+ assert.deepStrictEqual(
+ spanSnapshotRun.parentSpanContext.spanId,
+ spanDatabaseRunTransactionAsync.spanContext().spanId,
+ 'Expected that Database.runTransaction is the parent to Snapshot.run',
+ );
+
+ // Assert that despite all being exported, MultiplexedSession.createSessions
+ // is not in the same trace as runStream, createSessions is invoked at
+ // Spanner Client instantiation, thus before database.run is invoked.
+ assert.notEqual(
+ multiplexedSessionCreateSessionSpan.spanContext().traceId,
+ spanDatabaseRunTransactionAsync.spanContext().traceId,
+ 'Did not expect the same traceId',
+ );
+
+ // Finally check for the collective expected event names.
+ const expectedEventNames = [
+ ...createSessionEvents,
+ 'Starting stream',
+ 'Stream broken. Safe to retry',
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ 'Starting Commit',
+ 'Commit Done',
+ ...waitingSessionsEvents,
+ 'exception',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ }
+
+ it('database.runTransaction with async/await for INSERT with existent data + transaction.commit', async () => {
+ const instance = spanner.instance('instance');
+ const database = instance.database('database');
+
+ const update = {
+ sql: insertAlreadyExistentDataSql,
+ };
+
+ try {
+ await database.runTransactionAsync(async transaction => {
+ try {
+ await transaction!.run(update);
+ } finally {
+ await transaction!.commit();
+ }
+ });
+ } catch (e) {
+ assert.strictEqual(
+ (e as grpc.ServiceError).code,
+ grpc.status.ALREADY_EXISTS,
+ );
+ }
+
+ provider.forceFlush();
+ assertDatabaseRunPlusAwaitTransactionForAlreadyExistentData();
+ });
+});
+
+describe('Traces for ExecuteStream broken stream retries', () => {
+ let sandbox: sinon.SinonSandbox;
+ const selectSql = 'SELECT NUM, NAME FROM NUMBERS';
+ const select1 = 'SELECT 1';
+ const invalidSql = 'SELECT * FROM FOO';
+ const insertSql = "INSERT INTO NUMBER (NUM, NAME) VALUES (4, 'Four')";
+ const selectAllTypes = 'SELECT * FROM TABLE_WITH_ALL_TYPES';
+ const insertSqlForAllTypes = `INSERT INTO TABLE_WITH_ALL_TYPES(
+ COLBOOL, COLINT64, COLFLOAT64, COLNUMERIC, COLSTRING, COLBYTES, COLJSON, COLDATE, COLTIMESTAMP
+ ) VALUES (
+ @bool, @int64, @float64, @numeric, @string, @bytes, @json, @date, @timestamp
+ )`;
+ const updateSql = "UPDATE NUMBER SET NAME='Unknown' WHERE NUM IN (5, 6)";
+ const fooNotFoundErr = Object.assign(new Error('Table FOO not found'), {
+ code: grpc.status.NOT_FOUND,
+ });
+ const server = new grpc.Server();
+ const spannerMock = mock.createMockSpanner(server);
+ mockInstanceAdmin.createMockInstanceAdmin(server);
+ mockDatabaseAdmin.createMockDatabaseAdmin(server);
+ let port: number;
+ let spanner: Spanner;
+ let instance: Instance;
+ let dbCounter = 1;
+
+ const traceExporter = new InMemorySpanExporter();
+ const tracerProvider = new NodeTracerProvider({
+ sampler: new AlwaysOnSampler(),
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ function newTestDatabase(): Database {
+ return instance.database(`database-${dbCounter++}`);
+ }
+
+ before(async () => {
+ sandbox = sinon.createSandbox();
+ port = await new Promise((resolve, reject) => {
+ server.bindAsync(
+ '0.0.0.0:0',
+ grpc.ServerCredentials.createInsecure(),
+ (err, assignedPort) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(assignedPort);
+ }
+ },
+ );
+ });
+ spannerMock.putStatementResult(
+ selectSql,
+ mock.StatementResult.resultSet(mock.createSimpleResultSet()),
+ );
+ spannerMock.putStatementResult(
+ select1,
+ mock.StatementResult.resultSet(mock.createSelect1ResultSet()),
+ );
+ spannerMock.putStatementResult(
+ selectAllTypes,
+ mock.StatementResult.resultSet(mock.createResultSetWithAllDataTypes()),
+ );
+ spannerMock.putStatementResult(
+ invalidSql,
+ mock.StatementResult.error(fooNotFoundErr),
+ );
+ spannerMock.putStatementResult(
+ insertSql,
+ mock.StatementResult.updateCount(1),
+ );
+ spannerMock.putStatementResult(
+ insertSqlForAllTypes,
+ mock.StatementResult.updateCount(1),
+ );
+ spannerMock.putStatementResult(
+ updateSql,
+ mock.StatementResult.updateCount(2),
+ );
+
+ const observabilityOptions: typeof ObservabilityOptions = {
+ tracerProvider: tracerProvider,
+ enableExtendedTracing: true,
+ };
+ spanner = new Spanner({
+ servicePath: 'localhost',
+ port,
+ sslCreds: grpc.credentials.createInsecure(),
+ observabilityOptions: observabilityOptions,
+ });
+ // Gets a reference to a Cloud Spanner instance and database
+ instance = spanner.instance('instance');
+ });
+
+ after(() => {
+ spanner.close();
+ server.tryShutdown(() => {});
+ sandbox.restore();
+ });
+
+ beforeEach(async () => {
+ spannerMock.resetRequests();
+ spannerMock.removeExecutionTimes();
+ await tracerProvider.forceFlush();
+ await traceExporter.forceFlush();
+ await traceExporter.reset();
+ });
+
+ describe('PartialResultStream', () => {
+ beforeEach(() => {
+ traceExporter.reset();
+ });
+ const streamIndexes = [1, 2];
+ streamIndexes.forEach(index => {
+ it('should retry UNAVAILABLE during streaming', async () => {
+ const database = newTestDatabase();
+ const err = {
+ message: 'Temporary unavailable',
+ code: grpc.status.UNAVAILABLE,
+ streamIndex: index,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ const [rows] = await database.run(selectSql);
+ assert.strictEqual(rows.length, 3);
+ await database.close();
+ });
+
+ it('should retry UNAVAILABLE during streaming with txn ID from inline begin response', async () => {
+ const err = {
+ message: 'Temporary unavailable',
+ code: grpc.status.UNAVAILABLE,
+ streamIndex: index,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ const database = newTestDatabase();
+
+ await database.runTransactionAsync(async tx => {
+ await tx.run(selectSql);
+ await tx.commit();
+ });
+ await database.close();
+
+ const requests = spannerMock
+ .getRequests()
+ .filter(val => (val as v1.ExecuteSqlRequest).sql)
+ .map(req => req as v1.ExecuteSqlRequest);
+ assert.strictEqual(requests.length, 2);
+ assert.ok(
+ requests[0].transaction?.begin!.readWrite,
+ 'inline txn is not set.',
+ );
+ assert.ok(
+ requests[1].transaction!.id,
+ 'Transaction ID is not used for retries.',
+ );
+ assert.ok(
+ requests[1].resumeToken,
+ 'Resume token is not set for the retried',
+ );
+ });
+
+ it('should retry UNAVAILABLE during streaming with txn ID from inline begin response with parallel queries', async () => {
+ const err = {
+ message: 'Temporary unavailable',
+ code: grpc.status.UNAVAILABLE,
+ streamIndex: index,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ const database = newTestDatabase();
+
+ await database.runTransactionAsync(async tx => {
+ const [rows1, rows2] = await Promise.all([
+ tx!.run(selectSql),
+ tx!.run(selectSql),
+ ]);
+ assert.equal(rows1.length, 3);
+ assert.equal(rows2.length, 3);
+ await tx.commit();
+ });
+ await database.close();
+
+ const requests = spannerMock
+ .getRequests()
+ .filter(val => (val as v1.ExecuteSqlRequest).sql)
+ .map(req => req as v1.ExecuteSqlRequest);
+ assert.strictEqual(requests.length, 3);
+ assert.ok(
+ requests[0].transaction?.begin!.readWrite,
+ 'inline txn is not set.',
+ );
+ assert.ok(
+ requests[1].transaction!.id,
+ 'Transaction ID is not used for retries.',
+ );
+ assert.ok(
+ requests[1].resumeToken,
+ 'Resume token is not set for the retried',
+ );
+ const commitRequests = spannerMock
+ .getRequests()
+ .filter(val => (val as v1.CommitRequest).mutations)
+ .map(req => req as v1.CommitRequest);
+ assert.strictEqual(commitRequests.length, 1);
+ assert.deepStrictEqual(
+ requests[1].transaction!.id,
+ requests[2].transaction!.id,
+ );
+ assert.deepStrictEqual(
+ requests[1].transaction!.id,
+ commitRequests[0].transactionId,
+ );
+ const beginTxnRequests = spannerMock
+ .getRequests()
+ .filter(val => (val as v1.BeginTransactionRequest).options?.readWrite)
+ .map(req => req as v1.BeginTransactionRequest);
+ assert.deepStrictEqual(beginTxnRequests.length, 0);
+ });
+
+ it('should not retry non-retryable error during streaming', async () => {
+ const database = newTestDatabase();
+ const err = {
+ message: 'Test error',
+ streamIndex: index,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ try {
+ await database.run(selectSql);
+ assert.fail('missing expected error');
+ } catch (e) {
+ assert.strictEqual(
+ (e as grpc.ServiceError).message,
+ '2 UNKNOWN: Test error',
+ );
+ }
+ await database.close();
+ });
+
+ it('should retry UNAVAILABLE during streaming with a callback', done => {
+ const database = newTestDatabase();
+ const err = {
+ message: 'Temporary unavailable',
+ code: grpc.status.UNAVAILABLE,
+ streamIndex: index,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ database.run(selectSql, (err, rows) => {
+ assert.ifError(err);
+ assert.strictEqual(rows!.length, 3);
+ database
+ .close()
+ .then(() => done())
+ .catch(err => done(err));
+ });
+ });
+
+ it('should not retry non-retryable error during streaming with a callback', done => {
+ const database = newTestDatabase();
+ const err = {
+ message: 'Non-retryable error',
+ streamIndex: index,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ database.run(selectSql, err => {
+ assert.ok(err, 'Missing expected error');
+ assert.strictEqual(err!.message, '2 UNKNOWN: Non-retryable error');
+ database
+ .close()
+ .then(() => done())
+ .catch(err => done(err));
+ });
+ });
+
+ it('should emit non-retryable error during streaming to stream', done => {
+ const database = newTestDatabase();
+
+ const err = {
+ message: 'Non-retryable error',
+ streamIndex: index,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ const receivedRows: Row[] = [];
+ database
+ .runStream(selectSql)
+ // We will receive data for the partial result sets that are
+ // returned before the error occurs.
+ .on('data', row => {
+ receivedRows.push(row);
+ })
+ .on('end', () => {
+ assert.fail('Missing expected error');
+ })
+ .on('error', err => {
+ assert.strictEqual(err.message, '2 UNKNOWN: Non-retryable error');
+ database
+ .close()
+ .then(() => {
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ spans.sort((spanA, spanB) => {
+              return spanA.startTime < spanB.startTime ? -1 : 1;
+ });
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Database.runStream',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Finally check for the collective expected event names.
+ const expectedEventNames = [
+ ...createSessionEvents,
+ 'Starting stream',
+ 'Transaction Creation Done',
+ ...waitingSessionsEvents,
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ })
+ .catch(err => done(err));
+ });
+ });
+ });
+ });
+
+ it('should retry UNAVAILABLE from executeStreamingSql with multiple errors during streaming', async () => {
+ const database = newTestDatabase();
+ const errors: mock.MockError[] = [];
+ for (const index of [0, 1, 1, 2, 2]) {
+ errors.push({
+ message: 'Temporary unavailable',
+ code: grpc.status.UNAVAILABLE,
+ streamIndex: index,
+ } as mock.MockError);
+ }
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofErrors(errors),
+ );
+ const [rows] = await database.run(selectSql);
+ assert.strictEqual(rows.length, 3);
+ await database.close();
+
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ spans.sort((spanA, spanB) => {
+      return spanA.startTime < spanB.startTime ? -1 : 1;
+ });
+
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Database.runStream',
+ 'CloudSpanner.Database.run',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Finally check for the collective expected event names.
+ const expectedEventNames = [
+ ...createSessionEvents,
+ 'Starting stream',
+ 'Re-attempting start stream',
+ 'Resuming stream',
+ 'Resuming stream',
+ 'Resuming stream',
+ 'Resuming stream',
+ ...waitingSessionsEvents,
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ });
+
+ it('should retry UNAVAILABLE on update', async () => {
+ const database = newTestDatabase();
+ const err = {
+ message: 'Temporary unavailable',
+ code: grpc.status.UNAVAILABLE,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+
+ await database.runTransactionAsync(async tx => {
+ const [updateCount] = await tx!.runUpdate(insertSql);
+ assert.strictEqual(updateCount, 1);
+ await tx!.commit();
+ });
+ await database.close();
+
+ // The span for a successful invocation of database.runTransaction
+ // can only be ended after the calling function is completed.
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ const actualSpanNames: string[] = [];
+ const actualEventNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ span.events.forEach(event => {
+ actualEventNames.push(event.name);
+ });
+ });
+
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Snapshot.runStream',
+ 'CloudSpanner.Snapshot.run',
+ 'CloudSpanner.Dml.runUpdate',
+ 'CloudSpanner.Snapshot.begin',
+ 'CloudSpanner.Transaction.commit',
+ 'CloudSpanner.Transaction.commit',
+ 'CloudSpanner.Database.runTransactionAsync',
+ ];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ // Finally check for the collective expected event names.
+ const expectedEventNames = [
+ ...createSessionEvents,
+ 'Starting stream',
+ 'Re-attempting start stream',
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ 'Starting Commit',
+ 'Commit Done',
+ ...waitingSessionsEvents,
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+ });
+
+ it('should not retry non-retryable error on update', async () => {
+ const database = newTestDatabase();
+ const err = {
+ message: 'Permanent error',
+ // We need to specify a non-retryable error code to prevent the entire
+ // transaction to retry. Not specifying an error code, will result in
+ // an error with code UNKNOWN, which again will retry the transaction.
+ code: grpc.status.INVALID_ARGUMENT,
+ } as mock.MockError;
+ spannerMock.setExecutionTime(
+ spannerMock.executeStreamingSql,
+ mock.SimulatedExecutionTime.ofError(err),
+ );
+ let attempts = 0;
+
+ await database.runTransactionAsync(async tx => {
+ attempts++;
+ await tx!.runUpdate(insertSql, err => {
+ assert.ok(err, 'Missing expected error');
+ assert.strictEqual(err!.code, grpc.status.INVALID_ARGUMENT);
+ assert.strictEqual(attempts, 1);
+ tx!
+ .commit()
+ .then(() => {
+ database.close().catch(assert.ifError);
+ })
+ .catch(assert.ifError);
+ });
+ });
+ assert.deepStrictEqual(
+ attempts,
+ 1,
+ 'runTransactionAsync.attempt must be 1',
+ );
+ const expectedSpanNames = [
+ 'CloudSpanner.Database.createSession',
+ 'CloudSpanner.MultiplexedSession.createSession',
+ 'CloudSpanner.Database.runTransactionAsync',
+ ];
+
+ const expectedEventNames = [
+ ...createSessionEvents,
+ ...waitingSessionsEvents,
+ ];
+ await verifySpansAndEvents(
+ traceExporter,
+ expectedSpanNames,
+ expectedEventNames,
+ );
+ });
+});
+
+describe('End to end tracing headers', () => {
+ let sandbox;
+ let server: grpc.Server;
+ let spanner: Spanner;
+ let spannerMock: mock.MockSpanner;
+ let observabilityOptions: typeof ObservabilityOptions;
+
+ beforeEach(async () => {
+ sandbox = sinon.createSandbox();
+ observabilityOptions = {
+ enableEndToEndTracing: true,
+ };
+
+ const setupResult = await setup(observabilityOptions, sandbox);
+ spanner = setupResult.spanner;
+ server = setupResult.server;
+ spannerMock = setupResult.spannerMock;
+ });
+
+ afterEach(async () => {
+ spannerMock.resetRequests();
+ spanner.close();
+ server.tryShutdown(() => {});
+ sandbox.restore();
+ });
+
+ it('run', async () => {
+ const instance = spanner.instance('instance');
+ const database = instance.database('database');
+ let txn;
+ try {
+ [txn] = await database.getTransaction();
+ await txn.run('SELECT 1');
+ let metadataCountWithE2EHeader = 0;
+ let metadataCountWithTraceParent = 0;
+ spannerMock.getMetadata().forEach(metadata => {
+ if (metadata.get(END_TO_END_TRACING_HEADER)[0] !== undefined) {
+ metadataCountWithE2EHeader++;
+ assert.strictEqual(
+ metadata.get(END_TO_END_TRACING_HEADER)[0],
+ 'true',
+ );
+ }
+ if (metadata.get('traceparent')[0] !== undefined) {
+ metadataCountWithTraceParent++;
+ }
+ });
+
+ // Create Session for multiplexed session(default) and Select 1 request.
+ assert.strictEqual(spannerMock.getRequests().length, 2);
+ assert.strictEqual(metadataCountWithE2EHeader, 2);
+ assert.strictEqual(metadataCountWithTraceParent, 2);
+ } catch (err) {
+ assert.ifError(err);
+ } finally {
+ txn.end();
+ }
+ });
+});
diff --git a/handwritten/spanner/observability-test/table.ts b/handwritten/spanner/observability-test/table.ts
new file mode 100644
index 00000000000..26db0ff42b2
--- /dev/null
+++ b/handwritten/spanner/observability-test/table.ts
@@ -0,0 +1,338 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as pfy from '@google-cloud/promisify';
+import * as assert from 'assert';
+import {before, beforeEach, afterEach, describe, it} from 'mocha';
+import * as extend from 'extend';
+import * as proxyquire from 'proxyquire';
+import * as sinon from 'sinon';
+import * as through from 'through2';
+
+const {
+ AlwaysOnSampler,
+ NodeTracerProvider,
+ InMemorySpanExporter,
+} = require('@opentelemetry/sdk-trace-node');
+import {SpanStatusCode} from '@opentelemetry/api';
+
+// eslint-disable-next-line n/no-extraneous-require
+const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base');
+
+const fakePfy = extend({}, pfy, {
+ promisifyAll(klass, options) {
+ if (klass.name !== 'Table') {
+ return;
+ }
+ assert.deepStrictEqual(options.exclude, ['delete', 'drop']);
+ },
+});
+
+class FakeTransaction {
+ commit(gaxOptions, callback) {
+ callback(null, {});
+ }
+ createReadStream() {
+ return through.obj();
+ }
+ deleteRows() {}
+ end() {}
+ insert() {}
+ replace() {}
+ upsert() {}
+ update() {}
+}
+
+describe('Table', () => {
+ const sandbox = sinon.createSandbox();
+
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ let Table: any;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ let TableCached: any;
+ let table;
+ let transaction: FakeTransaction;
+
+ const DATABASE = {
+ formattedName_: 'formatted-db-name',
+ runTransaction: (opts, callback) => callback(null, transaction),
+ getSnapshot: (options, callback) => callback(null, transaction),
+ };
+
+ const traceExporter = new InMemorySpanExporter();
+ const sampler = new AlwaysOnSampler();
+ const provider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ const NAME = 'table-name';
+
+ const ROW = {};
+
+ const mutateRowsOptions = {
+ requestOptions: {transactionTag: 'transaction-tag'},
+ };
+
+ before(() => {
+ Table = proxyquire('../src/table.js', {
+ '@google-cloud/promisify': fakePfy,
+ }).Table;
+ TableCached = extend({}, Table);
+ });
+
+ beforeEach(() => {
+ extend(Table, TableCached);
+ table = new Table(DATABASE, NAME);
+ transaction = new FakeTransaction();
+ table._observabilityOptions = {tracerProvider: provider};
+ });
+
+ afterEach(() => {
+ sandbox.restore();
+ traceExporter.reset();
+ });
+
+ function getExportedSpans(minCount: number) {
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+ assert.strictEqual(
+ spans.length >= minCount,
+ true,
+ `at least ${minCount} spans expected`,
+ );
+
+ // Sort the spans by duration.
+ spans.sort((spanA, spanB) => {
+      return spanA.duration < spanB.duration ? -1 : 1;
+ });
+
+ return spans;
+ }
+
+ function spanNames(spans) {
+ const actualSpanNames: string[] = [];
+ spans.forEach(span => {
+ actualSpanNames.push(span.name);
+ });
+ return actualSpanNames;
+ }
+
+ function verifySpanAttributes(span) {
+ const attributes = span.attributes;
+ assert.strictEqual(attributes['transaction.tag'], 'transaction-tag');
+ assert.strictEqual(attributes['db.sql.table'], 'table-name');
+ assert.strictEqual(attributes['db.name'], 'formatted-db-name');
+ }
+
+ it('deleteRows', done => {
+ const KEYS = ['key'];
+ const stub = (
+ sandbox.stub(transaction, 'deleteRows') as sinon.SinonStub
+ ).withArgs(table.name, KEYS);
+
+ sandbox.stub(transaction, 'commit').callsFake((opts, callback) => {
+ callback();
+ });
+
+ table.deleteRows(KEYS, mutateRowsOptions, err => {
+ assert.ifError(err);
+ assert.strictEqual(stub.callCount, 1);
+ const spans = getExportedSpans(1);
+ const actualSpanNames = spanNames(spans);
+ const expectedSpanNames = ['CloudSpanner.Table.deleteRows'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ verifySpanAttributes(spans[0]);
+ done();
+ });
+ });
+
+ it('insert', done => {
+ const stub = (
+ sandbox.stub(transaction, 'insert') as sinon.SinonStub
+ ).withArgs(table.name, ROW);
+
+ table.insert(ROW, mutateRowsOptions, err => {
+ assert.ifError(err);
+ assert.strictEqual(stub.callCount, 1);
+ const spans = getExportedSpans(1);
+ const actualSpanNames = spanNames(spans);
+ const expectedSpanNames = ['CloudSpanner.Table.insert'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ verifySpanAttributes(spans[0]);
+ done();
+ });
+ });
+
+ it('insert with an error', done => {
+ const fakeError = new Error('err');
+ sandbox
+ .stub(DATABASE, 'runTransaction')
+ .callsFake((opts, callback) => callback(fakeError));
+
+ table.insert(ROW, mutateRowsOptions, err => {
+ assert.strictEqual(err, fakeError);
+
+ const gotSpans = getExportedSpans(1);
+ const gotSpanStatus = gotSpans[0].status;
+ const wantSpanStatus = {
+ code: SpanStatusCode.ERROR,
+ message: fakeError.message,
+ };
+ assert.deepStrictEqual(
+ gotSpanStatus,
+ wantSpanStatus,
+ `mismatch in span status:\n\tGot: ${JSON.stringify(gotSpanStatus)}\n\tWant: ${JSON.stringify(wantSpanStatus)}`,
+ );
+
+ const actualSpanNames = spanNames(gotSpans);
+ const expectedSpanNames = ['CloudSpanner.Table.insert'];
+
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ verifySpanAttributes(gotSpans[0]);
+ done();
+ });
+ });
+
+ it('upsert', done => {
+ const stub = (
+ sandbox.stub(transaction, 'upsert') as sinon.SinonStub
+ ).withArgs(table.name, ROW);
+
+ table.upsert(ROW, mutateRowsOptions, err => {
+ assert.ifError(err);
+ assert.strictEqual(stub.callCount, 1);
+
+ const gotSpans = getExportedSpans(1);
+
+ const actualSpanNames = spanNames(gotSpans);
+ const expectedSpanNames = ['CloudSpanner.Table.upsert'];
+
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ verifySpanAttributes(gotSpans[0]);
+ done();
+ });
+ });
+
+ it('upsert with an error', done => {
+ const fakeError = new Error('err');
+ sandbox
+ .stub(DATABASE, 'runTransaction')
+ .callsFake((opts, callback) => callback(fakeError));
+
+ table.upsert(ROW, mutateRowsOptions, err => {
+ assert.strictEqual(err, fakeError);
+
+ const gotSpans = getExportedSpans(1);
+
+ const gotSpanStatus = gotSpans[0].status;
+ const wantSpanStatus = {
+ code: SpanStatusCode.ERROR,
+ message: fakeError.message,
+ };
+ assert.deepStrictEqual(
+ gotSpanStatus,
+ wantSpanStatus,
+ `mismatch in span status:\n\tGot: ${JSON.stringify(gotSpanStatus)}\n\tWant: ${JSON.stringify(wantSpanStatus)}`,
+ );
+
+ const actualSpanNames = spanNames(gotSpans);
+ const expectedSpanNames = ['CloudSpanner.Table.upsert'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+      verifySpanAttributes(gotSpans[0]);
+ done();
+ });
+ });
+
+ it('replace', done => {
+ const stub = (
+ sandbox.stub(transaction, 'replace') as sinon.SinonStub
+ ).withArgs(table.name, ROW);
+
+ table.replace(ROW, mutateRowsOptions, err => {
+ assert.ifError(err);
+ assert.strictEqual(stub.callCount, 1);
+
+ const gotSpans = getExportedSpans(1);
+
+ const actualSpanNames = spanNames(gotSpans);
+ const expectedSpanNames = ['CloudSpanner.Table.replace'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ verifySpanAttributes(gotSpans[0]);
+ done();
+ });
+ });
+
+ it('replace with an error', done => {
+ const fakeError = new Error('err');
+ sandbox
+ .stub(DATABASE, 'runTransaction')
+ .callsFake((opts, callback) => callback(fakeError));
+
+ table.replace(ROW, mutateRowsOptions, err => {
+ assert.strictEqual(err, fakeError);
+ const gotSpans = getExportedSpans(1);
+ const gotSpanStatus = gotSpans[0].status;
+ const wantSpanStatus = {
+ code: SpanStatusCode.ERROR,
+ message: fakeError.message,
+ };
+ assert.deepStrictEqual(
+ gotSpanStatus,
+ wantSpanStatus,
+ `mismatch in span status:\n\tGot: ${JSON.stringify(gotSpanStatus)}\n\tWant: ${JSON.stringify(wantSpanStatus)}`,
+ );
+
+ const actualSpanNames = spanNames(gotSpans);
+ const expectedSpanNames = ['CloudSpanner.Table.replace'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+ verifySpanAttributes(gotSpans[0]);
+
+ done();
+ });
+ });
+});
diff --git a/handwritten/spanner/observability-test/transaction.ts b/handwritten/spanner/observability-test/transaction.ts
new file mode 100644
index 00000000000..c89c1b0a925
--- /dev/null
+++ b/handwritten/spanner/observability-test/transaction.ts
@@ -0,0 +1,782 @@
+/*!
+ * Copyright 2024 Google LLC. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import {before, beforeEach, afterEach, describe, it} from 'mocha';
+import {EventEmitter} from 'events';
+import * as proxyquire from 'proxyquire';
+import * as sinon from 'sinon';
+
+import {codec} from '../src/codec';
+const {
+ AlwaysOnSampler,
+ NodeTracerProvider,
+ InMemorySpanExporter,
+} = require('@opentelemetry/sdk-trace-node');
+// eslint-disable-next-line n/no-extraneous-require
+const {SpanStatusCode} = require('@opentelemetry/api');
+const {
+ ReadableSpan,
+ SimpleSpanProcessor,
+} = require('@opentelemetry/sdk-trace-base');
+const {generateWithAllSpansHaveDBName} = require('./helper');
+import {ExecuteSqlRequest, ReadRequest} from '../src/transaction';
+
+describe('Transaction', () => {
+ const sandbox = sinon.createSandbox();
+
+ const REQUEST = sandbox.stub();
+ const REQUEST_STREAM = sandbox.stub();
+ const SESSION_NAME = 'session-123';
+
+ const SPANNER = {
+ routeToLeaderEnabled: true,
+ directedReadOptions: {},
+ };
+
+ const INSTANCE = {
+ parent: SPANNER,
+ };
+
+ const DATABASE = {
+ formattedName_: 'formatted-database-name',
+ parent: INSTANCE,
+ };
+
+ const withAllSpansHaveDBName = generateWithAllSpansHaveDBName(
+ DATABASE.formattedName_,
+ );
+
+ const SESSION = {
+ parent: DATABASE,
+ formattedName_: SESSION_NAME,
+ request: REQUEST,
+ requestStream: REQUEST_STREAM,
+ _observabilityOptions: {},
+ };
+
+ const PARTIAL_RESULT_STREAM = sandbox.stub();
+ const PROMISIFY_ALL = sandbox.stub();
+
+ let Snapshot;
+ let Transaction;
+ let transaction;
+ let snapshot;
+
+ before(() => {
+ const txns = proxyquire('../src/transaction', {
+ '@google-cloud/promisify': {promisifyAll: PROMISIFY_ALL},
+ './codec': {codec},
+ './partial-result-stream': {partialResultStream: PARTIAL_RESULT_STREAM},
+ });
+
+ Snapshot = txns.Snapshot;
+ Transaction = txns.Transaction;
+ });
+
+ let traceExporter: typeof InMemorySpanExporter;
+ let tracerProvider: typeof NodeTracerProvider;
+
+ beforeEach(() => {
+ traceExporter = new InMemorySpanExporter();
+ const sampler = new AlwaysOnSampler();
+
+ tracerProvider = new NodeTracerProvider({
+ sampler: sampler,
+ exporter: traceExporter,
+ spanProcessors: [new SimpleSpanProcessor(traceExporter)],
+ });
+
+ const SNAPSHOT_OPTIONS = {a: 'b', c: 'd'};
+ sandbox.stub(Snapshot, 'encodeTimestampBounds').returns(SNAPSHOT_OPTIONS);
+ SESSION._observabilityOptions = {tracerProvider: tracerProvider};
+ snapshot = new Snapshot(SESSION);
+ snapshot._observabilityOptions = {tracerProvider: tracerProvider};
+
+ transaction = new Transaction(SESSION);
+ });
+
+ afterEach(async () => {
+ sandbox.restore();
+ await tracerProvider.forceFlush();
+ traceExporter.reset();
+ });
+
+ after(async () => {
+ await tracerProvider.shutdown();
+ });
+
+ interface spanExportResults {
+ spans: (typeof ReadableSpan)[];
+ spanNames: string[];
+ spanEventNames: string[];
+ }
+
+ function extractExportedSpans(): spanExportResults {
+ traceExporter.forceFlush();
+ const spans = traceExporter.getFinishedSpans();
+
+ // Sort the spans by startTime.
+    spans.sort((spanA, spanB) => {
+      return spanA.startTime < spanB.startTime ? -1 : 1;
+    });
+
+ const spanNames: string[] = [];
+ const eventNames: string[] = [];
+ spans.forEach(span => {
+ spanNames.push(span.name);
+ span.events.forEach(event => {
+ eventNames.push(event.name);
+ });
+ });
+
+ return {
+ spans: spans,
+ spanNames: spanNames,
+ spanEventNames: eventNames,
+ } as spanExportResults;
+ }
+
+ describe('Snapshot', () => {
+ describe('begin', () => {
+ const BEGIN_RESPONSE = {
+ id: Buffer.from('transaction-id-123'),
+ };
+
+ it('without error', done => {
+ REQUEST.callsFake((_, callback) => callback(null, BEGIN_RESPONSE));
+
+ snapshot.begin((err, resp) => {
+ assert.ifError(err);
+ assert.strictEqual(resp, BEGIN_RESPONSE);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.begin'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ 'Begin Transaction',
+ 'Transaction Creation Done',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ done();
+ });
+ });
+
+ it('with error', done => {
+ const fakeError = new Error('begin.error');
+
+ REQUEST.callsFake((_, callback) => callback(fakeError));
+
+ snapshot.begin(err => {
+ assert.strictEqual(err, fakeError);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.begin'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = ['Begin Transaction'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+            'Unexpected span status code',
+ );
+ assert.strictEqual(
+ 'begin.error',
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+ });
+ });
+
+ describe('read', () => {
+ const TABLE = 'my-table-123';
+
+ let fakeStream;
+
+ beforeEach(() => {
+ fakeStream = new EventEmitter();
+ sandbox.stub(snapshot, 'createReadStream').returns(fakeStream);
+ });
+
+ it('with error', done => {
+ const fakeError = new Error('read.error');
+
+ snapshot.read(TABLE, {}, err => {
+ assert.strictEqual(err, fakeError);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.read'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+            'Unexpected span status code',
+ );
+ assert.strictEqual(
+ 'read.error',
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+
+ fakeStream.emit('error', fakeError);
+ });
+
+ it('without error', done => {
+ const fakeRows = [{a: 'b'}, {c: 'd'}, {e: 'f'}];
+
+ snapshot.read(TABLE, {}, (err, rows) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(rows, fakeRows);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.read'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+            'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+
+ fakeRows.forEach(row => fakeStream.emit('data', row));
+ fakeStream.emit('end');
+ });
+ });
+
+ describe('run', () => {
+      const QUERY = 'SELECT * FROM `MyTable`';
+
+ let fakeStream;
+
+ beforeEach(() => {
+ fakeStream = new EventEmitter();
+ sandbox.stub(snapshot, 'runStream').returns(fakeStream);
+ });
+
+ it('without error', done => {
+ const fakeRows = [{a: 'b'}, {c: 'd'}, {e: 'f'}];
+
+ snapshot.run(QUERY, (err, rows) => {
+ assert.ifError(err);
+ assert.deepStrictEqual(rows, fakeRows);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.run'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+            'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+ done();
+ });
+
+ fakeRows.forEach(row => fakeStream.emit('data', row));
+ fakeStream.emit('end');
+ });
+
+ it('with errors', done => {
+ const fakeError = new Error('run.error');
+
+ snapshot.run(QUERY, err => {
+ assert.strictEqual(err, fakeError);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.run'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+            'Unexpected span status code',
+ );
+ assert.strictEqual(
+ 'run.error',
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+
+ fakeStream.emit('error', fakeError);
+ });
+ });
+
+ describe('runStream', () => {
+ const QUERY = {
+ sql: 'SELECT * FROM `MyTable`',
+ };
+
+ beforeEach(() => {
+ PARTIAL_RESULT_STREAM.callsFake(makeRequest => makeRequest());
+ });
+
+ it('with error', done => {
+ REQUEST_STREAM.resetHistory();
+
+ const fakeQuery: ExecuteSqlRequest = Object.assign({}, QUERY, {
+ params: {a: undefined},
+ requestOptions: {requestTag: 'request-tag'},
+ });
+
+ snapshot.requestOptions = {transactionTag: 'transaction-tag'};
+ const stream = snapshot.runStream(fakeQuery);
+ stream.on('error', error => {
+ assert.strictEqual(
+ error.message,
+ 'Value of type undefined not recognized.',
+ );
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.runStream'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = ['Starting stream', 'exception'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+            'Unexpected span status code',
+ );
+ assert.strictEqual(
+ 'Value of type undefined not recognized.',
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ const attributes = exportResults.spans[0].attributes;
+ assert.strictEqual(attributes['transaction.tag'], 'transaction-tag');
+ assert.strictEqual(attributes['db.name'], 'formatted-database-name');
+ assert.strictEqual(attributes['request.tag'], 'request-tag');
+ done();
+ });
+ assert.ok(!REQUEST_STREAM.called, 'No request should be made');
+ });
+ });
+
+ describe('createReadStream', () => {
+ const TABLE = 'my-table-123';
+
+ beforeEach(() => {
+ PARTIAL_RESULT_STREAM.callsFake(makeRequest => makeRequest());
+ });
+
+ it('without error', done => {
+ const fakeStream = new EventEmitter();
+ REQUEST_STREAM.returns(fakeStream);
+ const request: ReadRequest = {
+ requestOptions: {requestTag: 'request-tag'},
+ };
+ snapshot.requestOptions = {transactionTag: 'transaction-tag'};
+ const stream = snapshot.createReadStream(TABLE, request);
+ stream.on('end', () => {
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+
+ const expectedSpanNames = ['CloudSpanner.Snapshot.createReadStream'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const attributes = exportResults.spans[0].attributes;
+ assert.strictEqual(attributes['transaction.tag'], 'transaction-tag');
+ assert.strictEqual(attributes['db.sql.table'], TABLE);
+ assert.strictEqual(attributes['db.name'], 'formatted-database-name');
+ assert.strictEqual(attributes['request.tag'], 'request-tag');
+ done();
+ });
+ fakeStream.emit('end');
+ });
+ });
+ });
+
+ describe('rollback', () => {
+ const ID = 'transaction-id-0xdedabeef';
+
+ beforeEach(() => {
+ transaction.id = ID;
+ });
+
+ it('no error with unset `id`', done => {
+ const expectedError = new Error(
+ 'Transaction ID is unknown, nothing to rollback.',
+ );
+ delete transaction.id;
+
+ transaction.rollback(err => {
+ assert.deepStrictEqual(err, null);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Transaction.rollback'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [
+ 'Transaction ID is unknown, nothing to rollback.',
+ ];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+ });
+
+ it('with request error', done => {
+ const fakeError = new Error('our request error');
+ transaction.request = (config, callback) => {
+ callback(fakeError);
+ };
+
+ transaction.rollback(err => {
+ assert.deepStrictEqual(err, fakeError);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Transaction.rollback'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.strictEqual(
+ 'our request error',
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+ });
+
+ it('with no error', done => {
+ transaction.request = (config, callback) => {
+ callback(null);
+ };
+
+ transaction.rollback(err => {
+ assert.ifError(err);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Transaction.rollback'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = [];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+ });
+ });
+
+ describe('commit', () => {
+ it('without error', done => {
+ const id = 'transaction-id-123';
+ const transactionTag = 'bar';
+ transaction.id = id;
+ transaction.requestOptions = {transactionTag};
+
+ transaction.request = (config, callback) => {
+ callback(null, {});
+ };
+
+ transaction.commit(err => {
+ assert.ifError(err);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Transaction.commit'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = ['Starting Commit', 'Commit Done'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.UNSET,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.strictEqual(
+ undefined,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ done();
+ });
+ });
+
+ it('with generic error', () => {
+ const fakeError = new Error('commit.error');
+ transaction.request = (config, callback) => {
+ callback(fakeError, {});
+ };
+
+ transaction.commit(err => {
+ assert.strictEqual(err, fakeError);
+
+ const exportResults = extractExportedSpans();
+ const actualSpanNames = exportResults.spanNames;
+ const actualEventNames = exportResults.spanEventNames;
+
+ const expectedSpanNames = ['CloudSpanner.Transaction.commit'];
+ assert.deepStrictEqual(
+ actualSpanNames,
+ expectedSpanNames,
+ `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`,
+ );
+
+ const expectedEventNames = ['Starting Commit', 'Commit failed'];
+ assert.deepStrictEqual(
+ actualEventNames,
+ expectedEventNames,
+ `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
+ );
+
+ // Ensure that the final span that got retries did not error.
+ const spans = exportResults.spans;
+
+ const firstSpan = spans[0];
+ assert.strictEqual(
+ SpanStatusCode.ERROR,
+ firstSpan.status.code,
+ 'Unexpected span status code',
+ );
+ assert.strictEqual(
+ fakeError.message,
+ firstSpan.status.message,
+ 'Unexpected span status message',
+ );
+
+ withAllSpansHaveDBName(spans);
+ });
+ });
+ });
+});
diff --git a/handwritten/spanner/owlbot.py b/handwritten/spanner/owlbot.py
new file mode 100644
index 00000000000..21a9b60576b
--- /dev/null
+++ b/handwritten/spanner/owlbot.py
@@ -0,0 +1,72 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import synthtool as s
+import synthtool.gcp as gcp
+import synthtool.languages.node_mono_repo as node
+import logging
+from pathlib import Path
+from synthtool import _tracked_paths
+from synthtool import shell
+import shutil
+
+staging = Path("owl-bot-staging/spanner")
+
+if staging.is_dir():
+    logging.info(f"Copying files from staging directory {staging}.")
+
+ # nodejs-spanner is composed of 3 APIs: SpannerClient, SpannerAdminDatabase and
+ # SpannerAdminInstance, all 3 are exported in src/v1/index.js
+ # Excluding auto-generated system test since Spanner has its own packing test
+    excludes = ["src/index.ts", "src/v1/index.ts", "README.md", "package.json",
+ "system-test/*", "system-test/fixtures/sample/*", "system-test/fixtures/sample/src/*",
+ "tsconfig.json"]
+
+ # Copy spanner library.
+ for version in ['v1']:
+ library = staging / version
+ _tracked_paths.add(library)
+ s.copy([library], destination="handwritten/spanner", excludes=excludes)
+
+ excludes += ["webpack.config.js", ".jsdoc.js"]
+
+ # Copy the admin/database library.
+ for version in ['v1']:
+ library = staging / 'admin' / 'database' / version
+ _tracked_paths.add(library)
+ s.copy([library], excludes=excludes)
+
+ # Copy the admin/instance library.
+ for version in ['v1']:
+ library = staging / 'admin' / 'instance' / version
+ _tracked_paths.add(library)
+ s.copy([library], excludes=excludes)
+
+ # Copy the spanner/executor library.
+ for version in ['v1']:
+ library = staging / 'executor' / version
+ _tracked_paths.add(library)
+ s.copy([library], excludes=excludes)
+
+ # The staging directory should never be merged into the main branch.
+ shutil.rmtree(staging)
+
+common_templates = gcp.CommonTemplates()
+templates = common_templates.node_mono_repo_library(relative_dir="handwritten/spanner", source_location='build/src')
+s.copy(templates, destination="handwritten/spanner", excludes=[".kokoro/samples-test.sh", ".kokoro/trampoline_v2.sh", ".github/release-trigger.yml", ".github/sync-repo-settings.yaml", "README.md"])
+
+node.postprocess_gapic_library_hermetic(relative_dir="handwritten/spanner")
+
+# Remove generated samples from veneer library:
+shell.run(('rm', '-rf', 'handwritten/spanner/samples/generated'), hide_output=False)
diff --git a/handwritten/spanner/package.json b/handwritten/spanner/package.json
new file mode 100644
index 00000000000..eb3cac0449f
--- /dev/null
+++ b/handwritten/spanner/package.json
@@ -0,0 +1,144 @@
+{
+ "name": "@google-cloud/spanner",
+ "description": "Cloud Spanner Client Library for Node.js",
+ "version": "8.6.0",
+ "license": "Apache-2.0",
+ "author": "Google Inc.",
+ "engines": {
+ "node": ">=18"
+ },
+ "repository": {
+ "type": "git",
+ "directory": "handwritten/spanner",
+ "url": "https://github.com/googleapis/google-cloud-node.git"
+ },
+ "main": "./build/src/index.js",
+ "types": "./build/src/index.d.ts",
+ "files": [
+ "build/protos",
+ "build/src",
+ "!build/src/**/*.map"
+ ],
+ "keywords": [
+ "google apis client",
+ "google api client",
+ "google apis",
+ "google api",
+ "google",
+ "google cloud platform",
+ "google cloud",
+ "cloud",
+ "spanner"
+ ],
+ "scripts": {
+ "docs": "jsdoc -c .jsdoc.js",
+ "predocs": "npm run compile",
+ "lint": "gts check",
+ "samples-test-with-archived": "cd samples/ && npm link ../ && npm test-with-archived && cd ../",
+ "samples-test": "cd samples/ && npm link ../ && npm test && cd ../",
+ "system-test": "mocha build/system-test --timeout 1600000",
+ "observability-test": "mocha build/observability-test --timeout 1600000",
+ "cleanup": "mocha scripts/cleanup.js --timeout 30000",
+ "test": "mocha build/test build/test/common build/observability-test",
+ "ycsb": "node ./benchmark/ycsb.js run -P ./benchmark/workloada -p table=usertable -p cloudspanner.instance=ycsb-instance -p operationcount=100 -p cloudspanner.database=ycsb",
+ "fix": "gts fix",
+ "clean": "gts clean",
+ "compile": "tsc -p . && cp -r protos build && cp -r test/data build/test",
+ "prepare": "npm run compile-protos && npm run compile",
+ "pretest": "npm run compile",
+ "presystem-test": "npm run compile",
+ "preobservability-test": "npm run compile",
+ "proto": "compileProtos src",
+ "docs-test": "linkinator docs",
+ "predocs-test": "npm run docs",
+ "benchwrapper": "node bin/benchwrapper.js",
+ "prelint": "cd samples; npm link ../; npm install",
+ "precompile": "gts clean",
+ "compile-protos": "compileProtos src",
+ "coverage": "c8 mocha build/test build/test/common && c8 report --check-coverage"
+ },
+ "dependencies": {
+ "@babel/core": "7.27.7",
+ "@babel/helpers": "7.27.6",
+ "@babel/traverse": "7.27.7",
+ "@google-cloud/common": "^6.0.0",
+ "@google-cloud/monitoring": "^5.0.0",
+ "@google-cloud/opentelemetry-resource-util": "^2.4.0",
+ "@google-cloud/precise-date": "^5.0.0",
+ "@google-cloud/projectify": "^5.0.0",
+ "@google-cloud/promisify": "^5.0.0",
+ "@grpc/grpc-js": "^1.13.2",
+ "@grpc/proto-loader": "^0.8.0",
+ "@opentelemetry/api": "^1.9.0",
+ "@opentelemetry/context-async-hooks": "^2.0.0",
+ "@opentelemetry/core": "^2.0.0",
+ "@opentelemetry/resources": "^1.8.0",
+ "@opentelemetry/sdk-metrics": "^1.30.1",
+ "@opentelemetry/semantic-conventions": "^1.30.0",
+ "@types/big.js": "^6.2.2",
+ "@types/stack-trace": "^0.0.33",
+ "big.js": "^7.0.0",
+ "checkpoint-stream": "^0.1.2",
+ "duplexify": "^4.1.3",
+ "events-intercept": "^2.0.0",
+ "extend": "^3.0.2",
+ "google-auth-library": "^10.0.0-rc.1",
+ "google-gax": "5.0.6",
+ "grpc-gcp": "^1.0.1",
+ "lodash.snakecase": "^4.1.1",
+ "merge-stream": "^2.0.0",
+ "p-queue": "^6.0.2",
+ "protobufjs": "^7.4.0",
+ "retry-request": "^8.0.0",
+ "split-array-stream": "^2.0.0",
+ "stack-trace": "0.0.10",
+ "stream-events": "^1.0.5",
+ "teeny-request": "^10.0.0",
+ "through2": "^4.0.2",
+ "uuid": "^11.1.0"
+ },
+ "devDependencies": {
+ "@opentelemetry/sdk-trace-base": "^2.0.0",
+ "@opentelemetry/sdk-trace-node": "^2.0.0",
+ "@types/concat-stream": "^2.0.3",
+ "@types/extend": "^3.0.4",
+ "@types/is": "^0.0.25",
+ "@types/lodash.snakecase": "^4.1.9",
+ "@types/merge-stream": "^2.0.0",
+ "@types/mocha": "^10.0.10",
+ "@types/mv": "^2.1.4",
+ "@types/ncp": "^2.0.8",
+ "@types/proxyquire": "^1.3.31",
+ "@types/request": "^2.48.12",
+ "@types/sinon": "^21.0.0",
+ "@types/through2": "^2.0.41",
+ "binary-search-bounds": "^2.0.5",
+ "c8": "^10.1.3",
+ "codecov": "^3.8.3",
+ "concat-stream": "^2.0.0",
+ "dedent": "^1.5.3",
+ "execa": "^5.0.0",
+ "gapic-tools": "^1.0.1",
+ "gts": "^6.0.2",
+ "jsdoc": "^4.0.4",
+ "jsdoc-fresh": "^5.0.0",
+ "jsdoc-region-tag": "^4.0.0",
+ "linkinator": "^6.1.2",
+ "lodash.random": "^3.2.0",
+ "long": "^5.3.2",
+ "mocha": "^11.1.0",
+ "mv": "^2.1.1",
+ "ncp": "^2.0.0",
+ "nise": "^6.1.1",
+ "p-limit": "^3.0.1",
+ "path-to-regexp": "^8.2.0",
+ "proxyquire": "^2.1.3",
+ "sinon": "^21.0.0",
+ "stats-lite": "^2.2.0",
+ "time-span": "4.0.0",
+ "tmp": "^0.2.3",
+ "typescript": "^5.8.2",
+ "yargs": "^17.7.2"
+ },
+ "homepage": "https://github.com/googleapis/google-cloud-node/tree/main/handwritten/spanner"
+}
diff --git a/handwritten/spanner/protos/google/spanner/admin/database/v1/backup.proto b/handwritten/spanner/protos/google/spanner/admin/database/v1/backup.proto
new file mode 100644
index 00000000000..6898814c421
--- /dev/null
+++ b/handwritten/spanner/protos/google/spanner/admin/database/v1/backup.proto
@@ -0,0 +1,773 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.database.v1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/spanner/admin/database/v1/common.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
+option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb";
+option java_multiple_files = true;
+option java_outer_classname = "BackupProto";
+option java_package = "com.google.spanner.admin.database.v1";
+option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
+option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
+
+// A backup of a Cloud Spanner database.
+message Backup {
+ option (google.api.resource) = {
+ type: "spanner.googleapis.com/Backup"
+ pattern: "projects/{project}/instances/{instance}/backups/{backup}"
+ };
+
+ // Indicates the current state of the backup.
+ enum State {
+ // Not specified.
+ STATE_UNSPECIFIED = 0;
+
+ // The pending backup is still being created. Operations on the
+ // backup may fail with `FAILED_PRECONDITION` in this state.
+ CREATING = 1;
+
+ // The backup is complete and ready for use.
+ READY = 2;
+ }
+
+ // Required for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. Name of the database from which this backup was created. This
+ // needs to be in the same instance as the backup. Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`.
+ string database = 2 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }];
+
+ // The backup will contain an externally consistent copy of the database at
+ // the timestamp specified by `version_time`. If `version_time` is not
+ // specified, the system will set `version_time` to the `create_time` of the
+ // backup.
+ google.protobuf.Timestamp version_time = 9;
+
+ // Required for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. The expiration time of the backup, with microseconds
+ // granularity that must be at least 6 hours and at most 366 days
+ // from the time the CreateBackup request is processed. Once the `expire_time`
+ // has passed, the backup is eligible to be automatically deleted by Cloud
+ // Spanner to free the resources used by the backup.
+ google.protobuf.Timestamp expire_time = 3;
+
+ // Output only for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. Required for the
+ // [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
+ // operation.
+ //
+ // A globally unique identifier for the backup which cannot be
+ // changed. Values are of the form
+ // `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
+ // The final segment of the name must be between 2 and 60 characters
+ // in length.
+ //
+ // The backup is stored in the location(s) specified in the instance
+ // configuration of the instance containing the backup, identified
+ // by the prefix of the backup name of the form
+ // `projects/<project>/instances/<instance>`.
+ string name = 1;
+
+ // Output only. The time the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // request is received. If the request does not specify `version_time`, the
+ // `version_time` of the backup will be equivalent to the `create_time`.
+ google.protobuf.Timestamp create_time = 4
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Size of the backup in bytes.
+ int64 size_bytes = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The number of bytes that will be freed by deleting this
+ // backup. This value will be zero if, for example, this backup is part of an
+ // incremental backup chain and younger backups in the chain require that we
+ // keep its data. For backups not in an incremental backup chain, this is
+ // always the size of the backup. This value may change if backups on the same
+ // chain get created, deleted or expired.
+ int64 freeable_size_bytes = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. For a backup in an incremental backup chain, this is the
+ // storage space needed to keep the data that has changed since the previous
+ // backup. For all other backups, this is always the size of the backup. This
+ // value may change if backups on the same chain get deleted or expired.
+ //
+ // This field can be used to calculate the total storage space used by a set
+ // of backups. For example, the total space used by all backups of a database
+ // can be computed by summing up this field.
+ int64 exclusive_size_bytes = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The current state of the backup.
+ State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The names of the restored databases that reference the backup.
+ // The database names are of
+ // the form `projects/<project>/instances/<instance>/databases/<database>`.
+ // Referencing databases may exist in different instances. The existence of
+ // any referencing database prevents the backup from being deleted. When a
+ // restored database from the backup enters the `READY` state, the reference
+ // to the backup is removed.
+ repeated string referencing_databases = 7 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+
+ // Output only. The encryption information for the backup.
+ EncryptionInfo encryption_info = 8
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The encryption information for the backup, whether it is
+ // protected by one or more KMS keys. The information includes all Cloud
+ // KMS key versions used to encrypt the backup. The `encryption_status` field
+ // inside of each `EncryptionInfo` is not populated. At least one of the key
+ // versions must be available for the backup to be restored. If a key version
+ // is revoked in the middle of a restore, the restore behavior is undefined.
+ repeated EncryptionInfo encryption_information = 13
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The database dialect information for the backup.
+ DatabaseDialect database_dialect = 10
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The names of the destination backups being created by copying
+ // this source backup. The backup names are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ // Referencing backups may exist in different instances. The existence of
+ // any referencing backup prevents the backup from being deleted. When the
+ // copy operation is done (either successfully completed or cancelled or the
+ // destination backup is deleted), the reference to the backup is removed.
+ repeated string referencing_backups = 11 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+
+ // Output only. The max allowed expiration time of the backup, with
+ // microseconds granularity. A backup's expiration time can be configured in
+ // multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
+ // copying an existing backup, the expiration time specified must be
+ // less than `Backup.max_expire_time`.
+ google.protobuf.Timestamp max_expire_time = 12
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. List of backup schedule URIs that are associated with
+ // creating this backup. This is only applicable for scheduled backups, and
+ // is empty for on-demand backups.
+ //
+ // To optimize for storage, whenever possible, multiple schedules are
+ // collapsed together to create one backup. In such cases, this field captures
+ // the list of all backup schedule URIs that are associated with creating
+ // this backup. If collapsing is not done, then this field captures the
+ // single backup schedule URI associated with creating this backup.
+ repeated string backup_schedules = 14 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/BackupSchedule"
+ }
+ ];
+
+ // Output only. Populated only for backups in an incremental backup chain.
+ // Backups share the same chain id if and only if they belong to the same
+ // incremental backup chain. Use this field to determine which backups are
+ // part of the same incremental backup chain. The ordering of backups in the
+ // chain can be determined by ordering the backup `version_time`.
+ string incremental_backup_chain_id = 17
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Data deleted at a time older than this is guaranteed not to be
+ // retained in order to support this backup. For a backup in an incremental
+ // backup chain, this is the version time of the oldest backup that exists or
+ // ever existed in the chain. For all other backups, this is the version time
+ // of the backup. This field can be used to understand what data is being
+ // retained by the backup system.
+ google.protobuf.Timestamp oldest_version_time = 18
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The instance partition(s) storing the backup.
+ //
+ // This is the same as the list of the instance partition(s) that the database
+ // had footprint in at the backup's `version_time`.
+ repeated BackupInstancePartition instance_partitions = 19
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// The request for
+// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
+message CreateBackupRequest {
+ // Required. The name of the instance in which the backup will be
+ // created. This must be the same instance that contains the database the
+ // backup will be created from. The backup will be stored in the
+ // location(s) specified in the instance configuration of this
+ // instance. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // Required. The id of the backup to be created. The `backup_id` appended to
+ // `parent` forms the full backup name of the form
+ // `projects/<project>/instances/<instance>/backups/<backup_id>`.
+ string backup_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The backup to create.
+ Backup backup = 3 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The encryption configuration used to encrypt the backup. If this
+ // field is not specified, the backup will use the same encryption
+ // configuration as the database by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
+ // = `USE_DATABASE_ENCRYPTION`.
+ CreateBackupEncryptionConfig encryption_config = 4
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Metadata type for the operation returned by
+// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
+message CreateBackupMetadata {
+ // The name of the backup being created.
+ string name = 1 [
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+
+ // The name of the database the backup is created from.
+ string database = 2 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }];
+
+ // The progress of the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation.
+ OperationProgress progress = 3;
+
+ // The time at which cancellation of this operation was received.
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
+ // starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not guaranteed.
+ // Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // corresponding to `Code.CANCELLED`.
+ google.protobuf.Timestamp cancel_time = 4;
+}
+
+// The request for
+// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+message CopyBackupRequest {
+ // Required. The name of the destination instance that will contain the backup
+ // copy. Values are of the form: `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // Required. The id of the backup copy.
+ // The `backup_id` appended to `parent` forms the full backup_uri of the form
+ // `projects/<project>/instances/<instance>/backups/<backup_id>`.
+ string backup_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The source backup to be copied.
+ // The source backup needs to be in READY state for it to be copied.
+ // Once CopyBackup is in progress, the source backup cannot be deleted or
+ // cleaned up on expiration until CopyBackup is finished.
+ // Values are of the form:
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ string source_backup = 3 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+
+ // Required. The expiration time of the backup in microsecond granularity.
+ // The expiration time must be at least 6 hours and at most 366 days
+ // from the `create_time` of the source backup. Once the `expire_time` has
+ // passed, the backup is eligible to be automatically deleted by Cloud Spanner
+ // to free the resources used by the backup.
+ google.protobuf.Timestamp expire_time = 4
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The encryption configuration used to encrypt the backup. If this
+ // field is not specified, the backup will use the same encryption
+ // configuration as the source backup by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
+ // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
+ CopyBackupEncryptionConfig encryption_config = 5
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Metadata type for the operation returned by
+// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+message CopyBackupMetadata {
+ // The name of the backup being created through the copy operation.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ string name = 1 [
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+
+ // The name of the source backup that is being copied.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ string source_backup = 2 [
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+
+ // The progress of the
+ // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
+ // operation.
+ OperationProgress progress = 3;
+
+ // The time at which cancellation of CopyBackup operation was received.
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
+ // starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not guaranteed.
+ // Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // corresponding to `Code.CANCELLED`.
+ google.protobuf.Timestamp cancel_time = 4;
+}
+
+// The request for
+// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+message UpdateBackupRequest {
+ // Required. The backup to update. `backup.name`, and the fields to be updated
+ // as specified by `update_mask` are required. Other fields are ignored.
+ // Update is only supported for the following fields:
+ // * `backup.expire_time`.
+ Backup backup = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. A mask specifying which fields (e.g. `expire_time`) in the
+ // Backup resource should be updated. This mask is relative to the Backup
+ // resource, not to the request message. The field mask must always be
+ // specified; this prevents any future fields from being erased accidentally
+ // by clients that do not know about them.
+ google.protobuf.FieldMask update_mask = 2
+ [(google.api.field_behavior) = REQUIRED];
+}
+
+// The request for
+// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
+message GetBackupRequest {
+ // Required. Name of the backup.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+}
+
+// The request for
+// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
+message DeleteBackupRequest {
+ // Required. Name of the backup to delete.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+}
+
+// The request for
+// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+message ListBackupsRequest {
+ // Required. The instance to list backups from. Values are of the
+ // form `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // An expression that filters the list of returned backups.
+ //
+ // A filter expression consists of a field name, a comparison operator, and a
+ // value for filtering.
+ // The value must be a string, a number, or a boolean. The comparison operator
+ // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+ // Colon `:` is the contains operator. Filter rules are not case sensitive.
+ //
+ // The following fields in the
+ // [Backup][google.spanner.admin.database.v1.Backup] are eligible for
+ // filtering:
+ //
+ // * `name`
+ // * `database`
+ // * `state`
+ // * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
+ // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
+ // * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
+ // * `size_bytes`
+ // * `backup_schedules`
+ //
+ // You can combine multiple expressions by enclosing each expression in
+ // parentheses. By default, expressions are combined with AND logic, but
+ // you can specify AND, OR, and NOT logic explicitly.
+ //
+ // Here are a few examples:
+ //
+ // * `name:Howl` - The backup's name contains the string "howl".
+ // * `database:prod`
+ // - The database's name contains the string "prod".
+ // * `state:CREATING` - The backup is pending creation.
+ // * `state:READY` - The backup is fully created and ready for use.
+ // * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
+ // - The backup name contains the string "howl" and `create_time`
+ // of the backup is before 2018-03-28T14:50:00Z.
+ // * `expire_time < \"2018-03-28T14:50:00Z\"`
+ // - The backup `expire_time` is before 2018-03-28T14:50:00Z.
+ // * `size_bytes > 10000000000` - The backup's size is greater than 10GB
+ // * `backup_schedules:daily`
+ // - The backup is created from a schedule with "daily" in its name.
+ string filter = 2;
+
+ // Number of backups to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ int32 page_size = 3;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
+ // from a previous
+ // [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
+ // to the same `parent` and with the same `filter`.
+ string page_token = 4;
+}
+
+// The response for
+// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+message ListBackupsResponse {
+ // The list of matching backups. Backups returned are ordered by `create_time`
+ // in descending order, starting from the most recent `create_time`.
+ repeated Backup backups = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
+ // call to fetch more of the matching backups.
+ string next_page_token = 2;
+}
+
+// The request for
+// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+message ListBackupOperationsRequest {
+ // Required. The instance of the backup operations. Values are of
+ // the form `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // An expression that filters the list of returned backup operations.
+ //
+ // A filter expression consists of a field name, a
+ // comparison operator, and a value for filtering.
+ // The value must be a string, a number, or a boolean. The comparison operator
+ // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+ // Colon `:` is the contains operator. Filter rules are not case sensitive.
+ //
+ // The following fields in the [operation][google.longrunning.Operation]
+ // are eligible for filtering:
+ //
+ // * `name` - The name of the long-running operation
+ // * `done` - False if the operation is in progress, else true.
+ // * `metadata.@type` - the type of metadata. For example, the type string
+ // for
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+ // is
+ // `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
+ // * `metadata.<field_name>` - any field in metadata.value.
+ // `metadata.@type` must be specified first if filtering on metadata
+ // fields.
+ // * `error` - Error associated with the long-running operation.
+ // * `response.@type` - the type of response.
+ // * `response.<field_name>` - any field in response.value.
+ //
+ // You can combine multiple expressions by enclosing each expression in
+ // parentheses. By default, expressions are combined with AND logic, but
+ // you can specify AND, OR, and NOT logic explicitly.
+ //
+ // Here are a few examples:
+ //
+ // * `done:true` - The operation is complete.
+ // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
+ // `metadata.database:prod` - Returns operations where:
+ // * The operation's metadata type is
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // * The source database name of backup contains the string "prod".
+ // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
+ // `(metadata.name:howl) AND` \
+ // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
+ // `(error:*)` - Returns operations where:
+ // * The operation's metadata type is
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // * The backup name contains the string "howl".
+ // * The operation started before 2018-03-28T14:50:00Z.
+ // * The operation resulted in an error.
+ // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
+ // `(metadata.source_backup:test) AND` \
+ // `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
+ // `(error:*)` - Returns operations where:
+ // * The operation's metadata type is
+ // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ // * The source backup name contains the string "test".
+ // * The operation started before 2022-01-18T14:50:00Z.
+ // * The operation resulted in an error.
+ // * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
+ // `(metadata.database:test_db)) OR` \
+ // `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
+ // AND` \
+ // `(metadata.source_backup:test_bkp)) AND` \
+ // `(error:*)` - Returns operations where:
+ // * The operation's metadata matches either of criteria:
+ // * The operation's metadata type is
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+ // AND the source database name of the backup contains the string
+ // "test_db"
+ // * The operation's metadata type is
+ // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
+ // AND the source backup name contains the string "test_bkp"
+ // * The operation resulted in an error.
+ string filter = 2;
+
+ // Number of operations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ int32 page_size = 3;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
+ // from a previous
+ // [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
+ // to the same `parent` and with the same `filter`.
+ string page_token = 4;
+}
+
+// The response for
+// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+message ListBackupOperationsResponse {
+ // The list of matching backup [long-running
+ // operations][google.longrunning.Operation]. Each operation's name will be
+ // prefixed by the backup's name. The operation's
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that are pending or have completed/failed/canceled within the
+ // last 7 days. Operations returned are ordered by
+ // `operation.metadata.value.progress.start_time` in descending order starting
+ // from the most recently started operation.
+ repeated google.longrunning.Operation operations = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
+ // call to fetch more of the matching metadata.
+ string next_page_token = 2;
+}
+
+// Information about a backup.
+message BackupInfo {
+ // Name of the backup.
+ string backup = 1 [
+ (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" }
+ ];
+
+ // The backup contains an externally consistent copy of `source_database` at
+ // the timestamp specified by `version_time`. If the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // request did not specify `version_time`, the `version_time` of the backup is
+ // equivalent to the `create_time`.
+ google.protobuf.Timestamp version_time = 4;
+
+ // The time the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // request was received.
+ google.protobuf.Timestamp create_time = 2;
+
+ // Name of the database the backup was created from.
+ string source_database = 3 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }];
+}
+
+// Encryption configuration for the backup to create.
+message CreateBackupEncryptionConfig {
+ // Encryption types for the backup.
+ enum EncryptionType {
+ // Unspecified. Do not use.
+ ENCRYPTION_TYPE_UNSPECIFIED = 0;
+
+ // Use the same encryption configuration as the database. This is the
+ // default option when
+ // [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
+ // is empty. For example, if the database is using
+ // `Customer_Managed_Encryption`, the backup will be using the same Cloud
+ // KMS key as the database.
+ USE_DATABASE_ENCRYPTION = 1;
+
+ // Use Google default encryption.
+ GOOGLE_DEFAULT_ENCRYPTION = 2;
+
+ // Use customer managed encryption. If specified, `kms_key_name`
+ // must contain a valid Cloud KMS key.
+ CUSTOMER_MANAGED_ENCRYPTION = 3;
+ }
+
+ // Required. The encryption type of the backup.
+ EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The Cloud KMS key that will be used to protect the backup.
+ // This field should be set only when
+ // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
+ // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ string kms_key_name = 2 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+
+ // Optional. Specifies the KMS configuration for the one or more keys used to
+ // protect the backup. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ //
+ // The keys referenced by kms_key_names must fully cover all
+ // regions of the backup's instance configuration. Some examples:
+ // * For single region instance configs, specify a single regional
+ // location KMS key.
+ // * For multi-regional instance configs of type GOOGLE_MANAGED,
+ // either specify a multi-regional location KMS key or multiple regional
+ // location KMS keys that cover all regions in the instance config.
+ // * For an instance config of type USER_MANAGED, please specify only
+ // regional location KMS keys to cover each region in the instance config.
+ // Multi-regional location KMS keys are not supported for USER_MANAGED
+ // instance configs.
+ repeated string kms_key_names = 3 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+}
+
+// Encryption configuration for the copied backup.
+message CopyBackupEncryptionConfig {
+ // Encryption types for the backup.
+ enum EncryptionType {
+ // Unspecified. Do not use.
+ ENCRYPTION_TYPE_UNSPECIFIED = 0;
+
+ // This is the default option for
+ // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
+ // when
+ // [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
+ // is not specified. For example, if the source backup is using
+ // `Customer_Managed_Encryption`, the backup will be using the same Cloud
+ // KMS key as the source backup.
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1;
+
+ // Use Google default encryption.
+ GOOGLE_DEFAULT_ENCRYPTION = 2;
+
+ // Use customer managed encryption. If specified, either `kms_key_name` or
+ // `kms_key_names` must contain valid Cloud KMS key(s).
+ CUSTOMER_MANAGED_ENCRYPTION = 3;
+ }
+
+ // Required. The encryption type of the backup.
+ EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The Cloud KMS key that will be used to protect the backup.
+ // This field should be set only when
+ // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
+ // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ string kms_key_name = 2 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+
+ // Optional. Specifies the KMS configuration for the one or more keys used to
+ // protect the backup. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ // Kms keys specified can be in any order.
+ //
+ // The keys referenced by kms_key_names must fully cover all
+ // regions of the backup's instance configuration. Some examples:
+ // * For single region instance configs, specify a single regional
+ // location KMS key.
+ // * For multi-regional instance configs of type GOOGLE_MANAGED,
+ // either specify a multi-regional location KMS key or multiple regional
+ // location KMS keys that cover all regions in the instance config.
+ // * For an instance config of type USER_MANAGED, please specify only
+ // regional location KMS keys to cover each region in the instance config.
+ // Multi-regional location KMS keys are not supported for USER_MANAGED
+ // instance configs.
+ repeated string kms_key_names = 3 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+}
+
+// The specification for full backups.
+// A full backup stores the entire contents of the database at a given
+// version time.
+message FullBackupSpec {}
+
+// The specification for incremental backup chains.
+// An incremental backup stores the delta of changes between a previous
+// backup and the database contents at a given version time. An
+// incremental backup chain consists of a full backup and zero or more
+// successive incremental backups. The first backup created for an
+// incremental backup chain is always a full backup.
+message IncrementalBackupSpec {}
+
+// Instance partition information for the backup.
+message BackupInstancePartition {
+ // A unique identifier for the instance partition. Values are of the form
+ // `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>`
+ string instance_partition = 1 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstancePartition"
+ }];
+}
diff --git a/handwritten/spanner/protos/google/spanner/admin/database/v1/backup_schedule.proto b/handwritten/spanner/protos/google/spanner/admin/database/v1/backup_schedule.proto
new file mode 100644
index 00000000000..c273516ae09
--- /dev/null
+++ b/handwritten/spanner/protos/google/spanner/admin/database/v1/backup_schedule.proto
@@ -0,0 +1,230 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.database.v1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/spanner/admin/database/v1/backup.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
+option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb";
+option java_multiple_files = true;
+option java_outer_classname = "BackupScheduleProto";
+option java_package = "com.google.spanner.admin.database.v1";
+option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
+option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
+
+// Defines specifications of the backup schedule.
+message BackupScheduleSpec {
+ // Required.
+ oneof schedule_spec {
+ // Cron style schedule specification.
+ CrontabSpec cron_spec = 1;
+ }
+}
+
+// BackupSchedule expresses the automated backup creation specification for a
+// Spanner database.
+// Next ID: 10
+message BackupSchedule {
+ option (google.api.resource) = {
+ type: "spanner.googleapis.com/BackupSchedule"
+ pattern: "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}"
+ plural: "backupSchedules"
+ singular: "backupSchedule"
+ };
+
+ // Identifier. Output only for the
+ // [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
+ // Required for the
+ // [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
+ // operation. A globally unique identifier for the backup schedule which
+ // cannot be changed. Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
+ // The final segment of the name must be between 2 and 60 characters in
+ // length.
+ string name = 1 [(google.api.field_behavior) = IDENTIFIER];
+
+ // Optional. The schedule specification based on which the backup creations
+ // are triggered.
+ BackupScheduleSpec spec = 6 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The retention duration of a backup that must be at least 6 hours
+ // and at most 366 days. The backup is eligible to be automatically deleted
+ // once the retention period has elapsed.
+ google.protobuf.Duration retention_duration = 3
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The encryption configuration that will be used to encrypt the
+ // backup. If this field is not specified, the backup will use the same
+ // encryption configuration as the database.
+ CreateBackupEncryptionConfig encryption_config = 4
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Required. Backup type spec determines the type of backup that is created by
+ // the backup schedule. Currently, only full backups are supported.
+ oneof backup_type_spec {
+ // The schedule creates only full backups.
+ FullBackupSpec full_backup_spec = 7;
+
+ // The schedule creates incremental backup chains.
+ IncrementalBackupSpec incremental_backup_spec = 8;
+ }
+
+ // Output only. The timestamp at which the schedule was last updated.
+ // If the schedule has never been updated, this field contains the timestamp
+ // when the schedule was first created.
+ google.protobuf.Timestamp update_time = 9
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// CrontabSpec can be used to specify the version time and frequency at
+// which the backup should be created.
+message CrontabSpec {
+ // Required. Textual representation of the crontab. User can customize the
+ // backup frequency and the backup version time using the cron
+ // expression. The version time must be in UTC timezone.
+ //
+ // The backup will contain an externally consistent copy of the
+ // database at the version time. Allowed frequencies are 12 hour, 1 day,
+ // 1 week and 1 month. Examples of valid cron specifications:
+ // * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
+ // * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
+ // * `0 2 * * * ` : once a day at 2 past midnight in UTC.
+ // * `0 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC.
+ // * `0 2 8 * * ` : once a month on 8th day at 2 past midnight in UTC.
+ string text = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Output only. The time zone of the times in `CrontabSpec.text`. Currently
+ // only UTC is supported.
+ string time_zone = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Schedule backups will contain an externally consistent copy
+ // of the database at the version time specified in
+ // `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
+ // of the scheduled backups at that version time. Spanner will initiate
+ // the creation of scheduled backups within the time window bounded by the
+ // version_time specified in `schedule_spec.cron_spec` and version_time +
+ // `creation_window`.
+ google.protobuf.Duration creation_window = 3
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// The request for
+// [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+message CreateBackupScheduleRequest {
+ // Required. The name of the database that this backup schedule applies to.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+
+ // Required. The Id to use for the backup schedule. The `backup_schedule_id`
+ // appended to `parent` forms the full backup schedule name of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
+ string backup_schedule_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The backup schedule to create.
+ BackupSchedule backup_schedule = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The request for
+// [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+message GetBackupScheduleRequest {
+ // Required. The name of the schedule to retrieve.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/BackupSchedule"
+ }
+ ];
+}
+
+// The request for
+// [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+message DeleteBackupScheduleRequest {
+ // Required. The name of the schedule to delete.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/BackupSchedule"
+ }
+ ];
+}
+
+// The request for
+// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+message ListBackupSchedulesRequest {
+ // Required. Database is the parent resource whose backup schedules should be
+ // listed. Values are of the form
+ // projects/<project>/instances/<instance>/databases/<database>
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+
+ // Optional. Number of backup schedules to be returned in the response. If 0
+ // or less, defaults to the server's maximum allowed page size.
+ int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
+ // from a previous
+ // [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
+ // to the same `parent`.
+ string page_token = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The response for
+// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+message ListBackupSchedulesResponse {
+ // The list of backup schedules for a database.
+ repeated BackupSchedule backup_schedules = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
+ // call to fetch more of the schedules.
+ string next_page_token = 2;
+}
+
+// The request for
+// [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+message UpdateBackupScheduleRequest {
+ // Required. The backup schedule to update. `backup_schedule.name`, and the
+ // fields to be updated as specified by `update_mask` are required. Other
+ // fields are ignored.
+ BackupSchedule backup_schedule = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. A mask specifying which fields in the BackupSchedule resource
+ // should be updated. This mask is relative to the BackupSchedule resource,
+ // not to the request message. The field mask must always be
+ // specified; this prevents any future fields from being erased
+ // accidentally.
+ google.protobuf.FieldMask update_mask = 2
+ [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/handwritten/spanner/protos/google/spanner/admin/database/v1/common.proto b/handwritten/spanner/protos/google/spanner/admin/database/v1/common.proto
new file mode 100644
index 00000000000..c494b8cf780
--- /dev/null
+++ b/handwritten/spanner/protos/google/spanner/admin/database/v1/common.proto
@@ -0,0 +1,132 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.database.v1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
+option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb";
+option java_multiple_files = true;
+option java_outer_classname = "CommonProto";
+option java_package = "com.google.spanner.admin.database.v1";
+option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
+option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
+option (google.api.resource_definition) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}"
+};
+option (google.api.resource_definition) = {
+ type: "cloudkms.googleapis.com/CryptoKeyVersion"
+ pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}"
+};
+
+// Encapsulates progress related information for a Cloud Spanner long
+// running operation.
+message OperationProgress {
+ // Percent completion of the operation.
+ // Values are between 0 and 100 inclusive.
+ int32 progress_percent = 1;
+
+ // Time the request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // If set, the time at which this operation failed or was completed
+ // successfully.
+ google.protobuf.Timestamp end_time = 3;
+}
+
+// Encryption configuration for a Cloud Spanner database.
+message EncryptionConfig {
+ // The Cloud KMS key to be used for encrypting and decrypting
+ // the database. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ string kms_key_name = 2 [(google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }];
+
+ // Specifies the KMS configuration for the one or more keys used to encrypt
+ // the database. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ //
+ // The keys referenced by kms_key_names must fully cover all
+ // regions of the database instance configuration. Some examples:
+ // * For single region database instance configs, specify a single regional
+ // location KMS key.
+ // * For multi-regional database instance configs of type GOOGLE_MANAGED,
+ // either specify a multi-regional location KMS key or multiple regional
+ // location KMS keys that cover all regions in the instance config.
+ // * For a database instance config of type USER_MANAGED, please specify only
+ // regional location KMS keys to cover each region in the instance config.
+ // Multi-regional location KMS keys are not supported for USER_MANAGED
+ // instance configs.
+ repeated string kms_key_names = 3 [(google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }];
+}
+
+// Encryption information for a Cloud Spanner database or backup.
+message EncryptionInfo {
+ // Possible encryption types.
+ enum Type {
+ // Encryption type was not specified, though data at rest remains encrypted.
+ TYPE_UNSPECIFIED = 0;
+
+ // The data is encrypted at rest with a key that is
+ // fully managed by Google. No key version or status will be populated.
+ // This is the default state.
+ GOOGLE_DEFAULT_ENCRYPTION = 1;
+
+ // The data is encrypted at rest with a key that is
+ // managed by the customer. The active version of the key. `kms_key_version`
+ // will be populated, and `encryption_status` may be populated.
+ CUSTOMER_MANAGED_ENCRYPTION = 2;
+ }
+
+ // Output only. The type of encryption.
+ Type encryption_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. If present, the status of a recent encrypt/decrypt call on
+ // underlying data for this database or backup. Regardless of status, data is
+ // always encrypted at rest.
+ google.rpc.Status encryption_status = 4
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. A Cloud KMS key version that is being used to protect the
+ // database or backup.
+ string kms_key_version = 2 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKeyVersion"
+ }
+ ];
+}
+
+// Indicates the dialect type of a database.
+enum DatabaseDialect {
+ // Default value. This value will create a database with the
+ // GOOGLE_STANDARD_SQL dialect.
+ DATABASE_DIALECT_UNSPECIFIED = 0;
+
+ // GoogleSQL supported SQL.
+ GOOGLE_STANDARD_SQL = 1;
+
+ // PostgreSQL supported SQL.
+ POSTGRESQL = 2;
+}
diff --git a/handwritten/spanner/protos/google/spanner/admin/database/v1/spanner_database_admin.proto b/handwritten/spanner/protos/google/spanner/admin/database/v1/spanner_database_admin.proto
new file mode 100644
index 00000000000..d41a4114c20
--- /dev/null
+++ b/handwritten/spanner/protos/google/spanner/admin/database/v1/spanner_database_admin.proto
@@ -0,0 +1,1314 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.database.v1;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/spanner/admin/database/v1/backup.proto";
+import "google/spanner/admin/database/v1/backup_schedule.proto";
+import "google/spanner/admin/database/v1/common.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1";
+option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb";
+option java_multiple_files = true;
+option java_outer_classname = "SpannerDatabaseAdminProto";
+option java_package = "com.google.spanner.admin.database.v1";
+option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1";
+option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1";
+option (google.api.resource_definition) = {
+ type: "spanner.googleapis.com/Instance"
+ pattern: "projects/{project}/instances/{instance}"
+};
+option (google.api.resource_definition) = {
+ type: "spanner.googleapis.com/InstancePartition"
+ pattern: "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}"
+};
+
+// Cloud Spanner Database Admin API
+//
+// The Cloud Spanner Database Admin API can be used to:
+// * create, drop, and list databases
+// * update the schema of pre-existing databases
+// * create, delete, copy and list backups for a database
+// * restore a database from an existing backup
+service DatabaseAdmin {
+ option (google.api.default_host) = "spanner.googleapis.com";
+ option (google.api.oauth_scopes) =
+ "https://www.googleapis.com/auth/cloud-platform,"
+ "https://www.googleapis.com/auth/spanner.admin";
+
+ // Lists Cloud Spanner databases.
+ rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*}/databases"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Creates a new Cloud Spanner database and starts to prepare it for serving.
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<database_name>/operations/<operation_id>` and
+ // can be used to track preparation of the database. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Database][google.spanner.admin.database.v1.Database], if successful.
+ rpc CreateDatabase(CreateDatabaseRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/instances/*}/databases"
+ body: "*"
+ };
+ option (google.api.method_signature) = "parent,create_statement";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.database.v1.Database"
+ metadata_type: "google.spanner.admin.database.v1.CreateDatabaseMetadata"
+ };
+ }
+
+ // Gets the state of a Cloud Spanner database.
+ rpc GetDatabase(GetDatabaseRequest) returns (Database) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/instances/*/databases/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Updates a Cloud Spanner database. The returned
+ // [long-running operation][google.longrunning.Operation] can be used to track
+ // the progress of updating the database. If the named database does not
+ // exist, returns `NOT_FOUND`.
+ //
+ // While the operation is pending:
+ //
+ // * The database's
+ // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ // field is set to true.
+ // * Cancelling the operation is best-effort. If the cancellation succeeds,
+ // the operation metadata's
+ // [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ // is set, the updates are reverted, and the operation terminates with a
+ // `CANCELLED` status.
+ // * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
+ // until the pending operation is done (returns successfully or with
+ // error).
+ // * Reading the database via the API continues to give the pre-request
+ // values.
+ //
+ // Upon completion of the returned operation:
+ //
+ // * The new values are in effect and readable via the API.
+ // * The database's
+ // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ // field becomes false.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
+ // and can be used to track the database modification. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Database][google.spanner.admin.database.v1.Database], if successful.
+ rpc UpdateDatabase(UpdateDatabaseRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ patch: "/v1/{database.name=projects/*/instances/*/databases/*}"
+ body: "database"
+ };
+ option (google.api.method_signature) = "database,update_mask";
+ option (google.longrunning.operation_info) = {
+ response_type: "Database"
+ metadata_type: "UpdateDatabaseMetadata"
+ };
+ }
+
+ // Updates the schema of a Cloud Spanner database by
+ // creating/altering/dropping tables, columns, indexes, etc. The returned
+ // [long-running operation][google.longrunning.Operation] will have a name of
+ // the format `<database_name>/operations/<operation_id>` and can be used to
+ // track execution of the schema change(s). The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ // The operation has no response.
+ rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl"
+ body: "*"
+ };
+ option (google.api.method_signature) = "database,statements";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.protobuf.Empty"
+ metadata_type: "google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata"
+ };
+ }
+
+ // Drops (aka deletes) a Cloud Spanner database.
+ // Completed backups for the database will be retained according to their
+ // `expire_time`.
+ // Note: Cloud Spanner might continue to accept requests for a few seconds
+ // after the database has been deleted.
+ rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{database=projects/*/instances/*/databases/*}"
+ };
+ option (google.api.method_signature) = "database";
+ }
+
+ // Returns the schema of a Cloud Spanner database as a list of formatted
+ // DDL statements. This method does not show pending schema updates, those may
+ // be queried using the [Operations][google.longrunning.Operations] API.
+ rpc GetDatabaseDdl(GetDatabaseDdlRequest) returns (GetDatabaseDdlResponse) {
+ option (google.api.http) = {
+ get: "/v1/{database=projects/*/instances/*/databases/*}/ddl"
+ };
+ option (google.api.method_signature) = "database";
+ }
+
+ // Sets the access control policy on a database or backup resource.
+ // Replaces any existing policy.
+ //
+ // Authorization requires `spanner.databases.setIamPolicy`
+ // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ // For backups, authorization requires `spanner.backups.setIamPolicy`
+ // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest)
+ returns (google.iam.v1.Policy) {
+ option (google.api.http) = {
+ post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy"
+ body: "*"
+ additional_bindings {
+ post: "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy"
+ body: "*"
+ }
+ additional_bindings {
+ post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy"
+ body: "*"
+ }
+ };
+ option (google.api.method_signature) = "resource,policy";
+ }
+
+ // Gets the access control policy for a database or backup resource.
+ // Returns an empty policy if a database or backup exists but does not have a
+ // policy set.
+ //
+ // Authorization requires `spanner.databases.getIamPolicy` permission on
+ // [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ // For backups, authorization requires `spanner.backups.getIamPolicy`
+ // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest)
+ returns (google.iam.v1.Policy) {
+ option (google.api.http) = {
+ post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy"
+ body: "*"
+ additional_bindings {
+ post: "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy"
+ body: "*"
+ }
+ additional_bindings {
+ post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy"
+ body: "*"
+ }
+ };
+ option (google.api.method_signature) = "resource";
+ }
+
+ // Returns permissions that the caller has on the specified database or backup
+ // resource.
+ //
+ // Attempting this RPC on a non-existent Cloud Spanner database will
+ // result in a NOT_FOUND error if the user has
+ // `spanner.databases.list` permission on the containing Cloud
+ // Spanner instance. Otherwise returns an empty set of permissions.
+ // Calling this method on a backup that does not exist will
+ // result in a NOT_FOUND error if the user has
+ // `spanner.backups.list` permission on the containing instance.
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest)
+ returns (google.iam.v1.TestIamPermissionsResponse) {
+ option (google.api.http) = {
+ post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions"
+ body: "*"
+ additional_bindings {
+ post: "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions"
+ body: "*"
+ }
+ additional_bindings {
+ post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions"
+ body: "*"
+ }
+ additional_bindings {
+ post: "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions"
+ body: "*"
+ }
+ };
+ option (google.api.method_signature) = "resource,permissions";
+ }
+
+ // Starts creating a new Cloud Spanner Backup.
+ // The returned backup [long-running operation][google.longrunning.Operation]
+ // will have a name of the format
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ // and can be used to track creation of the backup. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ // Cancelling the returned operation will stop the creation and delete the
+ // backup. There can be only one pending backup creation per database. Backup
+ // creation of different databases can run concurrently.
+ rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/instances/*}/backups"
+ body: "backup"
+ };
+ option (google.api.method_signature) = "parent,backup,backup_id";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.database.v1.Backup"
+ metadata_type: "google.spanner.admin.database.v1.CreateBackupMetadata"
+ };
+ }
+
+ // Starts copying a Cloud Spanner Backup.
+ // The returned backup [long-running operation][google.longrunning.Operation]
+ // will have a name of the format
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ // and can be used to track copying of the backup. The operation is associated
+ // with the destination backup.
+ // The [metadata][google.longrunning.Operation.metadata] field type is
+ // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ // Cancelling the returned operation will stop the copying and delete the
+ // destination backup. Concurrent CopyBackup requests can run on the same
+ // source backup.
+ rpc CopyBackup(CopyBackupRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/instances/*}/backups:copy"
+ body: "*"
+ };
+ option (google.api.method_signature) =
+ "parent,backup_id,source_backup,expire_time";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.database.v1.Backup"
+ metadata_type: "google.spanner.admin.database.v1.CopyBackupMetadata"
+ };
+ }
+
+ // Gets metadata on a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ rpc GetBackup(GetBackupRequest) returns (Backup) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/instances/*/backups/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Updates a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ rpc UpdateBackup(UpdateBackupRequest) returns (Backup) {
+ option (google.api.http) = {
+ patch: "/v1/{backup.name=projects/*/instances/*/backups/*}"
+ body: "backup"
+ };
+ option (google.api.method_signature) = "backup,update_mask";
+ }
+
+ // Deletes a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/instances/*/backups/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Lists completed and pending backups.
+ // Backups returned are ordered by `create_time` in descending order,
+ // starting from the most recent `create_time`.
+ rpc ListBackups(ListBackupsRequest) returns (ListBackupsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*}/backups"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Create a new database by restoring from a completed backup. The new
+ // database must be in the same project and in an instance with the same
+ // instance configuration as the instance containing
+ // the backup. The returned database [long-running
+ // operation][google.longrunning.Operation] has a name of the format
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
+ // and can be used to track the progress of the operation, and to cancel it.
+ // The [metadata][google.longrunning.Operation.metadata] field type is
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] type
+ // is [Database][google.spanner.admin.database.v1.Database], if
+ // successful. Cancelling the returned operation will stop the restore and
+ // delete the database.
+ // There can be only one database being restored into an instance at a time.
+ // Once the restore operation completes, a new restore operation can be
+ // initiated, without waiting for the optimize operation associated with the
+ // first restore to complete.
+ rpc RestoreDatabase(RestoreDatabaseRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/instances/*}/databases:restore"
+ body: "*"
+ };
+ option (google.api.method_signature) = "parent,database_id,backup";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.database.v1.Database"
+ metadata_type: "google.spanner.admin.database.v1.RestoreDatabaseMetadata"
+ };
+ }
+
+ // Lists database [longrunning-operations][google.longrunning.Operation].
+ // A database operation has a name of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations.
+ rpc ListDatabaseOperations(ListDatabaseOperationsRequest)
+ returns (ListDatabaseOperationsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*}/databaseOperations"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Lists the backup [long-running operations][google.longrunning.Operation] in
+ // the given instance. A backup operation has a name of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations. Operations returned are ordered by
+ // `operation.metadata.value.progress.start_time` in descending order starting
+ // from the most recently started operation.
+ rpc ListBackupOperations(ListBackupOperationsRequest)
+ returns (ListBackupOperationsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*}/backupOperations"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Lists Cloud Spanner database roles.
+ rpc ListDatabaseRoles(ListDatabaseRolesRequest)
+ returns (ListDatabaseRolesResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Adds split points to specified tables, indexes of a database.
+ rpc AddSplitPoints(AddSplitPointsRequest) returns (AddSplitPointsResponse) {
+ option (google.api.http) = {
+ post: "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints"
+ body: "*"
+ };
+ option (google.api.method_signature) = "database,split_points";
+ }
+
+ // Creates a new backup schedule.
+ rpc CreateBackupSchedule(CreateBackupScheduleRequest)
+ returns (BackupSchedule) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules"
+ body: "backup_schedule"
+ };
+ option (google.api.method_signature) =
+ "parent,backup_schedule,backup_schedule_id";
+ }
+
+ // Gets backup schedule for the input schedule name.
+ rpc GetBackupSchedule(GetBackupScheduleRequest) returns (BackupSchedule) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Updates a backup schedule.
+ rpc UpdateBackupSchedule(UpdateBackupScheduleRequest)
+ returns (BackupSchedule) {
+ option (google.api.http) = {
+ patch: "/v1/{backup_schedule.name=projects/*/instances/*/databases/*/backupSchedules/*}"
+ body: "backup_schedule"
+ };
+ option (google.api.method_signature) = "backup_schedule,update_mask";
+ }
+
+ // Deletes a backup schedule.
+ rpc DeleteBackupSchedule(DeleteBackupScheduleRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Lists all the backup schedules for the database.
+ rpc ListBackupSchedules(ListBackupSchedulesRequest)
+ returns (ListBackupSchedulesResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // This is an internal API called by Spanner Graph jobs. You should never need
+ // to call this API directly.
+ rpc InternalUpdateGraphOperation(InternalUpdateGraphOperationRequest)
+ returns (InternalUpdateGraphOperationResponse) {
+ option (google.api.method_signature) = "database,operation_id";
+ }
+}
+
+// Information about the database restore.
+message RestoreInfo {
+ // The type of the restore source.
+ RestoreSourceType source_type = 1;
+
+ // Information about the source used to restore the database.
+ oneof source_info {
+ // Information about the backup used to restore the database. The backup
+ // may no longer exist.
+ BackupInfo backup_info = 2;
+ }
+}
+
+// A Cloud Spanner database.
+message Database {
+ option (google.api.resource) = {
+ type: "spanner.googleapis.com/Database"
+ pattern: "projects/{project}/instances/{instance}/databases/{database}"
+ };
+
+ // Indicates the current state of the database.
+ enum State {
+ // Not specified.
+ STATE_UNSPECIFIED = 0;
+
+ // The database is still being created. Operations on the database may fail
+ // with `FAILED_PRECONDITION` in this state.
+ CREATING = 1;
+
+ // The database is fully created and ready for use.
+ READY = 2;
+
+ // The database is fully created and ready for use, but is still
+ // being optimized for performance and cannot handle full load.
+ //
+ // In this state, the database still references the backup
+ // it was restored from, preventing the backup
+ // from being deleted. When optimizations are complete, the full performance
+ // of the database will be restored, and the database will transition to
+ // `READY` state.
+ READY_OPTIMIZING = 3;
+ }
+
+ // Required. The name of the database. Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`,
+ // where `<database>` is as specified in the `CREATE DATABASE`
+ // statement. This name can be passed to other API methods to
+ // identify the database.
+ string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Output only. The current database state.
+ State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. If exists, the time at which the database creation started.
+ google.protobuf.Timestamp create_time = 3
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Applicable only for restored databases. Contains information
+ // about the restore source.
+ RestoreInfo restore_info = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. For databases that are using customer managed encryption, this
+ // field contains the encryption configuration for the database.
+ // For databases that are using Google default or other types of encryption,
+ // this field is empty.
+ EncryptionConfig encryption_config = 5
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. For databases that are using customer managed encryption, this
+ // field contains the encryption information for the database, such as
+ // all Cloud KMS key versions that are in use. The `encryption_status` field
+ // inside of each `EncryptionInfo` is not populated.
+ //
+ // For databases that are using Google default or other types of encryption,
+ // this field is empty.
+ //
+ // This field is propagated lazily from the backend. There might be a delay
+ // from when a key version is being used and when it appears in this field.
+ repeated EncryptionInfo encryption_info = 8
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The period in which Cloud Spanner retains all versions of data
+ // for the database. This is the same as the value of version_retention_period
+ // database option set using
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+ // Defaults to 1 hour, if not set.
+ string version_retention_period = 6
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Earliest timestamp at which older versions of the data can be
+ // read. This value is continuously updated by Cloud Spanner and becomes stale
+ // the moment it is queried. If you are using this value to recover data, make
+ // sure to account for the time from the moment when the value is queried to
+ // the moment when you initiate the recovery.
+ google.protobuf.Timestamp earliest_version_time = 7
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The read-write region which contains the database's leader
+ // replicas.
+ //
+ // This is the same as the value of default_leader
+ // database option set using DatabaseAdmin.CreateDatabase or
+ // DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
+ string default_leader = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The dialect of the Cloud Spanner Database.
+ DatabaseDialect database_dialect = 10
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Whether drop protection is enabled for this database. Defaults to false,
+ // if not set. For more details, please see how to [prevent accidental
+ // database
+ // deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
+ bool enable_drop_protection = 11;
+
+ // Output only. If true, the database is being updated. If false, there are no
+ // ongoing update operations for the database.
+ bool reconciling = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// The request for
+// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+message ListDatabasesRequest {
+ // Required. The instance whose databases should be listed.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // Number of databases to be returned in the response. If 0 or less,
+ // defaults to the server's maximum allowed page size.
+ int32 page_size = 3;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
+ // from a previous
+ // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
+ string page_token = 4;
+}
+
+// The response for
+// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+message ListDatabasesResponse {
+ // Databases that matched the request.
+ repeated Database databases = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
+ // call to fetch more of the matching databases.
+ string next_page_token = 2;
+}
+
+// The request for
+// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+message CreateDatabaseRequest {
+ // Required. The name of the instance that will serve the new database.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // Required. A `CREATE DATABASE` statement, which specifies the ID of the
+ // new database. The database ID must conform to the regular expression
+ // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
+ // If the database ID is a reserved word or if it contains a hyphen, the
+ // database ID must be enclosed in backticks (`` ` ``).
+ string create_statement = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. A list of DDL statements to run inside the newly created
+ // database. Statements can create tables, indexes, etc. These
+ // statements execute atomically with the creation of the database:
+ // if there is an error in any statement, the database is not created.
+ repeated string extra_statements = 3 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The encryption configuration for the database. If this field is
+ // not specified, Cloud Spanner will encrypt/decrypt all data at rest using
+ // Google default encryption.
+ EncryptionConfig encryption_config = 4
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The dialect of the Cloud Spanner Database.
+ DatabaseDialect database_dialect = 5 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
+ // 'extra_statements' above.
+ // Contains a protobuf-serialized
+ // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
+ // To generate it, [install](https://grpc.io/docs/protoc-installation/) and
+ // run `protoc` with --include_imports and --descriptor_set_out. For example,
+ // to generate for moon/shot/app.proto, run
+ // ```
+ // $protoc --proto_path=/app_path --proto_path=/lib_path \
+ // --include_imports \
+ // --descriptor_set_out=descriptors.data \
+ // moon/shot/app.proto
+ // ```
+ // For more details, see protobuffer [self
+ // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
+ bytes proto_descriptors = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Metadata type for the operation returned by
+// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
+message CreateDatabaseMetadata {
+ // The database being created.
+ string database = 1 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }];
+}
+
+// The request for
+// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
+message GetDatabaseRequest {
+ // Required. The name of the requested database. Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+}
+
+// The request for
+// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+message UpdateDatabaseRequest {
+ // Required. The database to update.
+ // The `name` field of the database is of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`.
+ Database database = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The list of fields to update. Currently, only
+ // `enable_drop_protection` field can be updated.
+ google.protobuf.FieldMask update_mask = 2
+ [(google.api.field_behavior) = REQUIRED];
+}
+
+// Metadata type for the operation returned by
+// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+message UpdateDatabaseMetadata {
+ // The request for
+ // [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ UpdateDatabaseRequest request = 1;
+
+ // The progress of the
+ // [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
+ // operation.
+ OperationProgress progress = 2;
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is best-effort).
+ google.protobuf.Timestamp cancel_time = 3;
+}
+
+// Enqueues the given DDL statements to be applied, in order but not
+// necessarily all at once, to the database schema at some point (or
+// points) in the future. The server checks that the statements
+// are executable (syntactically valid, name tables that exist, etc.)
+// before enqueueing them, but they may still fail upon
+// later execution (e.g., if a statement from another batch of
+// statements is applied first and it conflicts in some way, or if
+// there is some data-related problem like a `NULL` value in a column to
+// which `NOT NULL` would be added). If a statement fails, all
+// subsequent statements in the batch are automatically cancelled.
+//
+// Each batch of statements is assigned a name which can be used with
+// the [Operations][google.longrunning.Operations] API to monitor
+// progress. See the
+// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+// field for more details.
+message UpdateDatabaseDdlRequest {
+ // Required. The database to update.
+ string database = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+
+ // Required. DDL statements to be applied to the database.
+ repeated string statements = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // If empty, the new update request is assigned an
+ // automatically-generated operation ID. Otherwise, `operation_id`
+ // is used to construct the name of the resulting
+ // [Operation][google.longrunning.Operation].
+ //
+ // Specifying an explicit operation ID simplifies determining
+ // whether the statements were executed in the event that the
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+ // call is replayed, or the return value is otherwise lost: the
+ // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
+ // and `operation_id` fields can be combined to form the
+ // [name][google.longrunning.Operation.name] of the resulting
+ // [longrunning.Operation][google.longrunning.Operation]:
+ // `<database>/operations/<operation_id>`.
+ //
+ // `operation_id` should be unique within the database, and must be
+ // a valid identifier: `[a-z][a-z0-9_]*`. Note that
+ // automatically-generated operation IDs always begin with an
+ // underscore. If the named operation already exists,
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+ // returns `ALREADY_EXISTS`.
+ string operation_id = 3;
+
+ // Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
+ // Contains a protobuf-serialized
+ // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
+ // To generate it, [install](https://grpc.io/docs/protoc-installation/) and
+ // run `protoc` with --include_imports and --descriptor_set_out. For example,
+ // to generate for moon/shot/app.proto, run
+ // ```
+ // $protoc --proto_path=/app_path --proto_path=/lib_path \
+ // --include_imports \
+ // --descriptor_set_out=descriptors.data \
+ // moon/shot/app.proto
+ // ```
+ // For more details, see protobuffer [self
+ // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
+ bytes proto_descriptors = 4 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. This field is exposed to be used by the Spanner Migration Tool.
+ // For more details, see
+ // [SMT](https://github.com/GoogleCloudPlatform/spanner-migration-tool).
+ bool throughput_mode = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Action information extracted from a DDL statement. This proto is used to
+// display the brief info of the DDL statement for the operation
+// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+message DdlStatementActionInfo {
+ // The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
+ // This field is a non-empty string.
+ string action = 1;
+
+ // The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
+ // This field can be empty string for some DDL statement,
+ // e.g. for statement "ANALYZE", `entity_type` = "".
+ string entity_type = 2;
+
+ // The entity name(s) being operated on the DDL statement.
+ // E.g.
+ // 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
+ // 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
+ // 3. For statement "ANALYZE", `entity_names` = [].
+ repeated string entity_names = 3;
+}
+
+// Metadata type for the operation returned by
+// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+message UpdateDatabaseDdlMetadata {
+ // The database being modified.
+ string database = 1 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }];
+
+ // For an update this list contains all the statements. For an
+ // individual statement, this list contains only that statement.
+ repeated string statements = 2;
+
+ // Reports the commit timestamps of all statements that have
+ // succeeded so far, where `commit_timestamps[i]` is the commit
+ // timestamp for the statement `statements[i]`.
+ repeated google.protobuf.Timestamp commit_timestamps = 3;
+
+ // Output only. When true, indicates that the operation is throttled e.g.
+ // due to resource constraints. When resources become available the operation
+ // will resume and this field will be false again.
+ bool throttled = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // The progress of the
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+ // operations. All DDL statements will have continuously updating progress,
+ // and `progress[i]` is the operation progress for `statements[i]`. Also,
+ // `progress[i]` will have start time and end time populated with commit
+ // timestamp of operation, as well as a progress of 100% once the operation
+ // has completed.
+ repeated OperationProgress progress = 5;
+
+ // The brief action info for the DDL statements.
+ // `actions[i]` is the brief info for `statements[i]`.
+ repeated DdlStatementActionInfo actions = 6;
+}
+
+// The request for
+// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
+message DropDatabaseRequest {
+ // Required. The database to be dropped.
+ string database = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+}
+
+// The request for
+// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+message GetDatabaseDdlRequest {
+ // Required. The database whose schema we wish to get.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`
+ string database = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+}
+
+// The response for
+// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+message GetDatabaseDdlResponse {
+ // A list of formatted DDL statements defining the schema of the database
+ // specified in the request.
+ repeated string statements = 1;
+
+ // Proto descriptors stored in the database.
+ // Contains a protobuf-serialized
+ // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
+ // For more details, see protobuffer [self
+ // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
+ bytes proto_descriptors = 2;
+}
+
+// The request for
+// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+message ListDatabaseOperationsRequest {
+ // Required. The instance of the database operations.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // An expression that filters the list of returned operations.
+ //
+ // A filter expression consists of a field name, a
+ // comparison operator, and a value for filtering.
+ // The value must be a string, a number, or a boolean. The comparison operator
+ // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+ // Colon `:` is the contains operator. Filter rules are not case sensitive.
+ //
+ // The following fields in the [Operation][google.longrunning.Operation]
+ // are eligible for filtering:
+ //
+ // * `name` - The name of the long-running operation
+ // * `done` - False if the operation is in progress, else true.
+ // * `metadata.@type` - the type of metadata. For example, the type string
+ // for
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
+ // is
+ // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
+ // * `metadata.<field_name>` - any field in metadata.value.
+ // `metadata.@type` must be specified first, if filtering on metadata
+ // fields.
+ // * `error` - Error associated with the long-running operation.
+ // * `response.@type` - the type of response.
+ // * `response.<field_name>` - any field in response.value.
+ //
+ // You can combine multiple expressions by enclosing each expression in
+ // parentheses. By default, expressions are combined with AND logic. However,
+ // you can specify AND, OR, and NOT logic explicitly.
+ //
+ // Here are a few examples:
+ //
+ // * `done:true` - The operation is complete.
+ // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
+ // `(metadata.source_type:BACKUP) AND` \
+ // `(metadata.backup_info.backup:backup_howl) AND` \
+ // `(metadata.name:restored_howl) AND` \
+ // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
+ // `(error:*)` - Return operations where:
+ // * The operation's metadata type is
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ // * The database is restored from a backup.
+ // * The backup name contains "backup_howl".
+ // * The restored database's name contains "restored_howl".
+ // * The operation started before 2018-03-28T14:50:00Z.
+ // * The operation resulted in an error.
+ string filter = 2;
+
+ // Number of operations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ int32 page_size = 3;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
+ // from a previous
+ // [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
+ // to the same `parent` and with the same `filter`.
+ string page_token = 4;
+}
+
+// The response for
+// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+message ListDatabaseOperationsResponse {
+ // The list of matching database [long-running
+ // operations][google.longrunning.Operation]. Each operation's name will be
+ // prefixed by the database's name. The operation's
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata.
+ repeated google.longrunning.Operation operations = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
+ // call to fetch more of the matching metadata.
+ string next_page_token = 2;
+}
+
+// The request for
+// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+message RestoreDatabaseRequest {
+ // Required. The name of the instance in which to create the
+ // restored database. This instance must be in the same project and
+ // have the same instance configuration as the instance containing
+ // the source backup. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // Required. The id of the database to create and restore to. This
+ // database must not already exist. The `database_id` appended to
+ // `parent` forms the full database name of the form
+ // `projects/<project>/instances/<instance>/databases/<database_id>`.
+ string database_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The source from which to restore.
+ oneof source {
+ // Name of the backup from which to restore. Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ string backup = 3 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Backup"
+ }];
+ }
+
+ // Optional. An encryption configuration describing the encryption type and
+ // key resources in Cloud KMS used to encrypt/decrypt the database to restore
+ // to. If this field is not specified, the restored database will use the same
+ // encryption configuration as the backup by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
+ RestoreDatabaseEncryptionConfig encryption_config = 4
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Encryption configuration for the restored database.
+message RestoreDatabaseEncryptionConfig {
+ // Encryption types for the database to be restored.
+ enum EncryptionType {
+ // Unspecified. Do not use.
+ ENCRYPTION_TYPE_UNSPECIFIED = 0;
+
+ // This is the default option when
+ // [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
+ // is not specified.
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1;
+
+ // Use Google default encryption.
+ GOOGLE_DEFAULT_ENCRYPTION = 2;
+
+ // Use customer managed encryption. If specified, `kms_key_name` must
+ // contain a valid Cloud KMS key.
+ CUSTOMER_MANAGED_ENCRYPTION = 3;
+ }
+
+ // Required. The encryption type of the restored database.
+ EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The Cloud KMS key that will be used to encrypt/decrypt the
+ // restored database. This field should be set only when
+ // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ string kms_key_name = 2 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+
+ // Optional. Specifies the KMS configuration for the one or more keys used to
+ // encrypt the database. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ //
+ // The keys referenced by kms_key_names must fully cover all
+ // regions of the database instance configuration. Some examples:
+ // * For single region database instance configs, specify a single regional
+ // location KMS key.
+ // * For multi-regional database instance configs of type GOOGLE_MANAGED,
+ // either specify a multi-regional location KMS key or multiple regional
+ // location KMS keys that cover all regions in the instance config.
+ // * For a database instance config of type USER_MANAGED, please specify only
+ // regional location KMS keys to cover each region in the instance config.
+ // Multi-regional location KMS keys are not supported for USER_MANAGED
+ // instance configs.
+ repeated string kms_key_names = 3 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+}
+
+// Metadata type for the long-running operation returned by
+// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+message RestoreDatabaseMetadata {
+ // Name of the database being created and restored to.
+ string name = 1 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }];
+
+ // The type of the restore source.
+ RestoreSourceType source_type = 2;
+
+ // Information about the source used to restore the database, as specified by
+ // `source` in
+ // [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest].
+ oneof source_info {
+ // Information about the backup used to restore the database.
+ BackupInfo backup_info = 3;
+ }
+
+ // The progress of the
+ // [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
+ // operation.
+ OperationProgress progress = 4;
+
+ // The time at which cancellation of this operation was received.
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
+ // starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not guaranteed.
+ // Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
+ // `Code.CANCELLED`.
+ google.protobuf.Timestamp cancel_time = 5;
+
+ // If exists, the name of the long-running operation that will be used to
+ // track the post-restore optimization process to optimize the performance of
+ // the restored database, and remove the dependency on the restore source.
+ // The name is of the form
+ // `projects//instances//databases//operations/`
+ // where the is the name of database being created and restored to.
+ // The metadata type of the long-running operation is
+ // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
+ // This long-running operation will be automatically created by the system
+ // after the RestoreDatabase long-running operation completes successfully.
+ // This operation will not be created if the restore was not successful.
+ string optimize_database_operation_name = 6;
+}
+
+// Metadata type for the long-running operation used to track the progress
+// of optimizations performed on a newly restored database. This long-running
+// operation is automatically created by the system after the successful
+// completion of a database restore, and cannot be cancelled.
+message OptimizeRestoredDatabaseMetadata {
+ // Name of the restored database being optimized.
+ string name = 1 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }];
+
+ // The progress of the post-restore optimizations.
+ OperationProgress progress = 2;
+}
+
+// Indicates the type of the restore source.
+enum RestoreSourceType {
+ // No restore associated.
+ TYPE_UNSPECIFIED = 0;
+
+ // A backup was used as the source of the restore.
+ BACKUP = 1;
+}
+
+// A Cloud Spanner database role.
+message DatabaseRole {
+ option (google.api.resource) = {
+ type: "spanner.googleapis.com/DatabaseRole"
+ pattern: "projects/{project}/instances/{instance}/databases/{database}/databaseRoles/{role}"
+ };
+
+ // Required. The name of the database role. Values are of the form
+ // `projects//instances//databases//databaseRoles/`
+ // where `` is as specified in the `CREATE ROLE` DDL statement.
+ string name = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The request for
+// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+message ListDatabaseRolesRequest {
+ // Required. The database whose roles should be listed.
+ // Values are of the form
+ // `projects//instances//databases/`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+
+ // Number of database roles to be returned in the response. If 0 or less,
+ // defaults to the server's maximum allowed page size.
+ int32 page_size = 2;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
+ // from a previous
+ // [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
+ string page_token = 3;
+}
+
+// The response for
+// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+message ListDatabaseRolesResponse {
+ // Database roles that matched the request.
+ repeated DatabaseRole database_roles = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
+ // call to fetch more of the matching roles.
+ string next_page_token = 2;
+}
+
+// The request for
+// [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+message AddSplitPointsRequest {
+ // Required. The database on whose tables/indexes split points are to be
+ // added. Values are of the form
+ // `projects//instances//databases/`.
+ string database = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+
+ // Required. The split points to add.
+ repeated SplitPoints split_points = 2
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. A user-supplied tag associated with the split points.
+ // For example, "intital_data_load", "special_event_1".
+ // Defaults to "CloudAddSplitPointsAPI" if not specified.
+ // The length of the tag must not exceed 50 characters,else will be trimmed.
+ // Only valid UTF8 characters are allowed.
+ string initiator = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The response for
+// [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+message AddSplitPointsResponse {}
+
+// The split points of a table/index.
+message SplitPoints {
+ // A split key.
+ message Key {
+ // Required. The column values making up the split key.
+ google.protobuf.ListValue key_parts = 1
+ [(google.api.field_behavior) = REQUIRED];
+ }
+
+ // The table to split.
+ string table = 1;
+
+ // The index to split.
+ // If specified, the `table` field must refer to the index's base table.
+ string index = 2;
+
+ // Required. The list of split keys, i.e., the split boundaries.
+ repeated Key keys = 3 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The expiration timestamp of the split points.
+ // A timestamp in the past means immediate expiration.
+ // The maximum value can be 30 days in the future.
+ // Defaults to 10 days in the future if not specified.
+ google.protobuf.Timestamp expire_time = 5
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Internal request proto, do not use directly.
+message InternalUpdateGraphOperationRequest {
+ // Internal field, do not use directly.
+ string database = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Database"
+ }
+ ];
+ // Internal field, do not use directly.
+ string operation_id = 2 [(google.api.field_behavior) = REQUIRED];
+ // Internal field, do not use directly.
+ string vm_identity_token = 5 [(google.api.field_behavior) = REQUIRED];
+ // Internal field, do not use directly.
+ double progress = 3 [(google.api.field_behavior) = OPTIONAL];
+ // Internal field, do not use directly.
+ google.rpc.Status status = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Internal response proto, do not use directly.
+message InternalUpdateGraphOperationResponse {}
diff --git a/handwritten/spanner/protos/google/spanner/admin/instance/v1/common.proto b/handwritten/spanner/protos/google/spanner/admin/instance/v1/common.proto
new file mode 100644
index 00000000000..0b5282c7d87
--- /dev/null
+++ b/handwritten/spanner/protos/google/spanner/admin/instance/v1/common.proto
@@ -0,0 +1,64 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.instance.v1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/timestamp.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1";
+option go_package = "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb;instancepb";
+option java_multiple_files = true;
+option java_outer_classname = "CommonProto";
+option java_package = "com.google.spanner.admin.instance.v1";
+option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Instance\\V1";
+option ruby_package = "Google::Cloud::Spanner::Admin::Instance::V1";
+
+// Encapsulates progress related information for a Cloud Spanner long
+// running instance operations.
+message OperationProgress {
+ // Percent completion of the operation.
+ // Values are between 0 and 100 inclusive.
+ int32 progress_percent = 1;
+
+ // Time the request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // If set, the time at which this operation failed or was completed
+ // successfully.
+ google.protobuf.Timestamp end_time = 3;
+}
+
+// Indicates the expected fulfillment period of an operation.
+enum FulfillmentPeriod {
+ // Not specified.
+ FULFILLMENT_PERIOD_UNSPECIFIED = 0;
+
+ // Normal fulfillment period. The operation is expected to complete within
+ // minutes.
+ FULFILLMENT_PERIOD_NORMAL = 1;
+
+ // Extended fulfillment period. It can take up to an hour for the operation
+ // to complete.
+ FULFILLMENT_PERIOD_EXTENDED = 2;
+}
+
+// ReplicaSelection identifies replicas with common properties.
+message ReplicaSelection {
+ // Required. Name of the location of the replicas (e.g., "us-central1").
+ string location = 1 [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/handwritten/spanner/protos/google/spanner/admin/instance/v1/spanner_instance_admin.proto b/handwritten/spanner/protos/google/spanner/admin/instance/v1/spanner_instance_admin.proto
new file mode 100644
index 00000000000..d16ab2ca583
--- /dev/null
+++ b/handwritten/spanner/protos/google/spanner/admin/instance/v1/spanner_instance_admin.proto
@@ -0,0 +1,2184 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.spanner.admin.instance.v1;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/iam/v1/iam_policy.proto";
+import "google/iam/v1/policy.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/spanner/admin/instance/v1/common.proto";
+
+option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1";
+option go_package = "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb;instancepb";
+option java_multiple_files = true;
+option java_outer_classname = "SpannerInstanceAdminProto";
+option java_package = "com.google.spanner.admin.instance.v1";
+option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Instance\\V1";
+option ruby_package = "Google::Cloud::Spanner::Admin::Instance::V1";
+
+// Cloud Spanner Instance Admin API
+//
+// The Cloud Spanner Instance Admin API can be used to create, delete,
+// modify and list instances. Instances are dedicated Cloud Spanner serving
+// and storage resources to be used by Cloud Spanner databases.
+//
+// Each instance has a "configuration", which dictates where the
+// serving resources for the Cloud Spanner instance are located (e.g.,
+// US-central, Europe). Configurations are created by Google based on
+// resource availability.
+//
+// Cloud Spanner billing is based on the instances that exist and their
+// sizes. After an instance exists, there are no additional
+// per-database or per-operation charges for use of the instance
+// (though there may be additional network bandwidth charges).
+// Instances offer isolation: problems with databases in one instance
+// will not affect other instances. However, within an instance
+// databases can affect each other. For example, if one database in an
+// instance receives a lot of requests and consumes most of the
+// instance resources, fewer resources are available for other
+// databases in that instance, and their performance may suffer.
+service InstanceAdmin {
+ option (google.api.default_host) = "spanner.googleapis.com";
+ option (google.api.oauth_scopes) =
+ "https://www.googleapis.com/auth/cloud-platform,"
+ "https://www.googleapis.com/auth/spanner.admin";
+
+ // Lists the supported instance configurations for a given project.
+ //
+ // Returns both Google-managed configurations and user-managed
+ // configurations.
+ rpc ListInstanceConfigs(ListInstanceConfigsRequest)
+ returns (ListInstanceConfigsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*}/instanceConfigs"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Gets information about a particular instance configuration.
+ rpc GetInstanceConfig(GetInstanceConfigRequest) returns (InstanceConfig) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/instanceConfigs/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Creates an instance configuration and begins preparing it to be used. The
+ // returned long-running operation
+ // can be used to track the progress of preparing the new
+ // instance configuration. The instance configuration name is assigned by the
+ // caller. If the named instance configuration already exists,
+ // `CreateInstanceConfig` returns `ALREADY_EXISTS`.
+ //
+ // Immediately after the request returns:
+ //
+ // * The instance configuration is readable via the API, with all requested
+ // attributes. The instance configuration's
+ // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ // field is set to true. Its state is `CREATING`.
+ //
+ // While the operation is pending:
+ //
+ // * Cancelling the operation renders the instance configuration immediately
+ // unreadable via the API.
+ // * Except for deleting the creating resource, all other attempts to modify
+ // the instance configuration are rejected.
+ //
+ // Upon completion of the returned operation:
+ //
+ // * Instances can be created using the instance configuration.
+ // * The instance configuration's
+ // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ // field becomes false. Its state becomes `READY`.
+ //
+ // The returned long-running operation will
+ // have a name of the format
+ // `/operations/` and can be used to track
+ // creation of the instance configuration. The
+ // metadata field type is
+ // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
+ // The response field type is
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
+ // successful.
+ //
+ // Authorization requires `spanner.instanceConfigs.create` permission on
+ // the resource
+ // [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
+ rpc CreateInstanceConfig(CreateInstanceConfigRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*}/instanceConfigs"
+ body: "*"
+ };
+ option (google.api.method_signature) =
+ "parent,instance_config,instance_config_id";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.instance.v1.InstanceConfig"
+ metadata_type: "google.spanner.admin.instance.v1.CreateInstanceConfigMetadata"
+ };
+ }
+
+ // Updates an instance configuration. The returned
+ // long-running operation can be used to track
+ // the progress of updating the instance. If the named instance configuration
+ // does not exist, returns `NOT_FOUND`.
+ //
+ // Only user-managed configurations can be updated.
+ //
+ // Immediately after the request returns:
+ //
+ // * The instance configuration's
+ // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ // field is set to true.
+ //
+ // While the operation is pending:
+ //
+ // * Cancelling the operation sets its metadata's
+ // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
+ // The operation is guaranteed to succeed at undoing all changes, after
+ // which point it terminates with a `CANCELLED` status.
+ // * All other attempts to modify the instance configuration are rejected.
+ // * Reading the instance configuration via the API continues to give the
+ // pre-request values.
+ //
+ // Upon completion of the returned operation:
+ //
+ // * Creating instances using the instance configuration uses the new
+ // values.
+ // * The new values of the instance configuration are readable via the API.
+ // * The instance configuration's
+ // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ // field becomes false.
+ //
+ // The returned long-running operation will
+ // have a name of the format
+ // `/operations/` and can be used to track
+ // the instance configuration modification. The
+ // metadata field type is
+ // [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
+ // The response field type is
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
+ // successful.
+ //
+ // Authorization requires `spanner.instanceConfigs.update` permission on
+ // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
+ rpc UpdateInstanceConfig(UpdateInstanceConfigRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ patch: "/v1/{instance_config.name=projects/*/instanceConfigs/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "instance_config,update_mask";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.instance.v1.InstanceConfig"
+ metadata_type: "google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata"
+ };
+ }
+
+ // Deletes the instance configuration. Deletion is only allowed when no
+ // instances are using the configuration. If any instances are using
+ // the configuration, returns `FAILED_PRECONDITION`.
+ //
+ // Only user-managed configurations can be deleted.
+ //
+ // Authorization requires `spanner.instanceConfigs.delete` permission on
+ // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
+ rpc DeleteInstanceConfig(DeleteInstanceConfigRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/instanceConfigs/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Lists the user-managed instance configuration long-running
+ // operations in the given project. An instance
+ // configuration operation has a name of the form
+ // `projects//instanceConfigs//operations/`.
+ // The long-running operation
+ // metadata field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations. Operations returned are ordered by
+ // `operation.metadata.value.start_time` in descending order starting
+ // from the most recently started operation.
+ rpc ListInstanceConfigOperations(ListInstanceConfigOperationsRequest)
+ returns (ListInstanceConfigOperationsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*}/instanceConfigOperations"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Lists all instances in the given project.
+ rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*}/instances"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Lists all instance partitions for the given instance.
+ rpc ListInstancePartitions(ListInstancePartitionsRequest)
+ returns (ListInstancePartitionsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*}/instancePartitions"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Gets information about a particular instance.
+ rpc GetInstance(GetInstanceRequest) returns (Instance) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/instances/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Creates an instance and begins preparing it to begin serving. The
+ // returned long-running operation
+ // can be used to track the progress of preparing the new
+ // instance. The instance name is assigned by the caller. If the
+ // named instance already exists, `CreateInstance` returns
+ // `ALREADY_EXISTS`.
+ //
+ // Immediately upon completion of this request:
+ //
+ // * The instance is readable via the API, with all requested attributes
+ // but no allocated resources. Its state is `CREATING`.
+ //
+ // Until completion of the returned operation:
+ //
+ // * Cancelling the operation renders the instance immediately unreadable
+ // via the API.
+ // * The instance can be deleted.
+ // * All other attempts to modify the instance are rejected.
+ //
+ // Upon completion of the returned operation:
+ //
+ // * Billing for all successfully-allocated resources begins (some types
+ // may have lower than the requested levels).
+ // * Databases can be created in the instance.
+ // * The instance's allocated resource levels are readable via the API.
+ // * The instance's state becomes `READY`.
+ //
+ // The returned long-running operation will
+ // have a name of the format `/operations/` and
+ // can be used to track creation of the instance. The
+ // metadata field type is
+ // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
+ // The response field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
+ rpc CreateInstance(CreateInstanceRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*}/instances"
+ body: "*"
+ };
+ option (google.api.method_signature) = "parent,instance_id,instance";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.instance.v1.Instance"
+ metadata_type: "google.spanner.admin.instance.v1.CreateInstanceMetadata"
+ };
+ }
+
+ // Updates an instance, and begins allocating or releasing resources
+ // as requested. The returned long-running operation can be used to track the
+ // progress of updating the instance. If the named instance does not
+ // exist, returns `NOT_FOUND`.
+ //
+ // Immediately upon completion of this request:
+ //
+ // * For resource types for which a decrease in the instance's allocation
+ // has been requested, billing is based on the newly-requested level.
+ //
+ // Until completion of the returned operation:
+ //
+ // * Cancelling the operation sets its metadata's
+ // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
+ // and begins restoring resources to their pre-request values. The
+ // operation is guaranteed to succeed at undoing all resource changes,
+ // after which point it terminates with a `CANCELLED` status.
+ // * All other attempts to modify the instance are rejected.
+ // * Reading the instance via the API continues to give the pre-request
+ // resource levels.
+ //
+ // Upon completion of the returned operation:
+ //
+ // * Billing begins for all successfully-allocated resources (some types
+ // may have lower than the requested levels).
+ // * All newly-reserved resources are available for serving the instance's
+ // tables.
+ // * The instance's new resource levels are readable via the API.
+ //
+ // The returned long-running operation will
+ // have a name of the format `/operations/` and
+ // can be used to track the instance modification. The
+ // metadata field type is
+ // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
+ // The response field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
+ //
+ // Authorization requires `spanner.instances.update` permission on
+ // the resource [name][google.spanner.admin.instance.v1.Instance.name].
+ rpc UpdateInstance(UpdateInstanceRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ patch: "/v1/{instance.name=projects/*/instances/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "instance,field_mask";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.instance.v1.Instance"
+ metadata_type: "google.spanner.admin.instance.v1.UpdateInstanceMetadata"
+ };
+ }
+
+ // Deletes an instance.
+ //
+ // Immediately upon completion of the request:
+ //
+ // * Billing ceases for all of the instance's reserved resources.
+ //
+ // Soon afterward:
+ //
+ // * The instance and *all of its databases* immediately and
+ // irrevocably disappear from the API. All data in the databases
+ // is permanently deleted.
+ rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/instances/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Sets the access control policy on an instance resource. Replaces any
+ // existing policy.
+ //
+ // Authorization requires `spanner.instances.setIamPolicy` on
+ // [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest)
+ returns (google.iam.v1.Policy) {
+ option (google.api.http) = {
+ post: "/v1/{resource=projects/*/instances/*}:setIamPolicy"
+ body: "*"
+ };
+ option (google.api.method_signature) = "resource,policy";
+ }
+
+ // Gets the access control policy for an instance resource. Returns an empty
+ // policy if an instance exists but does not have a policy set.
+ //
+ // Authorization requires `spanner.instances.getIamPolicy` on
+ // [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest)
+ returns (google.iam.v1.Policy) {
+ option (google.api.http) = {
+ post: "/v1/{resource=projects/*/instances/*}:getIamPolicy"
+ body: "*"
+ };
+ option (google.api.method_signature) = "resource";
+ }
+
+ // Returns permissions that the caller has on the specified instance resource.
+ //
+ // Attempting this RPC on a non-existent Cloud Spanner instance resource will
+ // result in a NOT_FOUND error if the user has `spanner.instances.list`
+ // permission on the containing Google Cloud Project. Otherwise returns an
+ // empty set of permissions.
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest)
+ returns (google.iam.v1.TestIamPermissionsResponse) {
+ option (google.api.http) = {
+ post: "/v1/{resource=projects/*/instances/*}:testIamPermissions"
+ body: "*"
+ };
+ option (google.api.method_signature) = "resource,permissions";
+ }
+
+ // Gets information about a particular instance partition.
+ rpc GetInstancePartition(GetInstancePartitionRequest)
+ returns (InstancePartition) {
+ option (google.api.http) = {
+ get: "/v1/{name=projects/*/instances/*/instancePartitions/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Creates an instance partition and begins preparing it to be used. The
+ // returned long-running operation
+ // can be used to track the progress of preparing the new instance partition.
+ // The instance partition name is assigned by the caller. If the named
+ // instance partition already exists, `CreateInstancePartition` returns
+ // `ALREADY_EXISTS`.
+ //
+ // Immediately upon completion of this request:
+ //
+ // * The instance partition is readable via the API, with all requested
+ // attributes but no allocated resources. Its state is `CREATING`.
+ //
+ // Until completion of the returned operation:
+ //
+ // * Cancelling the operation renders the instance partition immediately
+ // unreadable via the API.
+ // * The instance partition can be deleted.
+ // * All other attempts to modify the instance partition are rejected.
+ //
+ // Upon completion of the returned operation:
+ //
+ // * Billing for all successfully-allocated resources begins (some types
+ // may have lower than the requested levels).
+ // * Databases can start using this instance partition.
+ // * The instance partition's allocated resource levels are readable via the
+ // API.
+ // * The instance partition's state becomes `READY`.
+ //
+ // The returned long-running operation will
+ // have a name of the format
+ // `/operations/` and can be used to
+ // track creation of the instance partition. The
+ // metadata field type is
+ // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
+ // The response field type is
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
+ // successful.
+ rpc CreateInstancePartition(CreateInstancePartitionRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{parent=projects/*/instances/*}/instancePartitions"
+ body: "*"
+ };
+ option (google.api.method_signature) =
+ "parent,instance_partition,instance_partition_id";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.instance.v1.InstancePartition"
+ metadata_type: "google.spanner.admin.instance.v1.CreateInstancePartitionMetadata"
+ };
+ }
+
+ // Deletes an existing instance partition. Requires that the
+ // instance partition is not used by any database or backup and is not the
+ // default instance partition of an instance.
+ //
+ // Authorization requires `spanner.instancePartitions.delete` permission on
+ // the resource
+ // [name][google.spanner.admin.instance.v1.InstancePartition.name].
+ rpc DeleteInstancePartition(DeleteInstancePartitionRequest)
+ returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/v1/{name=projects/*/instances/*/instancePartitions/*}"
+ };
+ option (google.api.method_signature) = "name";
+ }
+
+ // Updates an instance partition, and begins allocating or releasing resources
+ // as requested. The returned long-running operation can be used to track the
+ // progress of updating the instance partition. If the named instance
+ // partition does not exist, returns `NOT_FOUND`.
+ //
+ // Immediately upon completion of this request:
+ //
+ // * For resource types for which a decrease in the instance partition's
+ // allocation has been requested, billing is based on the newly-requested
+ // level.
+ //
+ // Until completion of the returned operation:
+ //
+ // * Cancelling the operation sets its metadata's
+ // [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
+ // and begins restoring resources to their pre-request values. The
+ // operation is guaranteed to succeed at undoing all resource changes,
+ // after which point it terminates with a `CANCELLED` status.
+ // * All other attempts to modify the instance partition are rejected.
+ // * Reading the instance partition via the API continues to give the
+ // pre-request resource levels.
+ //
+ // Upon completion of the returned operation:
+ //
+ // * Billing begins for all successfully-allocated resources (some types
+ // may have lower than the requested levels).
+ // * All newly-reserved resources are available for serving the instance
+ // partition's tables.
+ // * The instance partition's new resource levels are readable via the API.
+ //
+ // The returned long-running operation will
+ // have a name of the format
+  // `<instance_partition_name>/operations/<operation_id>` and can be used to
+ // track the instance partition modification. The
+ // metadata field type is
+ // [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
+ // The response field type is
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
+ // successful.
+ //
+ // Authorization requires `spanner.instancePartitions.update` permission on
+ // the resource
+ // [name][google.spanner.admin.instance.v1.InstancePartition.name].
+ rpc UpdateInstancePartition(UpdateInstancePartitionRequest)
+ returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ patch: "/v1/{instance_partition.name=projects/*/instances/*/instancePartitions/*}"
+ body: "*"
+ };
+ option (google.api.method_signature) = "instance_partition,field_mask";
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.instance.v1.InstancePartition"
+ metadata_type: "google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata"
+ };
+ }
+
+ // Lists instance partition long-running operations in the given instance.
+ // An instance partition operation has a name of the form
+  // `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
+ // The long-running operation
+ // metadata field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations. Operations returned are ordered by
+ // `operation.metadata.value.start_time` in descending order starting from the
+ // most recently started operation.
+ //
+ // Authorization requires `spanner.instancePartitionOperations.list`
+ // permission on the resource
+ // [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
+ rpc ListInstancePartitionOperations(ListInstancePartitionOperationsRequest)
+ returns (ListInstancePartitionOperationsResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/instances/*}/instancePartitionOperations"
+ };
+ option (google.api.method_signature) = "parent";
+ }
+
+ // Moves an instance to the target instance configuration. You can use the
+ // returned long-running operation to track
+ // the progress of moving the instance.
+ //
+ // `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
+ // the following criteria:
+ //
+ // * Is undergoing a move to a different instance configuration
+ // * Has backups
+ // * Has an ongoing update
+ // * Contains any CMEK-enabled databases
+ // * Is a free trial instance
+ //
+ // While the operation is pending:
+ //
+ // * All other attempts to modify the instance, including changes to its
+ // compute capacity, are rejected.
+ // * The following database and backup admin operations are rejected:
+ //
+ // * `DatabaseAdmin.CreateDatabase`
+ // * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
+ // specified in the request.)
+ // * `DatabaseAdmin.RestoreDatabase`
+ // * `DatabaseAdmin.CreateBackup`
+ // * `DatabaseAdmin.CopyBackup`
+ //
+ // * Both the source and target instance configurations are subject to
+ // hourly compute and storage charges.
+ // * The instance might experience higher read-write latencies and a higher
+ // transaction abort rate. However, moving an instance doesn't cause any
+ // downtime.
+ //
+ // The returned long-running operation has
+ // a name of the format
+  // `<instance_name>/operations/<operation_id>` and can be used to track
+ // the move instance operation. The
+ // metadata field type is
+ // [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
+ // The response field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance],
+ // if successful.
+ // Cancelling the operation sets its metadata's
+ // [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
+ // Cancellation is not immediate because it involves moving any data
+ // previously moved to the target instance configuration back to the original
+ // instance configuration. You can use this operation to track the progress of
+ // the cancellation. Upon successful completion of the cancellation, the
+ // operation terminates with `CANCELLED` status.
+ //
+ // If not cancelled, upon completion of the returned operation:
+ //
+ // * The instance successfully moves to the target instance
+ // configuration.
+ // * You are billed for compute and storage in target instance
+ // configuration.
+ //
+ // Authorization requires the `spanner.instances.update` permission on
+ // the resource [instance][google.spanner.admin.instance.v1.Instance].
+ //
+ // For more details, see
+ // [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
+ rpc MoveInstance(MoveInstanceRequest) returns (google.longrunning.Operation) {
+ option (google.api.http) = {
+ post: "/v1/{name=projects/*/instances/*}:move"
+ body: "*"
+ };
+ option (google.longrunning.operation_info) = {
+ response_type: "google.spanner.admin.instance.v1.MoveInstanceResponse"
+ metadata_type: "google.spanner.admin.instance.v1.MoveInstanceMetadata"
+ };
+ }
+}
+
+message ReplicaInfo {
+ // Indicates the type of replica. See the [replica types
+ // documentation](https://cloud.google.com/spanner/docs/replication#replica_types)
+ // for more details.
+ enum ReplicaType {
+ // Not specified.
+ TYPE_UNSPECIFIED = 0;
+
+ // Read-write replicas support both reads and writes. These replicas:
+ //
+ // * Maintain a full copy of your data.
+ // * Serve reads.
+ // * Can vote whether to commit a write.
+ // * Participate in leadership election.
+ // * Are eligible to become a leader.
+ READ_WRITE = 1;
+
+ // Read-only replicas only support reads (not writes). Read-only replicas:
+ //
+ // * Maintain a full copy of your data.
+ // * Serve reads.
+ // * Do not participate in voting to commit writes.
+ // * Are not eligible to become a leader.
+ READ_ONLY = 2;
+
+ // Witness replicas don't support reads but do participate in voting to
+ // commit writes. Witness replicas:
+ //
+ // * Do not maintain a full copy of data.
+ // * Do not serve reads.
+ // * Vote whether to commit writes.
+ // * Participate in leader election but are not eligible to become leader.
+ WITNESS = 3;
+ }
+
+ // The location of the serving resources, e.g., "us-central1".
+ string location = 1;
+
+ // The type of replica.
+ ReplicaType type = 2;
+
+ // If true, this location is designated as the default leader location where
+ // leader replicas are placed. See the [region types
+ // documentation](https://cloud.google.com/spanner/docs/instances#region_types)
+ // for more details.
+ bool default_leader_location = 3;
+}
+
+// A possible configuration for a Cloud Spanner instance. Configurations
+// define the geographic placement of nodes and their replication.
+message InstanceConfig {
+ option (google.api.resource) = {
+ type: "spanner.googleapis.com/InstanceConfig"
+ pattern: "projects/{project}/instanceConfigs/{instance_config}"
+ plural: "instanceConfigs"
+ singular: "instanceConfig"
+ };
+
+ // The type of this configuration.
+ enum Type {
+ // Unspecified.
+ TYPE_UNSPECIFIED = 0;
+
+ // Google-managed configuration.
+ GOOGLE_MANAGED = 1;
+
+ // User-managed configuration.
+ USER_MANAGED = 2;
+ }
+
+ // Indicates the current state of the instance configuration.
+ enum State {
+ // Not specified.
+ STATE_UNSPECIFIED = 0;
+
+ // The instance configuration is still being created.
+ CREATING = 1;
+
+ // The instance configuration is fully created and ready to be used to
+ // create instances.
+ READY = 2;
+ }
+
+ // Describes the availability for free instances to be created in an instance
+ // configuration.
+ enum FreeInstanceAvailability {
+ // Not specified.
+ FREE_INSTANCE_AVAILABILITY_UNSPECIFIED = 0;
+
+ // Indicates that free instances are available to be created in this
+ // instance configuration.
+ AVAILABLE = 1;
+
+ // Indicates that free instances are not supported in this instance
+ // configuration.
+ UNSUPPORTED = 2;
+
+ // Indicates that free instances are currently not available to be created
+ // in this instance configuration.
+ DISABLED = 3;
+
+ // Indicates that additional free instances cannot be created in this
+ // instance configuration because the project has reached its limit of free
+ // instances.
+ QUOTA_EXCEEDED = 4;
+ }
+
+ // Indicates the quorum type of this instance configuration.
+ enum QuorumType {
+ // Quorum type not specified.
+ QUORUM_TYPE_UNSPECIFIED = 0;
+
+ // An instance configuration tagged with `REGION` quorum type forms a write
+ // quorum in a single region.
+ REGION = 1;
+
+ // An instance configuration tagged with the `DUAL_REGION` quorum type forms
+ // a write quorum with exactly two read-write regions in a multi-region
+ // configuration.
+ //
+ // This instance configuration requires failover in the event of
+ // regional failures.
+ DUAL_REGION = 2;
+
+ // An instance configuration tagged with the `MULTI_REGION` quorum type
+ // forms a write quorum from replicas that are spread across more than one
+ // region in a multi-region configuration.
+ MULTI_REGION = 3;
+ }
+
+ // A unique identifier for the instance configuration. Values
+ // are of the form
+  // `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
+ //
+ // User instance configuration must start with `custom-`.
+ string name = 1;
+
+ // The name of this instance configuration as it appears in UIs.
+ string display_name = 2;
+
+ // Output only. Whether this instance configuration is a Google-managed or
+ // user-managed configuration.
+ Type config_type = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // The geographic placement of nodes in this instance configuration and their
+ // replication properties.
+ //
+ // To create user-managed configurations, input
+ // `replicas` must include all replicas in `replicas` of the `base_config`
+ // and include one or more replicas in the `optional_replicas` of the
+ // `base_config`.
+ repeated ReplicaInfo replicas = 3;
+
+ // Output only. The available optional replicas to choose from for
+ // user-managed configurations. Populated for Google-managed configurations.
+ repeated ReplicaInfo optional_replicas = 6
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Base configuration name, e.g. projects/<project>/instanceConfigs/nam3,
+ // based on which this configuration is created. Only set for user-managed
+ // configurations. `base_config` must refer to a configuration of type
+ // `GOOGLE_MANAGED` in the same project as this configuration.
+ string base_config = 7 [(google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstanceConfig"
+ }];
+
+ // Cloud Labels are a flexible and lightweight mechanism for organizing cloud
+ // resources into groups that reflect a customer's organizational needs and
+ // deployment strategies. Cloud Labels can be used to filter collections of
+ // resources. They can be used to control how resource metrics are aggregated.
+ // And they can be used as arguments to policy management rules (e.g. route,
+ // firewall, load balancing, etc.).
+ //
+ // * Label keys must be between 1 and 63 characters long and must conform to
+ // the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
+ // * Label values must be between 0 and 63 characters long and must conform
+ // to the regular expression `[a-z0-9_-]{0,63}`.
+ // * No more than 64 labels can be associated with a given resource.
+ //
+ // See https://goo.gl/xmQnxf for more information on and examples of labels.
+ //
+ // If you plan to use labels in your own code, please note that additional
+ // characters may be allowed in the future. Therefore, you are advised to use
+ // an internal label representation, such as JSON, which doesn't rely upon
+ // specific characters being disallowed. For example, representing labels
+ // as the string: name + "_" + value would prove problematic if we were to
+ // allow "_" in a future release.
+  map<string, string> labels = 8;
+
+ // etag is used for optimistic concurrency control as a way
+ // to help prevent simultaneous updates of a instance configuration from
+ // overwriting each other. It is strongly suggested that systems make use of
+ // the etag in the read-modify-write cycle to perform instance configuration
+ // updates in order to avoid race conditions: An etag is returned in the
+ // response which contains instance configurations, and systems are expected
+ // to put that etag in the request to update instance configuration to ensure
+ // that their change is applied to the same version of the instance
+ // configuration. If no etag is provided in the call to update the instance
+ // configuration, then the existing instance configuration is overwritten
+ // blindly.
+ string etag = 9;
+
+ // Allowed values of the "default_leader" schema option for databases in
+ // instances that use this instance configuration.
+ repeated string leader_options = 4;
+
+ // Output only. If true, the instance configuration is being created or
+ // updated. If false, there are no ongoing operations for the instance
+ // configuration.
+ bool reconciling = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The current instance configuration state. Applicable only for
+ // `USER_MANAGED` configurations.
+ State state = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Describes whether free instances are available to be created
+ // in this instance configuration.
+ FreeInstanceAvailability free_instance_availability = 12
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The `QuorumType` of the instance configuration.
+ QuorumType quorum_type = 18 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The storage limit in bytes per processing unit.
+ int64 storage_limit_per_processing_unit = 19
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// ReplicaComputeCapacity describes the amount of server resources that are
+// allocated to each replica identified by the replica selection.
+message ReplicaComputeCapacity {
+ // Required. Identifies replicas by specified properties.
+ // All replicas in the selection have the same amount of compute capacity.
+ ReplicaSelection replica_selection = 1
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Compute capacity allocated to each replica identified by the specified
+ // selection.
+ // The unit is selected based on the unit used to specify the instance size
+ // for non-autoscaling instances, or the unit used in autoscaling limit for
+ // autoscaling instances.
+ oneof compute_capacity {
+ // The number of nodes allocated to each replica.
+ //
+ // This may be zero in API responses for instances that are not yet in
+ // state `READY`.
+ int32 node_count = 2;
+
+ // The number of processing units allocated to each replica.
+ //
+ // This may be zero in API responses for instances that are not yet in
+ // state `READY`.
+ int32 processing_units = 3;
+ }
+}
+
+// Autoscaling configuration for an instance.
+message AutoscalingConfig {
+ // The autoscaling limits for the instance. Users can define the minimum and
+ // maximum compute capacity allocated to the instance, and the autoscaler will
+ // only scale within that range. Users can either use nodes or processing
+ // units to specify the limits, but should use the same unit to set both the
+ // min_limit and max_limit.
+ message AutoscalingLimits {
+ // The minimum compute capacity for the instance.
+ oneof min_limit {
+ // Minimum number of nodes allocated to the instance. If set, this number
+ // should be greater than or equal to 1.
+ int32 min_nodes = 1;
+
+ // Minimum number of processing units allocated to the instance. If set,
+ // this number should be multiples of 1000.
+ int32 min_processing_units = 2;
+ }
+
+ // The maximum compute capacity for the instance. The maximum compute
+ // capacity should be less than or equal to 10X the minimum compute
+ // capacity.
+ oneof max_limit {
+ // Maximum number of nodes allocated to the instance. If set, this number
+ // should be greater than or equal to min_nodes.
+ int32 max_nodes = 3;
+
+ // Maximum number of processing units allocated to the instance. If set,
+ // this number should be multiples of 1000 and be greater than or equal to
+ // min_processing_units.
+ int32 max_processing_units = 4;
+ }
+ }
+
+ // The autoscaling targets for an instance.
+ message AutoscalingTargets {
+ // Optional. The target high priority cpu utilization percentage that the
+ // autoscaler should be trying to achieve for the instance. This number is
+ // on a scale from 0 (no utilization) to 100 (full utilization). The valid
+ // range is [10, 90] inclusive. If not specified or set to 0, the autoscaler
+ // skips scaling based on high priority CPU utilization.
+ int32 high_priority_cpu_utilization_percent = 1
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The target total CPU utilization percentage that the autoscaler
+ // should be trying to achieve for the instance. This number is on a scale
+ // from 0 (no utilization) to 100 (full utilization). The valid range is
+ // [10, 90] inclusive. If not specified or set to 0, the autoscaler skips
+ // scaling based on total CPU utilization. If both
+ // `high_priority_cpu_utilization_percent` and
+ // `total_cpu_utilization_percent` are specified, the autoscaler provisions
+ // the larger of the two required compute capacities to satisfy both
+ // targets.
+ int32 total_cpu_utilization_percent = 4
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Required. The target storage utilization percentage that the autoscaler
+ // should be trying to achieve for the instance. This number is on a scale
+ // from 0 (no utilization) to 100 (full utilization). The valid range is
+ // [10, 99] inclusive.
+ int32 storage_utilization_percent = 2
+ [(google.api.field_behavior) = REQUIRED];
+ }
+
+ // AsymmetricAutoscalingOption specifies the scaling of replicas identified by
+ // the given selection.
+ message AsymmetricAutoscalingOption {
+ // Overrides the top-level autoscaling configuration for the replicas
+ // identified by `replica_selection`. All fields in this message are
+ // optional. Any unspecified fields will use the corresponding values from
+ // the top-level autoscaling configuration.
+ message AutoscalingConfigOverrides {
+ // Optional. If specified, overrides the min/max limit in the top-level
+ // autoscaling configuration for the selected replicas.
+ AutoscalingLimits autoscaling_limits = 1
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. If specified, overrides the autoscaling target
+ // high_priority_cpu_utilization_percent in the top-level autoscaling
+ // configuration for the selected replicas.
+ int32 autoscaling_target_high_priority_cpu_utilization_percent = 2
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. If specified, overrides the
+ // autoscaling target `total_cpu_utilization_percent`
+ // in the top-level autoscaling configuration for the selected replicas.
+ int32 autoscaling_target_total_cpu_utilization_percent = 4
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. If true, disables high priority CPU autoscaling for the
+ // selected replicas and ignores
+ // [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
+ // in the top-level autoscaling configuration.
+ //
+ // When setting this field to true, setting
+ // [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
+ // field to a non-zero value for the same replica is not supported.
+ //
+ // If false, the
+ // [autoscaling_target_high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_high_priority_cpu_utilization_percent]
+ // field in the replica will be used if set to a non-zero value.
+ // Otherwise, the
+ // [high_priority_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.high_priority_cpu_utilization_percent]
+ // field in the top-level autoscaling configuration will be used.
+ //
+ // Setting both
+ // [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
+ // and
+ // [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
+ // to true for the same replica is not supported.
+ bool disable_high_priority_cpu_autoscaling = 5
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. If true, disables total CPU autoscaling for the selected
+ // replicas and ignores
+ // [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
+ // in the top-level autoscaling configuration.
+ //
+ // When setting this field to true, setting
+ // [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
+ // field to a non-zero value for the same replica is not supported.
+ //
+ // If false, the
+ // [autoscaling_target_total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.autoscaling_target_total_cpu_utilization_percent]
+ // field in the replica will be used if set to a non-zero value.
+ // Otherwise, the
+ // [total_cpu_utilization_percent][google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets.total_cpu_utilization_percent]
+ // field in the top-level autoscaling configuration will be used.
+ //
+ // Setting both
+ // [disable_high_priority_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_high_priority_cpu_autoscaling]
+ // and
+ // [disable_total_cpu_autoscaling][google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.disable_total_cpu_autoscaling]
+ // to true for the same replica is not supported.
+ bool disable_total_cpu_autoscaling = 6
+ [(google.api.field_behavior) = OPTIONAL];
+ }
+
+ // Required. Selects the replicas to which this AsymmetricAutoscalingOption
+ // applies. Only read-only replicas are supported.
+ ReplicaSelection replica_selection = 1
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. Overrides applied to the top-level autoscaling configuration
+ // for the selected replicas.
+ AutoscalingConfigOverrides overrides = 2
+ [(google.api.field_behavior) = OPTIONAL];
+ }
+
+ // Required. Autoscaling limits for an instance.
+ AutoscalingLimits autoscaling_limits = 1
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The autoscaling targets for an instance.
+ AutoscalingTargets autoscaling_targets = 2
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. Optional asymmetric autoscaling options.
+ // Replicas matching the replica selection criteria will be autoscaled
+ // independently from other replicas. The autoscaler will scale the replicas
+ // based on the utilization of replicas identified by the replica selection.
+ // Replica selections should not overlap with each other.
+ //
+ // Other replicas (those do not match any replica selection) will be
+ // autoscaled together and will have the same compute capacity allocated to
+ // them.
+ repeated AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// An isolated set of Cloud Spanner resources on which databases can be hosted.
+message Instance {
+ option (google.api.resource) = {
+ type: "spanner.googleapis.com/Instance"
+ pattern: "projects/{project}/instances/{instance}"
+ plural: "instances"
+ singular: "instance"
+ };
+
+ // Indicates the current state of the instance.
+ enum State {
+ // Not specified.
+ STATE_UNSPECIFIED = 0;
+
+ // The instance is still being created. Resources may not be
+ // available yet, and operations such as database creation may not
+ // work.
+ CREATING = 1;
+
+ // The instance is fully created and ready to do work such as
+ // creating databases.
+ READY = 2;
+ }
+
+ // The type of this instance. The type can be used to distinguish product
+ // variants, that can affect aspects like: usage restrictions, quotas and
+ // billing. Currently this is used to distinguish FREE_INSTANCE vs PROVISIONED
+ // instances.
+ enum InstanceType {
+ // Not specified.
+ INSTANCE_TYPE_UNSPECIFIED = 0;
+
+ // Provisioned instances have dedicated resources, standard usage limits and
+ // support.
+ PROVISIONED = 1;
+
+ // Free instances provide no guarantee for dedicated resources,
+ // [node_count, processing_units] should be 0. They come
+ // with stricter usage limits and limited support.
+ FREE_INSTANCE = 2;
+ }
+
+ // The edition selected for this instance. Different editions provide
+ // different capabilities at different price points.
+ enum Edition {
+ // Edition not specified.
+ EDITION_UNSPECIFIED = 0;
+
+ // Standard edition.
+ STANDARD = 1;
+
+ // Enterprise edition.
+ ENTERPRISE = 2;
+
+ // Enterprise Plus edition.
+ ENTERPRISE_PLUS = 3;
+ }
+
+ // Indicates the
+ // [default backup
+ // schedule](https://cloud.google.com/spanner/docs/backup#default-backup-schedules)
+ // behavior for new databases within the instance.
+ enum DefaultBackupScheduleType {
+ // Not specified.
+ DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED = 0;
+
+ // A default backup schedule isn't created automatically when a new database
+ // is created in the instance.
+ NONE = 1;
+
+ // A default backup schedule is created automatically when a new database
+ // is created in the instance. The default backup schedule creates a full
+ // backup every 24 hours. These full backups are retained for 7 days.
+ // You can edit or delete the default backup schedule once it's created.
+ AUTOMATIC = 2;
+ }
+
+ // Required. A unique identifier for the instance, which cannot be changed
+ // after the instance is created. Values are of the form
+  // `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
+ // segment of the name must be between 2 and 64 characters in length.
+ string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The name of the instance's configuration. Values are of the form
+  // `projects/<project>/instanceConfigs/<configuration>`. See
+ // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ string config = 2 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstanceConfig"
+ }
+ ];
+
+ // Required. The descriptive name for this instance as it appears in UIs.
+ // Must be unique per project and between 4 and 30 characters in length.
+ string display_name = 3 [(google.api.field_behavior) = REQUIRED];
+
+ // The number of nodes allocated to this instance. At most, one of either
+ // `node_count` or `processing_units` should be present in the message.
+ //
+ // Users can set the `node_count` field to specify the target number of nodes
+ // allocated to the instance.
+ //
+ // If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
+ // field and reflects the current number of nodes allocated to the instance.
+ //
+ // This might be zero in API responses for instances that are not yet in the
+ // `READY` state.
+ //
+ //
+ // For more information, see
+ // [Compute capacity, nodes, and processing
+ // units](https://cloud.google.com/spanner/docs/compute-capacity).
+ int32 node_count = 5;
+
+ // The number of processing units allocated to this instance. At most, one of
+ // either `processing_units` or `node_count` should be present in the message.
+ //
+ // Users can set the `processing_units` field to specify the target number of
+ // processing units allocated to the instance.
+ //
+ // If autoscaling is enabled, `processing_units` is treated as an
+ // `OUTPUT_ONLY` field and reflects the current number of processing units
+ // allocated to the instance.
+ //
+ // This might be zero in API responses for instances that are not yet in the
+ // `READY` state.
+ //
+ //
+ // For more information, see
+ // [Compute capacity, nodes and processing
+ // units](https://cloud.google.com/spanner/docs/compute-capacity).
+ int32 processing_units = 9;
+
+ // Output only. Lists the compute capacity per ReplicaSelection. A replica
+ // selection identifies a set of replicas with common properties. Replicas
+ // identified by a ReplicaSelection are scaled with the same compute capacity.
+ repeated ReplicaComputeCapacity replica_compute_capacity = 19
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Optional. The autoscaling configuration. Autoscaling is enabled if this
+ // field is set. When autoscaling is enabled, node_count and processing_units
+ // are treated as OUTPUT_ONLY fields and reflect the current compute capacity
+ // allocated to the instance.
+ AutoscalingConfig autoscaling_config = 17
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Output only. The current instance state. For
+ // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
+ // the state must be either omitted or set to `CREATING`. For
+ // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
+ // the state must be either omitted or set to `READY`.
+ State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Cloud Labels are a flexible and lightweight mechanism for organizing cloud
+ // resources into groups that reflect a customer's organizational needs and
+ // deployment strategies. Cloud Labels can be used to filter collections of
+ // resources. They can be used to control how resource metrics are aggregated.
+ // And they can be used as arguments to policy management rules (e.g. route,
+ // firewall, load balancing, etc.).
+ //
+ // * Label keys must be between 1 and 63 characters long and must conform to
+ // the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
+ // * Label values must be between 0 and 63 characters long and must conform
+ // to the regular expression `[a-z0-9_-]{0,63}`.
+ // * No more than 64 labels can be associated with a given resource.
+ //
+ // See https://goo.gl/xmQnxf for more information on and examples of labels.
+ //
+ // If you plan to use labels in your own code, please note that additional
+ // characters may be allowed in the future. And so you are advised to use an
+ // internal label representation, such as JSON, which doesn't rely upon
+ // specific characters being disallowed. For example, representing labels
+ // as the string: name + "_" + value would prove problematic if we were to
+ // allow "_" in a future release.
+  map<string, string> labels = 7;
+
+ // The `InstanceType` of the current instance.
+ InstanceType instance_type = 10;
+
+ // Deprecated. This field is not populated.
+ repeated string endpoint_uris = 8;
+
+ // Output only. The time at which the instance was created.
+ google.protobuf.Timestamp create_time = 11
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The time at which the instance was most recently updated.
+ google.protobuf.Timestamp update_time = 12
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Free instance metadata. Only populated for free instances.
+ FreeInstanceMetadata free_instance_metadata = 13;
+
+ // Optional. The `Edition` of the current instance.
+ Edition edition = 20 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Controls the default backup schedule behavior for new databases
+ // within the instance. By default, a backup schedule is created automatically
+ // when a new database is created in a new instance.
+ //
+ // Note that the `AUTOMATIC` value isn't permitted for free instances,
+ // as backups and backup schedules aren't supported for free instances.
+ //
+ // In the `GetInstance` or `ListInstances` response, if the value of
+ // `default_backup_schedule_type` isn't set, or set to `NONE`, Spanner doesn't
+ // create a default backup schedule for new databases in the instance.
+ DefaultBackupScheduleType default_backup_schedule_type = 23
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The request for
+// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+message ListInstanceConfigsRequest {
+ // Required. The name of the project for which a list of supported instance
+ // configurations is requested. Values are of the form
+ // `projects/<project>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "cloudresourcemanager.googleapis.com/Project"
+ }
+ ];
+
+ // Number of instance configurations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ int32 page_size = 2;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
+ // from a previous
+ // [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
+ string page_token = 3;
+}
+
+// The response for
+// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+message ListInstanceConfigsResponse {
+ // The list of requested instance configurations.
+ repeated InstanceConfig instance_configs = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
+ // call to fetch more of the matching instance configurations.
+ string next_page_token = 2;
+}
+
+// The request for
+// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
+message GetInstanceConfigRequest {
+ // Required. The name of the requested instance configuration. Values are of
+ // the form `projects/<project>/instanceConfigs/<config>`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstanceConfig"
+ }
+ ];
+}
+
+// The request for
+// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
+message CreateInstanceConfigRequest {
+ // Required. The name of the project in which to create the instance
+ // configuration. Values are of the form `projects/<project>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "cloudresourcemanager.googleapis.com/Project"
+ }
+ ];
+
+ // Required. The ID of the instance configuration to create. Valid identifiers
+ // are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
+ // characters in length. The `custom-` prefix is required to avoid name
+ // conflicts with Google-managed configurations.
+ string instance_config_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The `InstanceConfig` proto of the configuration to create.
+ // `instance_config.name` must be
+ // `<parent>/instanceConfigs/<instance_config_id>`.
+ // `instance_config.base_config` must be a Google-managed configuration name,
+ // e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
+ InstanceConfig instance_config = 3 [(google.api.field_behavior) = REQUIRED];
+
+ // An option to validate, but not actually execute, a request,
+ // and provide the same response.
+ bool validate_only = 4;
+}
+
+// The request for
+// [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
+message UpdateInstanceConfigRequest {
+ // Required. The user instance configuration to update, which must always
+ // include the instance configuration name. Otherwise, only fields mentioned
+ // in
+ // [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
+ // need be included. To prevent conflicts of concurrent updates,
+ // [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
+ // be used.
+ InstanceConfig instance_config = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. A mask specifying which fields in
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
+ // updated. The field mask must always be specified; this prevents any future
+ // fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
+ // from being erased accidentally by clients that do not know about them. Only
+ // display_name and labels can be updated.
+ google.protobuf.FieldMask update_mask = 2
+ [(google.api.field_behavior) = REQUIRED];
+
+ // An option to validate, but not actually execute, a request,
+ // and provide the same response.
+ bool validate_only = 3;
+}
+
+// The request for
+// [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig].
+message DeleteInstanceConfigRequest {
+ // Required. The name of the instance configuration to be deleted.
+ // Values are of the form
+ // `projects/<project>/instanceConfigs/<instance_config>`
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstanceConfig"
+ }
+ ];
+
+ // Used for optimistic concurrency control as a way to help prevent
+ // simultaneous deletes of an instance configuration from overwriting each
+ // other. If not empty, the API
+ // only deletes the instance configuration when the etag provided matches the
+ // current status of the requested instance configuration. Otherwise, deletes
+ // the instance configuration without checking the current status of the
+ // requested instance configuration.
+ string etag = 2;
+
+ // An option to validate, but not actually execute, a request,
+ // and provide the same response.
+ bool validate_only = 3;
+}
+
+// The request for
+// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
+message ListInstanceConfigOperationsRequest {
+ // Required. The project of the instance configuration operations.
+ // Values are of the form `projects/<project>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "cloudresourcemanager.googleapis.com/Project"
+ }
+ ];
+
+ // An expression that filters the list of returned operations.
+ //
+ // A filter expression consists of a field name, a
+ // comparison operator, and a value for filtering.
+ // The value must be a string, a number, or a boolean. The comparison operator
+ // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+ // Colon `:` is the contains operator. Filter rules are not case sensitive.
+ //
+ // The following fields in the Operation are eligible for filtering:
+ //
+ // * `name` - The name of the long-running operation
+ // * `done` - False if the operation is in progress, else true.
+ // * `metadata.@type` - the type of metadata. For example, the type string
+ // for
+ // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
+ // is
+ // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
+ // * `metadata.<field_name>` - any field in metadata.value.
+ // `metadata.@type` must be specified first, if filtering on metadata
+ // fields.
+ // * `error` - Error associated with the long-running operation.
+ // * `response.@type` - the type of response.
+ // * `response.<field_name>` - any field in response.value.
+ //
+ // You can combine multiple expressions by enclosing each expression in
+ // parentheses. By default, expressions are combined with AND logic. However,
+ // you can specify AND, OR, and NOT logic explicitly.
+ //
+ // Here are a few examples:
+ //
+ // * `done:true` - The operation is complete.
+ // * `(metadata.@type=` \
+ // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
+ // AND` \
+ // `(metadata.instance_config.name:custom-config) AND` \
+ // `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
+ // `(error:*)` - Return operations where:
+ // * The operation's metadata type is
+ // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
+ // * The instance configuration name contains "custom-config".
+ // * The operation started before 2021-03-28T14:50:00Z.
+ // * The operation resulted in an error.
+ string filter = 2;
+
+ // Number of operations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ int32 page_size = 3;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
+ // from a previous
+ // [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
+ // to the same `parent` and with the same `filter`.
+ string page_token = 4;
+}
+
+// The response for
+// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
+message ListInstanceConfigOperationsResponse {
+ // The list of matching instance configuration long-running operations. Each
+ // operation's name will be
+ // prefixed by the name of the instance configuration. The operation's
+ // metadata field type
+ // `metadata.type_url` describes the type of the metadata.
+ repeated google.longrunning.Operation operations = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
+ // call to fetch more of the matching metadata.
+ string next_page_token = 2;
+}
+
+// The request for
+// [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
+message GetInstanceRequest {
+ // Required. The name of the requested instance. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // If field_mask is present, specifies the subset of
+ // [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
+ // returned. If absent, all
+ // [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
+ google.protobuf.FieldMask field_mask = 2;
+}
+
+// The request for
+// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+message CreateInstanceRequest {
+ // Required. The name of the project in which to create the instance. Values
+ // are of the form `projects/<project>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "cloudresourcemanager.googleapis.com/Project"
+ }
+ ];
+
+ // Required. The ID of the instance to create. Valid identifiers are of the
+ // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
+ // length.
+ string instance_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The instance to create. The name may be omitted, but if
+ // specified must be `<parent>/instances/<instance_id>`.
+ Instance instance = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The request for
+// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+message ListInstancesRequest {
+ // Required. The name of the project for which a list of instances is
+ // requested. Values are of the form `projects/<project>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "cloudresourcemanager.googleapis.com/Project"
+ }
+ ];
+
+ // Number of instances to be returned in the response. If 0 or less, defaults
+ // to the server's maximum allowed page size.
+ int32 page_size = 2;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
+ // from a previous
+ // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
+ string page_token = 3;
+
+ // An expression for filtering the results of the request. Filter rules are
+ // case insensitive. The fields eligible for filtering are:
+ //
+ // * `name`
+ // * `display_name`
+ // * `labels.key` where key is the name of a label
+ //
+ // Some examples of using filters are:
+ //
+ // * `name:*` --> The instance has a name.
+ // * `name:Howl` --> The instance's name contains the string "howl".
+ // * `name:HOWL` --> Equivalent to above.
+ // * `NAME:howl` --> Equivalent to above.
+ // * `labels.env:*` --> The instance has the label "env".
+ // * `labels.env:dev` --> The instance has the label "env" and the value of
+ // the label contains the string "dev".
+ // * `name:howl labels.env:dev` --> The instance's name contains "howl" and
+ // it has the label "env" with its value
+ // containing "dev".
+ string filter = 4;
+
+ // Deadline used while retrieving metadata for instances.
+ // Instances whose metadata cannot be retrieved within this deadline will be
+ // added to
+ // [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
+ // in
+ // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
+ google.protobuf.Timestamp instance_deadline = 5;
+}
+
+// The response for
+// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+message ListInstancesResponse {
+ // The list of requested instances.
+ repeated Instance instances = 1;
+
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
+ // call to fetch more of the matching instances.
+ string next_page_token = 2;
+
+ // The list of unreachable instances.
+ // It includes the names of instances whose metadata could not be retrieved
+ // within
+ // [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
+ repeated string unreachable = 3;
+}
+
+// The request for
+// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+message UpdateInstanceRequest {
+ // Required. The instance to update, which must always include the instance
+ // name. Otherwise, only fields mentioned in
+ // [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
+ // need be included.
+ Instance instance = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. A mask specifying which fields in
+ // [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
+ // The field mask must always be specified; this prevents any future fields in
+ // [Instance][google.spanner.admin.instance.v1.Instance] from being erased
+ // accidentally by clients that do not know about them.
+ google.protobuf.FieldMask field_mask = 2
+ [(google.api.field_behavior) = REQUIRED];
+}
+
+// The request for
+// [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
+message DeleteInstanceRequest {
+ // Required. The name of the instance to be deleted. Values are of the form
+ // `projects/<project>/instances/<instance>`
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+}
+
+// Metadata type for the operation returned by
+// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+message CreateInstanceMetadata {
+ // The instance being created.
+ Instance instance = 1;
+
+ // The time at which the
+ // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
+ // request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ google.protobuf.Timestamp cancel_time = 3;
+
+ // The time at which this operation failed or was completed successfully.
+ google.protobuf.Timestamp end_time = 4;
+
+ // The expected fulfillment period of this create operation.
+ FulfillmentPeriod expected_fulfillment_period = 5;
+}
+
+// Metadata type for the operation returned by
+// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+message UpdateInstanceMetadata {
+ // The desired end state of the update.
+ Instance instance = 1;
+
+ // The time at which
+ // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
+ // request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ google.protobuf.Timestamp cancel_time = 3;
+
+ // The time at which this operation failed or was completed successfully.
+ google.protobuf.Timestamp end_time = 4;
+
+ // The expected fulfillment period of this update operation.
+ FulfillmentPeriod expected_fulfillment_period = 5;
+}
+
+// Free instance specific metadata that is kept even after an instance has been
+// upgraded for tracking purposes.
+message FreeInstanceMetadata {
+ // Allows users to change behavior when a free instance expires.
+ enum ExpireBehavior {
+ // Not specified.
+ EXPIRE_BEHAVIOR_UNSPECIFIED = 0;
+
+ // When the free instance expires, upgrade the instance to a provisioned
+ // instance.
+ FREE_TO_PROVISIONED = 1;
+
+ // When the free instance expires, disable the instance, and delete it
+ // after the grace period passes if it has not been upgraded.
+ REMOVE_AFTER_GRACE_PERIOD = 2;
+ }
+
+ // Output only. Timestamp after which the instance will either be upgraded or
+ // scheduled for deletion after a grace period. ExpireBehavior is used to
+ // choose between upgrading or scheduling the free instance for deletion. This
+ // timestamp is set during the creation of a free instance.
+ google.protobuf.Timestamp expire_time = 1
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. If present, the timestamp at which the free instance was
+ // upgraded to a provisioned instance.
+ google.protobuf.Timestamp upgrade_time = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Specifies the expiration behavior of a free instance. The default of
+ // ExpireBehavior is `REMOVE_AFTER_GRACE_PERIOD`. This can be modified during
+ // or after creation, and before expiration.
+ ExpireBehavior expire_behavior = 3;
+}
+
+// Metadata type for the operation returned by
+// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
+message CreateInstanceConfigMetadata {
+ // The target instance configuration end state.
+ InstanceConfig instance_config = 1;
+
+ // The progress of the
+ // [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
+ // operation.
+ OperationProgress progress = 2;
+
+ // The time at which this operation was cancelled.
+ google.protobuf.Timestamp cancel_time = 3;
+}
+
+// Metadata type for the operation returned by
+// [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
+message UpdateInstanceConfigMetadata {
+ // The desired instance configuration after updating.
+ InstanceConfig instance_config = 1;
+
+ // The progress of the
+ // [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
+ // operation.
+ OperationProgress progress = 2;
+
+ // The time at which this operation was cancelled.
+ google.protobuf.Timestamp cancel_time = 3;
+}
+
+// An isolated set of Cloud Spanner resources that databases can define
+// placements on.
+message InstancePartition {
+ option (google.api.resource) = {
+ type: "spanner.googleapis.com/InstancePartition"
+ pattern: "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}"
+ plural: "instancePartitions"
+ singular: "instancePartition"
+ };
+
+ // Indicates the current state of the instance partition.
+ enum State {
+ // Not specified.
+ STATE_UNSPECIFIED = 0;
+
+ // The instance partition is still being created. Resources may not be
+ // available yet, and operations such as creating placements using this
+ // instance partition may not work.
+ CREATING = 1;
+
+ // The instance partition is fully created and ready to do work such as
+ // creating placements and using in databases.
+ READY = 2;
+ }
+
+ // Required. A unique identifier for the instance partition. Values are of the
+ // form
+ // `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
+ // The final segment of the name must be between 2 and 64 characters in
+ // length. An instance partition's name cannot be changed after the instance
+ // partition is created.
+ string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The name of the instance partition's configuration. Values are of
+ // the form `projects/<project>/instanceConfigs/<configuration>`. See also
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ string config = 2 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstanceConfig"
+ }
+ ];
+
+ // Required. The descriptive name for this instance partition as it appears in
+ // UIs. Must be unique per project and between 4 and 30 characters in length.
+ string display_name = 3 [(google.api.field_behavior) = REQUIRED];
+
+ // Compute capacity defines amount of server and storage resources that are
+ // available to the databases in an instance partition. At most, one of either
+ // `node_count` or `processing_units` should be present in the message. For
+ // more information, see
+ // [Compute capacity, nodes, and processing
+ // units](https://cloud.google.com/spanner/docs/compute-capacity).
+ oneof compute_capacity {
+ // The number of nodes allocated to this instance partition.
+ //
+ // Users can set the `node_count` field to specify the target number of
+ // nodes allocated to the instance partition.
+ //
+ // This may be zero in API responses for instance partitions that are not
+ // yet in state `READY`.
+ int32 node_count = 5;
+
+ // The number of processing units allocated to this instance partition.
+ //
+ // Users can set the `processing_units` field to specify the target number
+ // of processing units allocated to the instance partition.
+ //
+ // This might be zero in API responses for instance partitions that are not
+ // yet in the `READY` state.
+ int32 processing_units = 6;
+ }
+
+ // Optional. The autoscaling configuration. Autoscaling is enabled if this
+ // field is set. When autoscaling is enabled, fields in compute_capacity are
+ // treated as OUTPUT_ONLY fields and reflect the current compute capacity
+ // allocated to the instance partition.
+ AutoscalingConfig autoscaling_config = 13
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Output only. The current instance partition state.
+ State state = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The time at which the instance partition was created.
+ google.protobuf.Timestamp create_time = 8
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The time at which the instance partition was most recently
+ // updated.
+ google.protobuf.Timestamp update_time = 9
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The names of the databases that reference this
+ // instance partition. Referencing databases should share the parent instance.
+ // The existence of any referencing database prevents the instance partition
+ // from being deleted.
+ repeated string referencing_databases = 10
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Deprecated: This field is not populated.
+ // Output only. The names of the backups that reference this instance
+ // partition. Referencing backups should share the parent instance. The
+ // existence of any referencing backup prevents the instance partition from
+ // being deleted.
+ repeated string referencing_backups = 11
+ [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Used for optimistic concurrency control as a way
+ // to help prevent simultaneous updates of an instance partition from
+ // overwriting each other. It is strongly suggested that systems make use of
+ // the etag in the read-modify-write cycle to perform instance partition
+ // updates in order to avoid race conditions: An etag is returned in the
+ // response which contains instance partitions, and systems are expected to
+ // put that etag in the request to update instance partitions to ensure that
+ // their change will be applied to the same version of the instance partition.
+ // If no etag is provided in the call to update instance partition, then the
+ // existing instance partition is overwritten blindly.
+ string etag = 12;
+}
+
+// Metadata type for the operation returned by
+// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
+message CreateInstancePartitionMetadata {
+ // The instance partition being created.
+ InstancePartition instance_partition = 1;
+
+ // The time at which the
+ // [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
+ // request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ google.protobuf.Timestamp cancel_time = 3;
+
+ // The time at which this operation failed or was completed successfully.
+ google.protobuf.Timestamp end_time = 4;
+}
+
+// The request for
+// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
+message CreateInstancePartitionRequest {
+ // Required. The name of the instance in which to create the instance
+ // partition. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // Required. The ID of the instance partition to create. Valid identifiers are
+ // of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
+ // characters in length.
+ string instance_partition_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The instance partition to create. The instance_partition.name may
+ // be omitted, but if specified must be
+ // `<parent>/instancePartitions/<instance_partition_id>`.
+ InstancePartition instance_partition = 3
+ [(google.api.field_behavior) = REQUIRED];
+}
+
+// The request for
+// [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition].
+message DeleteInstancePartitionRequest {
+ // Required. The name of the instance partition to be deleted.
+ // Values are of the form
+ // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstancePartition"
+ }
+ ];
+
+ // Optional. If not empty, the API only deletes the instance partition when
+ // the etag provided matches the current status of the requested instance
+ // partition. Otherwise, deletes the instance partition without checking the
+ // current status of the requested instance partition.
+ string etag = 2;
+}
+
+// The request for
+// [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition].
+message GetInstancePartitionRequest {
+ // Required. The name of the requested instance partition. Values are of
+ // the form
+ // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
+ string name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/InstancePartition"
+ }
+ ];
+}
+
+// The request for
+// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
+message UpdateInstancePartitionRequest {
+ // Required. The instance partition to update, which must always include the
+ // instance partition name. Otherwise, only fields mentioned in
+ // [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
+ // need be included.
+ InstancePartition instance_partition = 1
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Required. A mask specifying which fields in
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
+ // should be updated. The field mask must always be specified; this prevents
+ // any future fields in
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
+ // from being erased accidentally by clients that do not know about them.
+ google.protobuf.FieldMask field_mask = 2
+ [(google.api.field_behavior) = REQUIRED];
+}
+
+// Metadata type for the operation returned by
+// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
+message UpdateInstancePartitionMetadata {
+ // The desired end state of the update.
+ InstancePartition instance_partition = 1;
+
+ // The time at which
+ // [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
+ // request was received.
+ google.protobuf.Timestamp start_time = 2;
+
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ google.protobuf.Timestamp cancel_time = 3;
+
+ // The time at which this operation failed or was completed successfully.
+ google.protobuf.Timestamp end_time = 4;
+}
+
+// The request for
+// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
+message ListInstancePartitionsRequest {
+ // Required. The instance whose instance partitions should be listed. Values
+ // are of the form `projects/<project>/instances/<instance>`. Use `{instance}
+ // = '-'` to list instance partitions for all Instances in a project, e.g.,
+ // `projects/myproject/instances/-`.
+ string parent = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "spanner.googleapis.com/Instance"
+ }
+ ];
+
+ // Number of instance partitions to be returned in the response. If 0 or less,
+ // defaults to the server's maximum allowed page size.
+ int32 page_size = 2;
+
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
+ // from a previous
+ // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
+ string page_token = 3;
+
+ // Optional. Deadline used while retrieving metadata for instance partitions.
+ // Instance partitions whose metadata cannot be retrieved within this deadline
+ // will be added to
+ // [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
+ // in
+ // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
+ google.protobuf.Timestamp instance_partition_deadline = 4
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The response for
+// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
+message ListInstancePartitionsResponse {
+  // The list of requested instance partitions.
+  repeated InstancePartition instance_partitions = 1;
+
+  // `next_page_token` can be sent in a subsequent
+  // [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
+  // call to fetch more of the matching instance partitions.
+  string next_page_token = 2;
+
+  // The list of unreachable instances or instance partitions.
+  // It includes the names of instances or instance partitions whose metadata
+  // could not be retrieved within
+  // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
+  repeated string unreachable = 3;
+}
+
+// The request for
+// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
+message ListInstancePartitionOperationsRequest {
+  // Required. The parent instance of the instance partition operations.
+  // Values are of the form `projects/<project>/instances/<instance>`.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "spanner.googleapis.com/Instance"
+    }
+  ];
+
+  // Optional. An expression that filters the list of returned operations.
+  //
+  // A filter expression consists of a field name, a
+  // comparison operator, and a value for filtering.
+  // The value must be a string, a number, or a boolean. The comparison operator
+  // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+  // Colon `:` is the contains operator. Filter rules are not case sensitive.
+  //
+  // The following fields in the Operation are eligible for filtering:
+  //
+  //   * `name` - The name of the long-running operation
+  //   * `done` - False if the operation is in progress, else true.
+  //   * `metadata.@type` - the type of metadata. For example, the type string
+  //      for
+  //      [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
+  //      is
+  //      `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
+  //   * `metadata.<field_name>` - any field in metadata.value.
+  //      `metadata.@type` must be specified first, if filtering on metadata
+  //      fields.
+  //   * `error` - Error associated with the long-running operation.
+  //   * `response.@type` - the type of response.
+  //   * `response.<field_name>` - any field in response.value.
+  //
+  // You can combine multiple expressions by enclosing each expression in
+  // parentheses. By default, expressions are combined with AND logic. However,
+  // you can specify AND, OR, and NOT logic explicitly.
+  //
+  // Here are a few examples:
+  //
+  //   * `done:true` - The operation is complete.
+  //   * `(metadata.@type=` \
+  //     `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
+  //     AND` \
+  //     `(metadata.instance_partition.name:custom-instance-partition) AND` \
+  //     `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
+  //     `(error:*)` - Return operations where:
+  //     * The operation's metadata type is
+  //     [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
+  //     * The instance partition name contains "custom-instance-partition".
+  //     * The operation started before 2021-03-28T14:50:00Z.
+  //     * The operation resulted in an error.
+  string filter = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Number of operations to be returned in the response. If 0 or
+  // less, defaults to the server's maximum allowed page size.
+  int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If non-empty, `page_token` should contain a
+  // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
+  // from a previous
+  // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
+  // to the same `parent` and with the same `filter`.
+  string page_token = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Deadline used while retrieving metadata for instance partition
+  // operations. Instance partitions whose operation metadata cannot be
+  // retrieved within this deadline will be added to
+  // [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions]
+  // in
+  // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
+  google.protobuf.Timestamp instance_partition_deadline = 5
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The response for
+// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
+message ListInstancePartitionOperationsResponse {
+  // The list of matching instance partition long-running operations.
+  // Each operation's name will be prefixed by the instance partition's
+  // name.
+  // The operation's metadata field type `metadata.type_url` describes
+  // the type of the metadata.
+  repeated google.longrunning.Operation operations = 1;
+
+  // `next_page_token` can be sent in a subsequent
+  // [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
+  // call to fetch more of the matching metadata.
+  string next_page_token = 2;
+
+  // The list of unreachable instance partitions.
+  // It includes the names of instance partitions whose operation metadata could
+  // not be retrieved within
+  // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
+  repeated string unreachable_instance_partitions = 3;
+}
+
+// The request for
+// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
+message MoveInstanceRequest {
+  // Required. The instance to move.
+  // Values are of the form `projects/<project>/instances/<instance>`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "spanner.googleapis.com/Instance"
+    }
+  ];
+
+  // Required. The target instance configuration where to move the instance.
+  // Values are of the form `projects/<project>/instanceConfigs/<config>`.
+  string target_config = 2 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "spanner.googleapis.com/InstanceConfig"
+    }
+  ];
+}
+
+// The response for
+// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
+message MoveInstanceResponse {}
+
+// Metadata type for the operation returned by
+// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
+message MoveInstanceMetadata {
+ // The target instance configuration where to move the instance.
+ // Values are of the form `projects/