diff --git a/CHANGELOG.md b/CHANGELOG.md
index d5b3f8cb..4f73b3e9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,12 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+## [0.8.5](https://github.com/traceloop/openllmetry-js/compare/v0.8.4...v0.8.5) (2024-05-31)
+
+### Bug Fixes
+
+- **vertex:** support v1.2.0 ([#290](https://github.com/traceloop/openllmetry-js/issues/290)) ([e62c9b4](https://github.com/traceloop/openllmetry-js/commit/e62c9b420881b69971d3ee910c5d3f613df3be50))
+
 ## [0.8.4](https://github.com/traceloop/openllmetry-js/compare/v0.8.3...v0.8.4) (2024-05-20)
 
 ### Bug Fixes
diff --git a/lerna.json b/lerna.json
index f3337b7a..1c6ebe94 100644
--- a/lerna.json
+++ b/lerna.json
@@ -1,6 +1,6 @@
 {
   "$schema": "node_modules/lerna/schemas/lerna-schema.json",
-  "version": "0.8.4",
+  "version": "0.8.5",
   "packages": ["packages/*"],
   "useNx": true
 }
diff --git a/package-lock.json b/package-lock.json
index f25d6721..3bc3114a 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -3643,9 +3643,9 @@
       }
     },
     "node_modules/@google-cloud/vertexai": {
-      "version": "0.2.1",
-      "resolved": "https://registry.npmjs.org/@google-cloud/vertexai/-/vertexai-0.2.1.tgz",
-      "integrity": "sha512-5oJwLfoRO/Oh61KZnXA9jtuOiSPCQW3tgCa9BhmfOE702F6jMqb65TSbVIMJbzJwjCZIhoRdnrh0rgTkKQXTDg==",
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/@google-cloud/vertexai/-/vertexai-1.2.0.tgz",
+      "integrity": "sha512-EH0dnoMRIBQzJEEOUWN03eWPSdLBFdsZA/am3eU+qYrnNyY9okUueOajZd79U48KwgFbqoFrCA9yHQ30DgfD8Q==",
       "dependencies": {
         "google-auth-library": "^9.1.0"
       },
@@ -23502,7 +23502,7 @@
     },
     "packages/instrumentation-vertexai": {
       "name": "@traceloop/instrumentation-vertexai",
-      "version": "0.8.0",
+      "version": "0.8.5",
       "license": "Apache-2.0",
       "dependencies": {
         "@opentelemetry/core": "^1.22.0",
@@ -23512,7 +23512,7 @@
       },
       "devDependencies": {
         "@google-cloud/aiplatform": "^3.10.0",
-        "@google-cloud/vertexai": "^0.2.1"
+        "@google-cloud/vertexai": "^1.2.0"
       },
       "engines": {
         "node": ">=14"
@@ -23554,7 +23554,7 @@
         "@aws-sdk/client-bedrock-runtime": "^3.499.0",
         "@azure/openai": "^1.0.0-beta.11",
         "@google-cloud/aiplatform": "^3.10.0",
-        "@google-cloud/vertexai": "^0.2.1",
+        "@google-cloud/vertexai": "^1.2.0",
         "@langchain/community": "^0.0.34",
         "@pinecone-database/pinecone": "^2.0.1",
         "@traceloop/node-server-sdk": "*",
@@ -24139,7 +24139,7 @@
     },
     "packages/traceloop-sdk": {
       "name": "@traceloop/node-server-sdk",
-      "version": "0.8.4",
+      "version": "0.8.5",
       "license": "Apache-2.0",
       "dependencies": {
         "@opentelemetry/exporter-trace-otlp-proto": "^0.49.1",
@@ -24155,7 +24155,7 @@
         "@traceloop/instrumentation-llamaindex": "^0.8.0",
         "@traceloop/instrumentation-openai": "^0.8.2",
         "@traceloop/instrumentation-pinecone": "^0.8.3",
-        "@traceloop/instrumentation-vertexai": "^0.8.0",
+        "@traceloop/instrumentation-vertexai": "^0.8.5",
         "@types/nunjucks": "^3.2.5",
         "cross-fetch": "^4.0.0",
         "fetch-retry": "^5.0.6",
@@ -24169,7 +24169,7 @@
         "@aws-sdk/client-bedrock-runtime": "^3.499.0",
         "@azure/openai": "^1.0.0-beta.11",
         "@google-cloud/aiplatform": "^3.10.0",
-        "@google-cloud/vertexai": "^0.2.1",
+        "@google-cloud/vertexai": "^1.2.0",
         "@pinecone-database/pinecone": "^2.0.1",
         "@pollyjs/adapter-node-http": "^6.0.6",
         "@pollyjs/core": "^6.0.6",
diff --git a/packages/instrumentation-vertexai/CHANGELOG.md b/packages/instrumentation-vertexai/CHANGELOG.md
index 9a3d0179..62f6fec3 100644
--- a/packages/instrumentation-vertexai/CHANGELOG.md
+++ b/packages/instrumentation-vertexai/CHANGELOG.md
@@ -3,6 +3,12 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+## [0.8.5](https://github.com/traceloop/openllmetry-js/compare/v0.8.4...v0.8.5) (2024-05-31)
+
+### Bug Fixes
+
+- **vertex:** support v1.2.0 ([#290](https://github.com/traceloop/openllmetry-js/issues/290)) ([e62c9b4](https://github.com/traceloop/openllmetry-js/commit/e62c9b420881b69971d3ee910c5d3f613df3be50))
+
 # [0.8.0](https://github.com/traceloop/openllmetry-js/compare/v0.7.0...v0.8.0) (2024-04-29)
 
 ### Features
diff --git a/packages/instrumentation-vertexai/package.json b/packages/instrumentation-vertexai/package.json
index 563b0185..c512b3cb 100644
--- a/packages/instrumentation-vertexai/package.json
+++ b/packages/instrumentation-vertexai/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@traceloop/instrumentation-vertexai",
-  "version": "0.8.0",
+  "version": "0.8.5",
   "description": "Google's VertexAI Instrumentation",
   "main": "dist/index.js",
   "module": "dist/index.mjs",
@@ -43,7 +43,7 @@
   },
   "devDependencies": {
     "@google-cloud/aiplatform": "^3.10.0",
-    "@google-cloud/vertexai": "^0.2.1"
+    "@google-cloud/vertexai": "^1.2.0"
   },
   "homepage": "https://github.com/traceloop/openllmetry-js/tree/main/packages/instrumentation-openai",
   "gitHead": "ef1e70d6037f7b5c061056ef2be16e3f55f02ed5"
diff --git a/packages/instrumentation-vertexai/src/vertexai-instrumentation.ts b/packages/instrumentation-vertexai/src/vertexai-instrumentation.ts
index c737f972..3b118f79 100644
--- a/packages/instrumentation-vertexai/src/vertexai-instrumentation.ts
+++ b/packages/instrumentation-vertexai/src/vertexai-instrumentation.ts
@@ -49,7 +49,7 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
   protected init(): InstrumentationModuleDefinition<any> {
     const vertexAIModule = new InstrumentationNodeModuleDefinition<any>(
       "@google-cloud/vertexai",
-      [">=0.2.1"],
+      [">=1.1.0"],
       this.wrap.bind(this),
       this.unwrap.bind(this),
     );
@@ -57,24 +57,18 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     return vertexAIModule;
   }
 
-  private modelConfig: vertexAI.ModelParams = { model: "" };
-
-  private setModel(newValue: vertexAI.ModelParams) {
-    this.modelConfig = { ...newValue };
-  }
-
   public manuallyInstrument(module: typeof vertexAI) {
     this._diag.debug("Manually instrumenting @google-cloud/vertexai");
 
     this._wrap(
-      module.VertexAI_Preview.prototype,
-      "getGenerativeModel",
-      this.wrapperMethod("getGenerativeModel"),
+      module.GenerativeModel.prototype,
+      "generateContentStream",
+      this.wrapperMethod(),
     );
     this._wrap(
       module.GenerativeModel.prototype,
-      "generateContentStream",
-      this.wrapperMethod("generateContentStream"),
+      "generateContent",
+      this.wrapperMethod(),
     );
   }
 
@@ -82,14 +76,14 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     this._diag.debug(`Patching @google-cloud/vertexai@${moduleVersion}`);
 
     this._wrap(
-      module.VertexAI_Preview.prototype,
-      "getGenerativeModel",
-      this.wrapperMethod("getGenerativeModel"),
+      module.GenerativeModel.prototype,
+      "generateContentStream",
+      this.wrapperMethod(),
     );
     this._wrap(
       module.GenerativeModel.prototype,
-      "generateContentStream",
-      this.wrapperMethod("generateContentStream"),
+      "generateContent",
+      this.wrapperMethod(),
     );
 
     return module;
@@ -98,42 +92,21 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
   private unwrap(module: typeof vertexAI, moduleVersion?: string): void {
     this._diag.debug(`Unpatching @google-cloud/vertexai@${moduleVersion}`);
 
-    this._unwrap(module.VertexAI_Preview.prototype, "getGenerativeModel");
     this._unwrap(module.GenerativeModel.prototype, "generateContentStream");
+    this._unwrap(module.GenerativeModel.prototype, "generateContent");
   }
 
-  private wrapperMethod(
-    wrappedMethodName: "getGenerativeModel" | "generateContentStream",
-  ) {
+  private wrapperMethod() {
     // eslint-disable-next-line @typescript-eslint/no-this-alias
     const plugin = this;
     // eslint-disable-next-line @typescript-eslint/ban-types
     return (original: Function) => {
       return function method(
-        this: any,
+        this: vertexAI.GenerativeModel,
         ...args: (vertexAI.GenerateContentRequest & vertexAI.ModelParams)[]
       ) {
-        if (wrappedMethodName === "getGenerativeModel") {
-          plugin.setModel(args[0]);
-
-          return context.bind(
-            context.active(),
-            safeExecuteInTheMiddle(
-              () => {
-                return context.with(context.active(), () => {
-                  return original.apply(this, args);
-                });
-              },
-              (e) => {
-                if (e) {
-                  plugin._diag.error("Error in VertexAI Instrumentation", e);
-                }
-              },
-            ),
-          );
-        }
-
         const span = plugin._startSpan({
+          instance: this,
           params: args[0],
         });
 
@@ -157,8 +130,10 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
   }
 
   private _startSpan({
+    instance,
     params,
   }: {
+    instance: vertexAI.GenerativeModel;
     params: vertexAI.GenerateContentRequest;
   }): Span {
     const attributes: Attributes = {
@@ -167,28 +142,18 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     };
 
     try {
-      attributes[SpanAttributes.LLM_REQUEST_MODEL] = this.modelConfig.model;
-
-      if (
-        this.modelConfig.generation_config !== undefined &&
-        typeof this.modelConfig.generation_config === "object"
-      ) {
-        if (this.modelConfig.generation_config.max_output_tokens) {
-          attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] =
-            this.modelConfig.generation_config.max_output_tokens;
-        }
-        if (this.modelConfig.generation_config.temperature) {
-          attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] =
-            this.modelConfig.generation_config.temperature;
-        }
-        if (this.modelConfig.generation_config.top_p) {
-          attributes[SpanAttributes.LLM_REQUEST_TOP_P] =
-            this.modelConfig.generation_config.top_p;
-        }
-        if (this.modelConfig.generation_config.top_k) {
-          attributes[SpanAttributes.LLM_TOP_K] =
-            this.modelConfig.generation_config.top_k;
-        }
+      attributes[SpanAttributes.LLM_REQUEST_MODEL] = instance["model"];
+      attributes[SpanAttributes.LLM_RESPONSE_MODEL] = instance["model"];
+
+      if (instance["generationConfig"]) {
+        attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] =
+          instance["generationConfig"].maxOutputTokens;
+        attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] =
+          instance["generationConfig"].temperature;
+        attributes[SpanAttributes.LLM_REQUEST_TOP_P] =
+          instance["generationConfig"].topP;
+        attributes[SpanAttributes.LLM_TOP_K] =
+          instance["generationConfig"].topK;
       }
 
       if (this._shouldSendPrompts() && "contents" in params) {
@@ -213,7 +178,9 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
       .then(async (result) => {
         await this._endSpan({
           span,
-          result: result as vertexAI.StreamGenerateContentResult,
+          result: result as
+            | vertexAI.StreamGenerateContentResult
+            | vertexAI.GenerateContentResult,
         });
         return new Promise<T>((resolve) => resolve(result));
       })
@@ -236,14 +203,11 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     result,
   }: {
     span: Span;
-    result: vertexAI.StreamGenerateContentResult;
+    result:
+      | vertexAI.StreamGenerateContentResult
+      | vertexAI.GenerateContentResult;
   }) {
     try {
-      span.setAttribute(
-        SpanAttributes.LLM_RESPONSE_MODEL,
-        this.modelConfig.model,
-      );
-
       const streamResponse = await result.response;
 
       if (streamResponse.usageMetadata?.totalTokenCount !== undefined)
@@ -252,20 +216,20 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
           streamResponse.usageMetadata.totalTokenCount,
         );
 
-      if (streamResponse.usageMetadata?.candidates_token_count)
+      if (streamResponse.usageMetadata?.candidatesTokenCount)
         span.setAttribute(
           SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
-          streamResponse.usageMetadata.candidates_token_count,
+          streamResponse.usageMetadata.candidatesTokenCount,
         );
 
-      if (streamResponse.usageMetadata?.prompt_token_count)
+      if (streamResponse.usageMetadata?.promptTokenCount)
         span.setAttribute(
           SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
-          streamResponse.usageMetadata.prompt_token_count,
+          streamResponse.usageMetadata.promptTokenCount,
         );
 
       if (this._shouldSendPrompts()) {
-        streamResponse.candidates.forEach((candidate, index) => {
+        streamResponse.candidates?.forEach((candidate, index) => {
           if (candidate.finishReason)
             span.setAttribute(
               `${SpanAttributes.LLM_COMPLETIONS}.${index}.finish_reason`,
@@ -298,10 +262,10 @@ export class VertexAIInstrumentation extends InstrumentationBase<any> {
     const result = parts
       .map((part) => {
         if (part.text) return part.text;
-        else if (part.file_data)
-          return part.file_data.file_uri + "-" + part.file_data.mime_type;
-        else if (part.inline_data)
-          return part.inline_data.data + "-" + part.inline_data.mime_type;
+        else if (part.fileData)
+          return part.fileData.fileUri + "-" + part.fileData.mimeType;
+        else if (part.inlineData)
+          return part.inlineData.data + "-" + part.inlineData.mimeType;
         else return "";
       })
       .filter(Boolean);
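
For context on the refactor above: in v1.x the `GenerativeModel` returned by `getGenerativeModel()` carries its own model name and `generationConfig` (camelCase fields), so the instrumentation can read request parameters off `this` inside `generateContent`/`generateContentStream` instead of wrapping `getGenerativeModel` and caching `ModelParams`. A minimal sketch of that surface, assuming the v1.2.0 API shape (project/location values are hypothetical):

```ts
import { VertexAI } from "@google-cloud/vertexai";

async function main() {
  // Hypothetical project/location, for illustration only.
  const vertexAI = new VertexAI({
    project: "my-project",
    location: "us-central1",
  });

  // The returned GenerativeModel holds `model` and `generationConfig`,
  // which is what the wrapper now reads via instance["model"] and
  // instance["generationConfig"] when starting a span.
  const model = vertexAI.getGenerativeModel({
    model: "gemini-1.5-flash",
    generationConfig: { maxOutputTokens: 256, topP: 0.9 },
  });

  const result = await model.generateContent({
    contents: [{ role: "user", parts: [{ text: "What is Node.js?" }] }],
  });
  // `candidates` is optional in v1.x, hence the optional chaining.
  console.log(result.response.candidates?.[0]?.content.parts[0].text);
}

main();
```
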
diff --git a/packages/instrumentation-vertexai/tests/gemini.test.ts b/packages/instrumentation-vertexai/tests/gemini.test.ts
index 4c04a8c2..e083597a 100644
--- a/packages/instrumentation-vertexai/tests/gemini.test.ts
+++ b/packages/instrumentation-vertexai/tests/gemini.test.ts
@@ -60,9 +60,9 @@ describe.skip("Test Gemini GenerativeModel Instrumentation", () => {
 
     const generativeModel = vertexAI.preview.getGenerativeModel({
       model,
-      generation_config: {
-        top_p: 0.9,
-        max_output_tokens: 256,
+      generationConfig: {
+        topP: 0.9,
+        maxOutputTokens: 256,
       },
     });
     const prompt = "What is Node.js?";
@@ -78,7 +78,7 @@ describe.skip("Test Gemini GenerativeModel Instrumentation", () => {
     const aggregatedResponse = await responseStream.response;
 
     const fullTextResponse =
-      aggregatedResponse.candidates[0].content.parts[0].text;
+      aggregatedResponse.candidates![0].content.parts[0].text;
 
     const spans = memoryExporter.getFinishedSpans();
 
@@ -87,10 +87,7 @@ describe.skip("Test Gemini GenerativeModel Instrumentation", () => {
     assert.strictEqual(attributes["gen_ai.system"], "VertexAI");
     assert.strictEqual(attributes["llm.request.type"], "completion");
     assert.strictEqual(attributes["gen_ai.request.model"], model);
-    assert.strictEqual(
-      attributes["gen_ai.request.top_p"],
-      generativeModel.generation_config?.top_p,
-    );
+    assert.strictEqual(attributes["gen_ai.request.top_p"], 0.9);
     assert.strictEqual(attributes["gen_ai.prompt.0.content"], prompt);
     assert.strictEqual(attributes["gen_ai.prompt.0.role"], "user");
     assert.strictEqual(attributes["gen_ai.response.model"], model);
@@ -111,9 +108,9 @@ describe.skip("Test Gemini GenerativeModel Instrumentation", () => {
 
     const generativeModel = vertexAI.preview.getGenerativeModel({
       model,
-      generation_config: {
-        top_p: 0.9,
-        max_output_tokens: 256,
+      generationConfig: {
+        topP: 0.9,
+        maxOutputTokens: 256,
       },
     });
     const prompt = "What are the 4 cardinal directions?";
@@ -129,7 +126,7 @@ describe.skip("Test Gemini GenerativeModel Instrumentation", () => {
 
     const fullTextResponse = [];
     for await (const item of responseStream.stream) {
-      fullTextResponse.push(item.candidates[0].content.parts[0].text);
+      fullTextResponse.push(item.candidates![0].content.parts[0].text);
     }
 
     assert.ok(fullTextResponse);
@@ -143,14 +140,8 @@ describe.skip("Test Gemini GenerativeModel Instrumentation", () => {
     assert.strictEqual(attributes["gen_ai.system"], "VertexAI");
     assert.strictEqual(attributes["llm.request.type"], "completion");
     assert.strictEqual(attributes["gen_ai.request.model"], model);
-    assert.strictEqual(
-      attributes["gen_ai.request.top_p"],
-      generativeModel.generation_config?.top_p,
-    );
-    assert.strictEqual(
-      attributes["gen_ai.request.max_tokens"],
-      generativeModel.generation_config?.max_output_tokens,
-    );
+    assert.strictEqual(attributes["gen_ai.request.top_p"], 0.9);
+    assert.strictEqual(attributes["gen_ai.request.max_tokens"], 256);
     assert.strictEqual(attributes["gen_ai.prompt.0.content"], prompt);
     assert.strictEqual(attributes["gen_ai.prompt.0.role"], "user");
     assert.strictEqual(attributes["gen_ai.response.model"], model);
diff --git a/packages/sample-app/package.json b/packages/sample-app/package.json
index 6dea7782..a417275a 100644
--- a/packages/sample-app/package.json
+++ b/packages/sample-app/package.json
@@ -36,7 +36,7 @@
     "@aws-sdk/client-bedrock-runtime": "^3.499.0",
     "@azure/openai": "^1.0.0-beta.11",
     "@google-cloud/aiplatform": "^3.10.0",
-    "@google-cloud/vertexai": "^0.2.1",
+    "@google-cloud/vertexai": "^1.2.0",
     "@langchain/community": "^0.0.34",
     "@pinecone-database/pinecone": "^2.0.1",
     "@traceloop/node-server-sdk": "*",
diff --git a/packages/sample-app/src/vertexai/gemini.ts b/packages/sample-app/src/vertexai/gemini.ts
index 6a629925..b5b2afe5 100644
--- a/packages/sample-app/src/vertexai/gemini.ts
+++ b/packages/sample-app/src/vertexai/gemini.ts
@@ -18,11 +18,8 @@ async function createNonStreamingContent() {
     { name: "sample_completion" },
     async () => {
       // Instantiate the model
-      const generativeModel = vertexAI.preview.getGenerativeModel({
-        model: "gemini-pro-vision",
-        generation_config: {
-          max_output_tokens: 256,
-        },
+      const generativeModel = vertexAI.getGenerativeModel({
+        model: "gemini-1.5-flash",
       });
 
       const request = {
@@ -42,7 +39,7 @@ async function createNonStreamingContent() {
 
       // Select the text from the response
       const fullTextResponse =
-        aggregatedResponse.candidates[0].content.parts[0].text;
+        aggregatedResponse.candidates![0].content.parts[0].text;
 
       return fullTextResponse;
     },
@@ -54,11 +51,8 @@ async function createStreamingContent() {
     { name: "sample_stream_completion" },
     async () => {
       // Instantiate the model
-      const generativeModel = vertexAI.preview.getGenerativeModel({
-        model: "gemini-pro-vision",
-        generation_config: {
-          max_output_tokens: 256,
-        },
+      const generativeModel = vertexAI.getGenerativeModel({
+        model: "gemini-1.5-flash",
       });
 
       const request = {
@@ -79,7 +73,7 @@ async function createStreamingContent() {
 
       // Select the text from the response
       const fullTextResponse =
-        aggregatedResponse.candidates[0].content.parts[0].text;
+        aggregatedResponse.candidates![0].content.parts[0].text;
 
       return fullTextResponse;
     },
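
The streaming path exercised above is the second method the instrumentation wraps; `_endSpan` awaits `result.response`, which for streams resolves to the aggregated response carrying the `usageMetadata` token counts recorded on the span. A sketch under the same v1.x assumptions (hypothetical project/location):

```ts
import { VertexAI } from "@google-cloud/vertexai";

async function streamDemo() {
  const vertexAI = new VertexAI({
    project: "my-project", // hypothetical
    location: "us-central1",
  });
  const model = vertexAI.getGenerativeModel({ model: "gemini-1.5-flash" });

  const result = await model.generateContentStream({
    contents: [
      { role: "user", parts: [{ text: "What are the 4 cardinal directions?" }] },
    ],
  });

  // Consume chunks as they arrive...
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.candidates?.[0]?.content.parts[0].text ?? "");
  }

  // ...then read the aggregated response, including the usage metadata the
  // instrumentation records (totalTokenCount, promptTokenCount, etc.).
  const aggregated = await result.response;
  console.log("\ntotal tokens:", aggregated.usageMetadata?.totalTokenCount);
}

streamDemo();
```
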
diff --git a/packages/traceloop-sdk/CHANGELOG.md b/packages/traceloop-sdk/CHANGELOG.md
index a68fe3c3..d6ac3266 100644
--- a/packages/traceloop-sdk/CHANGELOG.md
+++ b/packages/traceloop-sdk/CHANGELOG.md
@@ -3,6 +3,12 @@
 All notable changes to this project will be documented in this file.
 See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
 
+## [0.8.5](https://github.com/traceloop/openllmetry-js/compare/v0.8.4...v0.8.5) (2024-05-31)
+
+### Bug Fixes
+
+- **vertex:** support v1.2.0 ([#290](https://github.com/traceloop/openllmetry-js/issues/290)) ([e62c9b4](https://github.com/traceloop/openllmetry-js/commit/e62c9b420881b69971d3ee910c5d3f613df3be50))
+
 ## [0.8.4](https://github.com/traceloop/openllmetry-js/compare/v0.8.3...v0.8.4) (2024-05-20)
 
 ### Bug Fixes
diff --git a/packages/traceloop-sdk/package.json b/packages/traceloop-sdk/package.json
index 121c9c85..6eff2b07 100644
--- a/packages/traceloop-sdk/package.json
+++ b/packages/traceloop-sdk/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@traceloop/node-server-sdk",
-  "version": "0.8.4",
+  "version": "0.8.5",
   "description": "Traceloop Software Development Kit (SDK) for Node.js",
   "main": "dist/index.js",
   "module": "dist/index.mjs",
@@ -48,7 +48,7 @@
     "@traceloop/instrumentation-llamaindex": "^0.8.0",
     "@traceloop/instrumentation-openai": "^0.8.2",
     "@traceloop/instrumentation-pinecone": "^0.8.3",
-    "@traceloop/instrumentation-vertexai": "^0.8.0",
+    "@traceloop/instrumentation-vertexai": "^0.8.5",
     "@types/nunjucks": "^3.2.5",
     "cross-fetch": "^4.0.0",
     "fetch-retry": "^5.0.6",
@@ -64,7 +64,7 @@
     "@aws-sdk/client-bedrock-runtime": "^3.499.0",
     "@azure/openai": "^1.0.0-beta.11",
     "@google-cloud/aiplatform": "^3.10.0",
-    "@google-cloud/vertexai": "^0.2.1",
+    "@google-cloud/vertexai": "^1.2.0",
     "@pinecone-database/pinecone": "^2.0.1",
     "@pollyjs/adapter-node-http": "^6.0.6",
     "@pollyjs/core": "^6.0.6",